swiotlb: add documentation and rename swiotlb_do_find_slots()
Add some kernel-doc comments and move the existing documentation of struct
io_tlb_slot to its correct location. The latter was forgotten in commit
942a8186eb ("swiotlb: move struct io_tlb_slot to swiotlb.c").
Use the opportunity to give swiotlb_do_find_slots() a more descriptive name
and make it clear how it differs from swiotlb_find_slots().
Signed-off-by: Petr Tesarik <petr.tesarik.ext@huawei.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
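
The renamed helper searches a single IO TLB memory area; swiotlb_find_slots() walks all areas and delegates to it. A minimal sketch of that relationship, assuming the upstream loop shape (the starting-area heuristic shown here is an assumption):

static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
		size_t alloc_size, unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	/* Assumed heuristic: start at an area derived from the current CPU. */
	int start = raw_smp_processor_id() & (mem->nareas - 1);
	int i = start, index;

	do {
		/* Per-area search and locking live in swiotlb_area_find_slots(). */
		index = swiotlb_area_find_slots(dev, i, orig_addr, alloc_size,
						alloc_align_mask);
		if (index >= 0)
			return index;
		if (++i >= mem->nareas)
			i = 0;
	} while (i != start);

	return -1;
}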
parent 05ee774122
commit fea18777a7
@@ -76,10 +76,6 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
  * @nslabs: The number of IO TLB blocks (in groups of 64) between @start and
  *	@end. For default swiotlb, this is command line adjustable via
  *	setup_io_tlb_npages.
- * @list: The free list describing the number of free entries available
- *	from each index.
- * @orig_addr: The original address corresponding to a mapped entry.
- * @alloc_size: Size of the allocated buffer.
  * @debugfs: The dentry to debugfs.
  * @late_alloc: %true if allocated using the page allocator
  * @force_bounce: %true if swiotlb bouncing is forced
@@ -111,6 +107,17 @@ struct io_tlb_mem {
 #endif
 };
 
+/**
+ * is_swiotlb_buffer() - check if a physical address belongs to a swiotlb
+ * @dev: Device which has mapped the buffer.
+ * @paddr: Physical address within the DMA buffer.
+ *
+ * Check if @paddr points into a bounce buffer.
+ *
+ * Return:
+ * * %true if @paddr points into a bounce buffer
+ * * %false otherwise
+ */
 static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
 {
 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
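
A hedged usage sketch for the newly documented helper: a sync/unmap path only calls into swiotlb when the physical address really lies in the bounce buffer. The wrapper function name below is hypothetical; swiotlb_tbl_unmap_single() is the existing swiotlb API.

#include <linux/swiotlb.h>

/* Hypothetical helper illustrating the check on the unmap path. */
static void example_dma_unmap(struct device *dev, phys_addr_t paddr,
			      size_t size, enum dma_data_direction dir)
{
	if (is_swiotlb_buffer(dev, paddr))
		swiotlb_tbl_unmap_single(dev, paddr, size, dir, 0);
}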
@@ -62,6 +62,13 @@
 
 #define INVALID_PHYS_ADDR (~(phys_addr_t)0)
 
+/**
+ * struct io_tlb_slot - IO TLB slot descriptor
+ * @orig_addr: The original address corresponding to a mapped entry.
+ * @alloc_size: Size of the allocated buffer.
+ * @list: The free list describing the number of free entries available
+ *	from each index.
+ */
 struct io_tlb_slot {
 	phys_addr_t orig_addr;
 	size_t alloc_size;
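
For context, the members named in the new kernel-doc correspond to the full descriptor in swiotlb.c; the @list member is cut off by the hunk, so its type below is an assumption:

struct io_tlb_slot {
	phys_addr_t orig_addr;	/* original (non-bounced) buffer address */
	size_t alloc_size;	/* size of the allocated bounce buffer */
	unsigned int list;	/* free-entry count from this index (assumed type) */
};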
@@ -635,11 +642,22 @@ static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
 }
 #endif /* CONFIG_DEBUG_FS */
 
-/*
- * Find a suitable number of IO TLB entries size that will fit this request and
- * allocate a buffer from that IO TLB pool.
- */
-static int swiotlb_do_find_slots(struct device *dev, int area_index,
+/**
+ * swiotlb_area_find_slots() - search for slots in one IO TLB memory area
+ * @dev: Device which maps the buffer.
+ * @area_index: Index of the IO TLB memory area to be searched.
+ * @orig_addr: Original (non-bounced) IO buffer address.
+ * @alloc_size: Total requested size of the bounce buffer,
+ *	including initial alignment padding.
+ * @alloc_align_mask: Required alignment of the allocated buffer.
+ *
+ * Find a suitable sequence of IO TLB entries for the request and allocate
+ * a buffer from the given IO TLB memory area.
+ * This function takes care of locking.
+ *
+ * Return: Index of the first allocated slot, or -1 on error.
+ */
+static int swiotlb_area_find_slots(struct device *dev, int area_index,
 		phys_addr_t orig_addr, size_t alloc_size,
 		unsigned int alloc_align_mask)
 {
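
The "takes care of locking" sentence refers to per-area serialization: each IO TLB area has its own lock, so concurrent mappings in different areas do not contend. A hedged sketch of that shape (field names assumed, the actual slot scan elided):

static int swiotlb_area_find_slots(struct device *dev, int area_index,
		phys_addr_t orig_addr, size_t alloc_size,
		unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	struct io_tlb_area *area = mem->areas + area_index;
	unsigned long flags;
	int slot_index = -1;

	spin_lock_irqsave(&area->lock, flags);
	/* ... scan this area for a run of free slots that satisfies
	 * alloc_size and alloc_align_mask; set slot_index on success ... */
	spin_unlock_irqrestore(&area->lock, flags);

	return slot_index;
}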
@@ -734,6 +752,19 @@ found:
 	return slot_index;
 }
 
+/**
+ * swiotlb_find_slots() - search for slots in the whole swiotlb
+ * @dev: Device which maps the buffer.
+ * @orig_addr: Original (non-bounced) IO buffer address.
+ * @alloc_size: Total requested size of the bounce buffer,
+ *	including initial alignment padding.
+ * @alloc_align_mask: Required alignment of the allocated buffer.
+ *
+ * Search through the whole software IO TLB to find a sequence of slots that
+ * match the allocation constraints.
+ *
+ * Return: Index of the first allocated slot, or -1 on error.
+ */
 static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
 		size_t alloc_size, unsigned int alloc_align_mask)
 {
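
A hedged sketch of how a caller consumes the -1 error convention; the wrapper name is hypothetical, while DMA_MAPPING_ERROR and IO_TLB_SHIFT are existing kernel symbols used by the real mapping path:

/* Hypothetical caller: translate a slot index into a bounce-buffer address. */
static phys_addr_t example_map_to_tlb(struct device *dev, phys_addr_t orig_addr,
				      size_t alloc_size,
				      unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	int index = swiotlb_find_slots(dev, orig_addr, alloc_size,
				       alloc_align_mask);

	if (index == -1)
		return (phys_addr_t)DMA_MAPPING_ERROR;

	/* Each slot covers IO_TLB_SIZE bytes starting at mem->start. */
	return mem->start + ((phys_addr_t)index << IO_TLB_SHIFT);
}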
@@ -742,8 +773,8 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
 	int i = start, index;
 
 	do {
-		index = swiotlb_do_find_slots(dev, i, orig_addr, alloc_size,
-					      alloc_align_mask);
+		index = swiotlb_area_find_slots(dev, i, orig_addr, alloc_size,
+						alloc_align_mask);
 		if (index >= 0)
 			return index;
 		if (++i >= mem->nareas)
@@ -755,6 +786,15 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
 
 #ifdef CONFIG_DEBUG_FS
 
+/**
+ * mem_used() - get number of used slots in an allocator
+ * @mem: Software IO TLB allocator.
+ *
+ * The result is accurate in this version of the function, because an atomic
+ * counter is available if CONFIG_DEBUG_FS is set.
+ *
+ * Return: Number of used slots.
+ */
 static unsigned long mem_used(struct io_tlb_mem *mem)
 {
 	return atomic_long_read(&mem->total_used);
@@ -762,6 +802,15 @@ static unsigned long mem_used(struct io_tlb_mem *mem)
 
 #else /* !CONFIG_DEBUG_FS */
 
+/**
+ * mem_used() - get number of used slots in an allocator
+ * @mem: Software IO TLB allocator.
+ *
+ * The result is not accurate, because there is no locking of individual
+ * areas.
+ *
+ * Return: Approximate number of used slots.
+ */
 static unsigned long mem_used(struct io_tlb_mem *mem)
 {
 	int i;
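
The hunk truncates the !CONFIG_DEBUG_FS variant; the reason the new comment calls it approximate is that it sums the per-area counters without taking the area locks, so concurrent updates can be missed. A hedged sketch of that shape (field names assumed from the surrounding code):

static unsigned long mem_used(struct io_tlb_mem *mem)
{
	unsigned long used = 0;
	int i;

	/* No locking: areas may change while we sum their counters. */
	for (i = 0; i < mem->nareas; i++)
		used += mem->areas[i].used;

	return used;
}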