swiotlb: remove unused fields in io_tlb_mem
Commit 20347fca71 ("swiotlb: split up the global swiotlb lock") split
io_tlb_mem into multiple areas, each with its own lock and index. The
global lock and index are no longer used, so remove them.
Signed-off-by: Chao Gao <chao.gao@intel.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
@@ -79,11 +79,8 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
  * @used:	The number of used IO TLB block.
  * @list:	The free list describing the number of free entries available
  *		from each index.
- * @index:	The index to start searching in the next round.
  * @orig_addr:	The original address corresponding to a mapped entry.
  * @alloc_size:	Size of the allocated buffer.
- * @lock:	The lock to protect the above data structures in the map and
- *		unmap calls.
  * @debugfs:	The dentry to debugfs.
  * @late_alloc:	%true if allocated using the page allocator
  * @force_bounce: %true if swiotlb bouncing is forced
@@ -97,8 +94,6 @@ struct io_tlb_mem {
 	void *vaddr;
 	unsigned long nslabs;
 	unsigned long used;
-	unsigned int index;
-	spinlock_t lock;
 	struct dentry *debugfs;
 	bool late_alloc;
 	bool force_bounce;
@@ -253,14 +253,12 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
 	mem->nslabs = nslabs;
 	mem->start = start;
 	mem->end = mem->start + bytes;
-	mem->index = 0;
 	mem->late_alloc = late_alloc;
 	mem->nareas = nareas;
 	mem->area_nslabs = nslabs / mem->nareas;
 
 	mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
 
-	spin_lock_init(&mem->lock);
 	for (i = 0; i < mem->nareas; i++) {
 		spin_lock_init(&mem->areas[i].lock);
 		mem->areas[i].index = 0;
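For context, the per-area bookkeeping that makes the global index and lock redundant was introduced by commit 20347fca71 ("swiotlb: split up the global swiotlb lock"). Below is a minimal sketch of that per-area state, inferred from the initialization loop in the last hunk; the exact field layout of struct io_tlb_area in the kernel tree may differ.

#include <linux/spinlock.h>

/*
 * Sketch of the per-area state referenced by mem->areas[i] above.
 * The lock and index fields follow the initialization loop in this
 * diff; the used counter is assumed here and the real struct may
 * carry additional fields.
 */
struct io_tlb_area {
	unsigned long used;	/* slots currently in use within this area */
	unsigned int index;	/* where the next slot search starts */
	spinlock_t lock;	/* protects this area's allocation state */
};

With one such structure per area, map and unmap paths only contend on the lock of the area they allocate from, which is why the global mem->index and mem->lock have no remaining users.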