diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index f65ff1930120..d3ae03edbbd2 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -79,11 +79,8 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
  * @used:	The number of used IO TLB block.
  * @list:	The free list describing the number of free entries available
  *		from each index.
- * @index:	The index to start searching in the next round.
  * @orig_addr:	The original address corresponding to a mapped entry.
  * @alloc_size:	Size of the allocated buffer.
- * @lock:	The lock to protect the above data structures in the map and
- *		unmap calls.
  * @debugfs:	The dentry to debugfs.
  * @late_alloc:	%true if allocated using the page allocator
  * @force_bounce: %true if swiotlb bouncing is forced
@@ -97,8 +94,6 @@ struct io_tlb_mem {
 	void *vaddr;
 	unsigned long nslabs;
 	unsigned long used;
-	unsigned int index;
-	spinlock_t lock;
 	struct dentry *debugfs;
 	bool late_alloc;
 	bool force_bounce;
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index c50e6fe20f37..cbffa0b1ace5 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -253,14 +253,12 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
 	mem->nslabs = nslabs;
 	mem->start = start;
 	mem->end = mem->start + bytes;
-	mem->index = 0;
 	mem->late_alloc = late_alloc;
 	mem->nareas = nareas;
 	mem->area_nslabs = nslabs / mem->nareas;
 	mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
-	spin_lock_init(&mem->lock);
 	for (i = 0; i < mem->nareas; i++) {
 		spin_lock_init(&mem->areas[i].lock);
 		mem->areas[i].index = 0;
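
Context for the removals above: since the multi-area split, each io_tlb_area
carries its own lock and search index, so the pool-wide mem->lock and
mem->index are dead state. The sketch below is an illustration only, in
userspace C with pthreads rather than kernel spinlocks; the pool/area/
pool_init/area_for_cpu names are hypothetical, not kernel API. It shows the
same sharding pattern the init loop sets up: one lock and one index per
area, none at the pool level.

/*
 * Illustration of the per-area sharding pattern (not kernel code).
 * All mutable search state lives in struct area; struct pool keeps
 * only immutable geometry, mirroring io_tlb_mem after this patch.
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdlib.h>

struct area {
	pthread_mutex_t lock;	/* was a single spinlock_t in io_tlb_mem */
	unsigned int index;	/* was a single search index in io_tlb_mem */
	unsigned long used;
};

struct pool {
	unsigned long nslabs;
	unsigned int nareas;
	unsigned long area_nslabs;
	struct area *areas;
};

/* Mirrors swiotlb_init_io_tlb_mem(): initialize lock and index per area. */
static int pool_init(struct pool *p, unsigned long nslabs, unsigned int nareas)
{
	unsigned int i;

	p->areas = calloc(nareas, sizeof(*p->areas));
	if (!p->areas)
		return -1;
	p->nslabs = nslabs;
	p->nareas = nareas;
	p->area_nslabs = nslabs / nareas;
	for (i = 0; i < nareas; i++) {
		pthread_mutex_init(&p->areas[i].lock, NULL);
		p->areas[i].index = 0;
	}
	return 0;
}

/* Pick an area by CPU so concurrent callers usually take different locks. */
static struct area *area_for_cpu(struct pool *p)
{
	return &p->areas[(unsigned int)sched_getcpu() % p->nareas];
}

An allocator built on this takes only the lock of the area it hashes to, so
mappings issued from different CPUs typically contend on different locks,
which is what makes the single pool-wide lock (and its shared next-search
index) removable.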