binder: rename alloc->vma_vm_mm to alloc->mm
Rename ->vma_vm_mm to ->mm to reflect the fact that we no longer cache
this reference from vma->vm_mm but from current->mm instead. No
functional changes in this patch.

Reviewed-by: Christian Brauner (Microsoft) <brauner@kernel.org>
Acked-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20220906135948.3048225-2-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit e66b77e505
parent e3c9b0ddfd
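The rename tracks the reference-counting pattern already in place: the mm_struct is captured from current->mm when the binder file is opened and pinned with mmgrab(), and paths that actually touch the address space take a temporary mm_users reference with mmget_not_zero(). A minimal sketch of that pattern follows; the example_* names are hypothetical and only the mmgrab()/mmget_not_zero()/mmput()/mmdrop() pairing mirrors the binder code in the diff below.

/*
 * Sketch of the mm lifetime pattern the renamed field follows.
 * Hypothetical example_* helpers, not the actual binder entry points.
 */
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mmap_lock.h>

struct example_alloc {
	struct mm_struct *mm;	/* copy of task->mm, invariant after open */
};

static void example_open(struct example_alloc *alloc)
{
	alloc->mm = current->mm;
	mmgrab(alloc->mm);	/* pins the mm_struct itself, not the address space */
}

static void example_touch_pages(struct example_alloc *alloc)
{
	/* The owning task may have exited; mm_users can already be zero. */
	if (!mmget_not_zero(alloc->mm))
		return;

	mmap_read_lock(alloc->mm);
	/* ... inspect or populate pages under the mmap lock ... */
	mmap_read_unlock(alloc->mm);

	mmput(alloc->mm);	/* release the temporary mm_users reference */
}

static void example_release(struct example_alloc *alloc)
{
	mmdrop(alloc->mm);	/* drop the mm_count reference taken at open */
}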
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
@@ -208,8 +208,8 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 		}
 	}
 
-	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
-		mm = alloc->vma_vm_mm;
+	if (need_mm && mmget_not_zero(alloc->mm))
+		mm = alloc->mm;
 
 	if (mm) {
 		mmap_read_lock(mm);
@@ -322,9 +322,9 @@ static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
 	 */
 	if (vma) {
 		vm_start = vma->vm_start;
-		mmap_assert_write_locked(alloc->vma_vm_mm);
+		mmap_assert_write_locked(alloc->mm);
 	} else {
-		mmap_assert_locked(alloc->vma_vm_mm);
+		mmap_assert_locked(alloc->mm);
 	}
 
 	alloc->vma_addr = vm_start;
@@ -336,7 +336,7 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
 	struct vm_area_struct *vma = NULL;
 
 	if (alloc->vma_addr)
-		vma = vma_lookup(alloc->vma_vm_mm, alloc->vma_addr);
+		vma = vma_lookup(alloc->mm, alloc->vma_addr);
 
 	return vma;
 }
@@ -401,15 +401,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	size_t size, data_offsets_size;
 	int ret;
 
-	mmap_read_lock(alloc->vma_vm_mm);
+	mmap_read_lock(alloc->mm);
 	if (!binder_alloc_get_vma(alloc)) {
-		mmap_read_unlock(alloc->vma_vm_mm);
+		mmap_read_unlock(alloc->mm);
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 				   "%d: binder_alloc_buf, no vma\n",
 				   alloc->pid);
 		return ERR_PTR(-ESRCH);
 	}
-	mmap_read_unlock(alloc->vma_vm_mm);
+	mmap_read_unlock(alloc->mm);
 
 	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
 		ALIGN(offsets_size, sizeof(void *));
@@ -823,7 +823,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 	buffers = 0;
 	mutex_lock(&alloc->mutex);
 	BUG_ON(alloc->vma_addr &&
-	       vma_lookup(alloc->vma_vm_mm, alloc->vma_addr));
+	       vma_lookup(alloc->mm, alloc->vma_addr));
 
 	while ((n = rb_first(&alloc->allocated_buffers))) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -873,8 +873,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		kfree(alloc->pages);
 	}
 	mutex_unlock(&alloc->mutex);
-	if (alloc->vma_vm_mm)
-		mmdrop(alloc->vma_vm_mm);
+	if (alloc->mm)
+		mmdrop(alloc->mm);
 
 	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
 		     "%s: %d buffers %d, pages %d\n",
@@ -931,13 +931,13 @@ void binder_alloc_print_pages(struct seq_file *m,
 	 * read inconsistent state.
 	 */
 
-	mmap_read_lock(alloc->vma_vm_mm);
+	mmap_read_lock(alloc->mm);
 	if (binder_alloc_get_vma(alloc) == NULL) {
-		mmap_read_unlock(alloc->vma_vm_mm);
+		mmap_read_unlock(alloc->mm);
 		goto uninitialized;
 	}
 
-	mmap_read_unlock(alloc->vma_vm_mm);
+	mmap_read_unlock(alloc->mm);
 	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
 		page = &alloc->pages[i];
 		if (!page->page_ptr)
@@ -1020,7 +1020,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
 
-	mm = alloc->vma_vm_mm;
+	mm = alloc->mm;
 	if (!mmget_not_zero(mm))
 		goto err_mmget;
 	if (!mmap_read_trylock(mm))
@@ -1089,8 +1089,8 @@ static struct shrinker binder_shrinker = {
 void binder_alloc_init(struct binder_alloc *alloc)
 {
 	alloc->pid = current->group_leader->pid;
-	alloc->vma_vm_mm = current->mm;
-	mmgrab(alloc->vma_vm_mm);
+	alloc->mm = current->mm;
+	mmgrab(alloc->mm);
 	mutex_init(&alloc->mutex);
 	INIT_LIST_HEAD(&alloc->buffers);
 }
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
@@ -78,7 +78,7 @@ struct binder_lru_page {
  *                      (invariant after mmap)
  * @tsk:                tid for task that called init for this proc
  *                      (invariant after init)
- * @vma_vm_mm:          copy of vma->vm_mm (invariant after mmap)
+ * @mm:                 copy of task->mm (invariant after open)
  * @buffer:             base of per-proc address space mapped via mmap
  * @buffers:            list of all buffers for this proc
  * @free_buffers:       rb tree of buffers available for allocation
@@ -101,7 +101,7 @@ struct binder_lru_page {
 struct binder_alloc {
 	struct mutex mutex;
 	unsigned long vma_addr;
-	struct mm_struct *vma_vm_mm;
+	struct mm_struct *mm;
 	void __user *buffer;
 	struct list_head buffers;
 	struct rb_root free_buffers;
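Note the asymmetry the rename makes easier to read: binder_alloc_init() takes only an mmgrab() reference, so alloc->mm can outlive the task's address space, and every path that needs the address space still revalidates first, either by pinning mm_users with mmget_not_zero() (binder_update_page_range(), binder_alloc_free_page()) or by taking the mmap lock and re-checking the vma via vma_lookup() (binder_alloc_new_buf_locked(), binder_alloc_deferred_release()).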