binder: add functions to copy to/from binder buffers
Avoid vm_area when copying to or from binder buffers. Instead, add new copy functions that copy between kernel space and binder buffer space. These use kmap_atomic() and kunmap_atomic() to create temporary mappings, then memcpy() to copy within the mapped page.

kmap_atomic()/kunmap_atomic() also perform the appropriate cache flushing, which supports VIVT cache architectures; binder is therefore allowed to build when CPU_CACHE_VIVT is defined.

Several uses of the new functions are added here. More follow in subsequent patches.

Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 1a7c3d9bb7
commit 8ced0c6231
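The core pattern in this change, for reference: copy page by page through a short-lived atomic mapping instead of through a persistent vm_area mapping. The sketch below is illustrative only, not code from this patch; lookup_page() is a hypothetical stand-in for the driver's binder_alloc_get_page(), and error handling is omitted.

    #include <linux/highmem.h>
    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    /*
     * Copy "bytes" from src into a buffer backed by individually
     * allocated pages. lookup_page() (hypothetical) stands in for
     * binder_alloc_get_page(): it returns the page backing
     * buffer_offset and sets *pgoff to the offset within that page.
     */
    static void copy_to_paged_buffer(struct page *(*lookup_page)(size_t off, pgoff_t *pgoff),
                                     size_t buffer_offset, const void *src,
                                     size_t bytes)
    {
            while (bytes) {
                    pgoff_t pgoff;
                    struct page *page = lookup_page(buffer_offset, &pgoff);
                    size_t size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
                    void *kptr = kmap_atomic(page); /* temporary mapping */

                    memcpy(kptr + pgoff, src, size);
                    kunmap_atomic(kptr);            /* flushes VIVT caches */

                    bytes -= size;
                    src += size;
                    buffer_offset += size;
            }
    }

Because kmap_atomic() disables preemption, nothing may sleep between the map and the unmap; the new helpers keep only the memcpy() inside that window.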
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -10,7 +10,7 @@ if ANDROID
 
 config ANDROID_BINDER_IPC
 	bool "Android Binder IPC Driver"
-	depends on MMU && !CPU_CACHE_VIVT
+	depends on MMU
 	default n
 	---help---
 	  Binder is used in Android for both communication between processes,
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2244,14 +2244,22 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 	off_end = (void *)off_start + buffer->offsets_size;
 	for (offp = off_start; offp < off_end; offp++) {
 		struct binder_object_header *hdr;
-		size_t object_size = binder_validate_object(buffer, *offp);
+		size_t object_size;
+		binder_size_t object_offset;
+		binder_size_t buffer_offset = (uintptr_t)offp -
+			(uintptr_t)buffer->data;
 
+		binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
+					      buffer, buffer_offset,
+					      sizeof(object_offset));
+		object_size = binder_validate_object(buffer, object_offset);
 		if (object_size == 0) {
 			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
-			       debug_id, (u64)*offp, buffer->data_size);
+			       debug_id, (u64)object_offset, buffer->data_size);
 			continue;
 		}
-		hdr = (struct binder_object_header *)(buffer->data + *offp);
+		hdr = (struct binder_object_header *)
+			(buffer->data + object_offset);
 		switch (hdr->type) {
 		case BINDER_TYPE_BINDER:
 		case BINDER_TYPE_WEAK_BINDER: {
@@ -2359,8 +2367,20 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 				continue;
 			}
 			fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
-			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
-				binder_deferred_fd_close(fd_array[fd_index]);
+			for (fd_index = 0; fd_index < fda->num_fds;
+			     fd_index++) {
+				u32 fd;
+				binder_size_t offset =
+					(uintptr_t)&fd_array[fd_index] -
+					(uintptr_t)buffer->data;
+
+				binder_alloc_copy_from_buffer(&proc->alloc,
+							      &fd,
+							      buffer,
+							      offset,
+							      sizeof(fd));
+				binder_deferred_fd_close(fd);
+			}
 		} break;
 		default:
 			pr_err("transaction release %d bad object type %x\n",
@@ -2496,7 +2516,7 @@ done:
 	return ret;
 }
 
-static int binder_translate_fd(u32 *fdp,
+static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
 			       struct binder_transaction *t,
 			       struct binder_thread *thread,
 			       struct binder_transaction *in_reply_to)
@@ -2507,7 +2527,6 @@ static int binder_translate_fd(u32 *fdp,
 	struct file *file;
 	int ret = 0;
 	bool target_allows_fd;
-	int fd = *fdp;
 
 	if (in_reply_to)
 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
@@ -2546,7 +2565,7 @@ static int binder_translate_fd(u32 *fdp,
 		goto err_alloc;
 	}
 	fixup->file = file;
-	fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data;
+	fixup->offset = fd_offset;
 	trace_binder_transaction_fd_send(t, fd, fixup->offset);
 	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
 
@@ -2598,8 +2617,17 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
 		return -EINVAL;
 	}
 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
-		int ret = binder_translate_fd(&fd_array[fdi], t, thread,
-					      in_reply_to);
+		u32 fd;
+		int ret;
+		binder_size_t offset =
+			(uintptr_t)&fd_array[fdi] -
+			(uintptr_t)t->buffer->data;
+
+		binder_alloc_copy_from_buffer(&target_proc->alloc,
+					      &fd, t->buffer,
+					      offset, sizeof(fd));
+		ret = binder_translate_fd(fd, offset, t, thread,
+					  in_reply_to);
 		if (ret < 0)
 			return ret;
 	}
@@ -3066,7 +3094,9 @@ static void binder_transaction(struct binder_proc *proc,
 
 		t->security_ctx = (uintptr_t)kptr +
 			binder_alloc_get_user_buffer_offset(&target_proc->alloc);
-		memcpy(kptr, secctx, secctx_sz);
+		binder_alloc_copy_to_buffer(&target_proc->alloc,
+					    t->buffer, buf_offset,
+					    secctx, secctx_sz);
 		security_release_secctx(secctx, secctx_sz);
 		secctx = NULL;
 	}
@@ -3128,11 +3158,21 @@ static void binder_transaction(struct binder_proc *proc,
 	off_min = 0;
 	for (; offp < off_end; offp++) {
 		struct binder_object_header *hdr;
-		size_t object_size = binder_validate_object(t->buffer, *offp);
-
-		if (object_size == 0 || *offp < off_min) {
+		size_t object_size;
+		binder_size_t object_offset;
+		binder_size_t buffer_offset =
+			(uintptr_t)offp - (uintptr_t)t->buffer->data;
+
+		binder_alloc_copy_from_buffer(&target_proc->alloc,
+					      &object_offset,
+					      t->buffer,
+					      buffer_offset,
+					      sizeof(object_offset));
+		object_size = binder_validate_object(t->buffer, object_offset);
+		if (object_size == 0 || object_offset < off_min) {
 			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
-					  proc->pid, thread->pid, (u64)*offp,
+					  proc->pid, thread->pid,
+					  (u64)object_offset,
 					  (u64)off_min,
 					  (u64)t->buffer->data_size);
 			return_error = BR_FAILED_REPLY;
@@ -3141,8 +3181,9 @@ static void binder_transaction(struct binder_proc *proc,
 			goto err_bad_offset;
 		}
 
-		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
-		off_min = *offp + object_size;
+		hdr = (struct binder_object_header *)
+			(t->buffer->data + object_offset);
+		off_min = object_offset + object_size;
 		switch (hdr->type) {
 		case BINDER_TYPE_BINDER:
 		case BINDER_TYPE_WEAK_BINDER: {
@@ -3173,8 +3214,10 @@ static void binder_transaction(struct binder_proc *proc,
 
 		case BINDER_TYPE_FD: {
 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
-			int ret = binder_translate_fd(&fp->fd, t, thread,
-						      in_reply_to);
+			binder_size_t fd_offset = object_offset +
+				(uintptr_t)&fp->fd - (uintptr_t)fp;
+			int ret = binder_translate_fd(fp->fd, fd_offset, t,
+						      thread, in_reply_to);
 
 			if (ret < 0) {
 				return_error = BR_FAILED_REPLY;
@@ -3967,6 +4010,7 @@ static int binder_wait_for_work(struct binder_thread *thread,
 
 /**
  * binder_apply_fd_fixups() - finish fd translation
+ * @proc:	binder_proc associated @t->buffer
  * @t:	binder transaction with list of fd fixups
  *
  * Now that we are in the context of the transaction target
@@ -3978,14 +4022,14 @@ static int binder_wait_for_work(struct binder_thread *thread,
  * fput'ing files that have not been processed and ksys_close'ing
  * any fds that have already been allocated.
  */
-static int binder_apply_fd_fixups(struct binder_transaction *t)
+static int binder_apply_fd_fixups(struct binder_proc *proc,
+				  struct binder_transaction *t)
 {
 	struct binder_txn_fd_fixup *fixup, *tmp;
 	int ret = 0;
 
 	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
 		int fd = get_unused_fd_flags(O_CLOEXEC);
-		u32 *fdp;
 
 		if (fd < 0) {
 			binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -4000,33 +4044,20 @@ static int binder_apply_fd_fixups(struct binder_transaction *t)
 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
 		fd_install(fd, fixup->file);
 		fixup->file = NULL;
-		fdp = (u32 *)(t->buffer->data + fixup->offset);
-		/*
-		 * This store can cause problems for CPUs with a
-		 * VIVT cache (eg ARMv5) since the cache cannot
-		 * detect virtual aliases to the same physical cacheline.
-		 * To support VIVT, this address and the user-space VA
-		 * would both need to be flushed. Since this kernel
-		 * VA is not constructed via page_to_virt(), we can't
-		 * use flush_dcache_page() on it, so we'd have to use
-		 * an internal function. If devices with VIVT ever
-		 * need to run Android, we'll either need to go back
-		 * to patching the translated fd from the sender side
-		 * (using the non-standard kernel functions), or rework
-		 * how the kernel uses the buffer to use page_to_virt()
-		 * addresses instead of allocating in our own vm area.
-		 *
-		 * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT.
-		 */
-		*fdp = fd;
+		binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
+					    fixup->offset, &fd,
+					    sizeof(u32));
 	}
 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
 		if (fixup->file) {
 			fput(fixup->file);
 		} else if (ret) {
-			u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
-
-			binder_deferred_fd_close(*fdp);
+			u32 fd;
+
+			binder_alloc_copy_from_buffer(&proc->alloc, &fd,
+						      t->buffer, fixup->offset,
+						      sizeof(fd));
+			binder_deferred_fd_close(fd);
 		}
 		list_del(&fixup->fixup_entry);
 		kfree(fixup);
@@ -4324,7 +4355,7 @@ retry:
 			trd->sender_pid = 0;
 		}
 
-		ret = binder_apply_fd_fixups(t);
+		ret = binder_apply_fd_fixups(proc, t);
 		if (ret) {
 			struct binder_buffer *buffer = t->buffer;
 			bool oneway = !!(t->flags & TF_ONE_WAY);
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1166,3 +1166,62 @@ binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
 	}
 	return 0;
 }
+
+static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
+					bool to_buffer,
+					struct binder_buffer *buffer,
+					binder_size_t buffer_offset,
+					void *ptr,
+					size_t bytes)
+{
+	/* All copies must be 32-bit aligned and 32-bit size */
+	BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes));
+
+	while (bytes) {
+		unsigned long size;
+		struct page *page;
+		pgoff_t pgoff;
+		void *tmpptr;
+		void *base_ptr;
+
+		page = binder_alloc_get_page(alloc, buffer,
+					     buffer_offset, &pgoff);
+		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+		base_ptr = kmap_atomic(page);
+		tmpptr = base_ptr + pgoff;
+		if (to_buffer)
+			memcpy(tmpptr, ptr, size);
+		else
+			memcpy(ptr, tmpptr, size);
+		/*
+		 * kunmap_atomic() takes care of flushing the cache
+		 * if this device has VIVT cache arch
+		 */
+		kunmap_atomic(base_ptr);
+		bytes -= size;
+		pgoff = 0;
+		ptr = ptr + size;
+		buffer_offset += size;
+	}
+}
+
+void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
+				 struct binder_buffer *buffer,
+				 binder_size_t buffer_offset,
+				 void *src,
+				 size_t bytes)
+{
+	binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
+				    src, bytes);
+}
+
+void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
+				   void *dest,
+				   struct binder_buffer *buffer,
+				   binder_size_t buffer_offset,
+				   size_t bytes)
+{
+	binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
+				    dest, bytes);
+}
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -191,5 +191,17 @@ binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
 				 const void __user *from,
 				 size_t bytes);
 
+void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
+				 struct binder_buffer *buffer,
+				 binder_size_t buffer_offset,
+				 void *src,
+				 size_t bytes);
+
+void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
+				   void *dest,
+				   struct binder_buffer *buffer,
+				   binder_size_t buffer_offset,
+				   size_t bytes);
+
 #endif /* _LINUX_BINDER_ALLOC_H */
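With these declarations, a call site round-trips values through the copy helpers instead of dereferencing buffer->data directly. A minimal sketch of the calling pattern, assuming proc, t and fixup are in scope as in binder_apply_fd_fixups() above; translate_fd_value() is a hypothetical translation step, not part of this patch:

    u32 fd, new_fd;

    /* read the sender's fd value out of the target buffer */
    binder_alloc_copy_from_buffer(&proc->alloc, &fd,
                                  t->buffer, fixup->offset, sizeof(fd));
    new_fd = translate_fd_value(fd);        /* hypothetical */
    /* write the translated fd back at the same offset */
    binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
                                fixup->offset, &new_fd, sizeof(new_fd));

Keeping both directions behind the single binder_alloc_do_buffer_copy() core means the alignment check and the per-page kmap_atomic() discipline live in exactly one place.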