// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */

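/*
 * As an illustration of the ordering above (a sketch only; the names are
 * the helpers defined later in this file): a caller that needs both the
 * node lock and the owning proc's inner lock uses the combined helper,
 * which acquires node->lock first and then proc->inner_lock:
 *
 *	binder_node_inner_lock(node);
 *	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
 *	binder_node_inner_unlock(node);
 *
 * This is how binder_inc_node() below is implemented.
 */
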
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
|
2012-10-17 06:29:53 +08:00
|
|
|
#include "binder_trace.h"
|
2011-11-30 19:18:14 +08:00
|
|
|
|
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

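/*
 * Illustrative use of the mask (bit values come from the enum above; the
 * sysfs path is the conventional module-parameter path and is an assumption
 * here): booting with binder.debug_mask=0x210, or writing 0x210 to
 * /sys/module/binder/parameters/debug_mask, selects BINDER_DEBUG_DEAD_BINDER
 * (1U << 4) and BINDER_DEBUG_TRANSACTION (1U << 9); bits not present in the
 * written value, including the defaults above, are disabled.
 */
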
char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)

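/*
 * Example invocation (hypothetical values; the ee argument is assumed to be
 * a struct binder_extended_error, e.g. a thread's extended-error field):
 * record that command BR_DEAD_REPLY with no extra parameter applies to
 * transaction 'id':
 *
 *	binder_set_extended_error(&thread->ee, id, BR_DEAD_REPLY, 0);
 */
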
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_POLL = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields.
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node to acquire
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work: struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

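/*
 * Usage note (illustrative, based on the kernel-doc above): queueing with
 * binder_enqueue_thread_work(thread, work) marks the thread as having work
 * to return on its next read, while
 * binder_enqueue_deferred_thread_work_ilocked(thread, work) leaves
 * thread->process_todo untouched, so a deferred item is only delivered
 * together with other, non-deferred work.
 */
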
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc: process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 *         returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc: process to wake up a thread in
 * @thread: specific thread to wake-up (may be NULL)
 * @sync: whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {

		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node: node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node: node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		/* annotation for sparse */
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		/* annotation for sparse */
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

2017-06-30 03:01:58 +08:00
|
|
|
/**
|
2017-06-30 03:02:08 +08:00
|
|
|
* binder_get_ref_for_node_olocked() - get the ref associated with given node
|
2017-06-30 03:01:58 +08:00
|
|
|
* @proc: binder_proc that owns the ref
|
|
|
|
* @node: binder_node of target
|
|
|
|
* @new_ref: newly allocated binder_ref to be initialized or %NULL
|
|
|
|
*
|
|
|
|
* Look up the ref for the given node and return it if it exists
|
|
|
|
*
|
|
|
|
* If it doesn't exist and the caller provides a newly allocated
|
|
|
|
* ref, initialize the fields of the newly allocated ref and insert
|
|
|
|
* into the given proc rb_trees and node refs list.
|
|
|
|
*
|
|
|
|
* Return: the ref for node. It is possible that another thread
|
|
|
|
* allocated/initialized the ref first in which case the
|
|
|
|
* returned ref would be different than the passed-in
|
|
|
|
* new_ref. new_ref must be kfree'd by the caller in
|
|
|
|
* this case.
|
|
|
|
*/
|
2017-06-30 03:02:08 +08:00
|
|
|
static struct binder_ref *binder_get_ref_for_node_olocked(
|
|
|
|
struct binder_proc *proc,
|
|
|
|
struct binder_node *node,
|
|
|
|
struct binder_ref *new_ref)
|
2011-11-30 19:18:14 +08:00
|
|
|
{
|
2017-06-30 03:01:58 +08:00
|
|
|
struct binder_context *context = proc->context;
|
2011-11-30 19:18:14 +08:00
|
|
|
struct rb_node **p = &proc->refs_by_node.rb_node;
|
|
|
|
struct rb_node *parent = NULL;
|
2017-06-30 03:01:58 +08:00
|
|
|
struct binder_ref *ref;
|
|
|
|
struct rb_node *n;
|
2011-11-30 19:18:14 +08:00
|
|
|
|
|
|
|
while (*p) {
|
|
|
|
parent = *p;
|
|
|
|
ref = rb_entry(parent, struct binder_ref, rb_node_node);
|
|
|
|
|
|
|
|
if (node < ref->node)
|
|
|
|
p = &(*p)->rb_left;
|
|
|
|
else if (node > ref->node)
|
|
|
|
p = &(*p)->rb_right;
|
|
|
|
else
|
|
|
|
return ref;
|
|
|
|
}
|
2017-06-30 03:01:58 +08:00
|
|
|
if (!new_ref)
|
2011-11-30 19:18:14 +08:00
|
|
|
return NULL;
|
2017-06-30 03:01:58 +08:00
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
binder_stats_created(BINDER_STAT_REF);
|
2017-06-30 03:01:58 +08:00
|
|
|
new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
|
2011-11-30 19:18:14 +08:00
|
|
|
new_ref->proc = proc;
|
|
|
|
new_ref->node = node;
|
|
|
|
rb_link_node(&new_ref->rb_node_node, parent, p);
|
|
|
|
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
|
|
|
|
|
2017-06-30 03:01:58 +08:00
|
|
|
new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
|
2011-11-30 19:18:14 +08:00
|
|
|
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
|
|
|
|
ref = rb_entry(n, struct binder_ref, rb_node_desc);
|
2017-06-30 03:01:58 +08:00
|
|
|
if (ref->data.desc > new_ref->data.desc)
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
2017-06-30 03:01:58 +08:00
|
|
|
new_ref->data.desc = ref->data.desc + 1;
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
p = &proc->refs_by_desc.rb_node;
|
|
|
|
while (*p) {
|
|
|
|
parent = *p;
|
|
|
|
ref = rb_entry(parent, struct binder_ref, rb_node_desc);
|
|
|
|
|
2017-06-30 03:01:58 +08:00
|
|
|
if (new_ref->data.desc < ref->data.desc)
|
2011-11-30 19:18:14 +08:00
|
|
|
p = &(*p)->rb_left;
|
2017-06-30 03:01:58 +08:00
|
|
|
else if (new_ref->data.desc > ref->data.desc)
|
2011-11-30 19:18:14 +08:00
|
|
|
p = &(*p)->rb_right;
|
|
|
|
else
|
|
|
|
BUG();
|
|
|
|
}
|
|
|
|
rb_link_node(&new_ref->rb_node_desc, parent, p);
|
|
|
|
rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
|
2017-06-30 03:02:03 +08:00
|
|
|
|
|
|
|
binder_node_lock(node);
|
2017-06-30 03:01:50 +08:00
|
|
|
hlist_add_head(&new_ref->node_entry, &node->refs);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2017-06-30 03:01:50 +08:00
|
|
|
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
|
|
|
|
"%d new ref %d desc %d for node %d\n",
|
2017-06-30 03:01:58 +08:00
|
|
|
proc->pid, new_ref->data.debug_id, new_ref->data.desc,
|
2017-06-30 03:01:50 +08:00
|
|
|
node->debug_id);
|
2017-06-30 03:02:03 +08:00
|
|
|
binder_node_unlock(node);
|
2011-11-30 19:18:14 +08:00
|
|
|
return new_ref;
|
|
|
|
}
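
/*
 * Illustrative sketch (not part of the driver): the caller contract spelled
 * out in the kernel-doc above. The binder_ref must be allocated while no
 * spinlock is held, the lookup is done with the allocation passed in, and
 * the allocation is freed if another thread inserted a ref first. The real
 * caller, binder_inc_ref_for_node() below, first tries the lookup without an
 * allocation; the names here are placeholders.
 */
static int __maybe_unused
binder_ref_lookup_or_insert_sketch(struct binder_proc *proc,
				   struct binder_node *node)
{
	struct binder_ref *new_ref;
	struct binder_ref *ref;

	/* Allocate before taking proc->outer_lock (cannot sleep under it). */
	new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
	if (!new_ref)
		return -ENOMEM;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	/* ... operate on @ref here, while proc->outer_lock is still held ... */
	binder_proc_unlock(proc);

	/* Another thread may have inserted a ref first; ours is then unused. */
	if (ref != new_ref)
		kfree(new_ref);
	return 0;
}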

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:		ref to be incremented
 * @strong:		if true, strong increment, else weak
 * @target_list:	list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_refs will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}
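
/*
 * Illustrative sketch (not part of the driver): how a caller typically uses
 * binder_inc_ref_for_node(). A strong reference is taken on @node in the
 * receiving process and the returned descriptor is what gets written into
 * the flat_binder_object handed to that process; binder_translate_binder()
 * later in this file does exactly this. The names here are placeholders.
 */
static int __maybe_unused
binder_take_handle_sketch(struct binder_proc *target_proc,
			  struct binder_node *node,
			  struct list_head *target_list, u32 *handlep)
{
	struct binder_ref_data rdata;
	int ret;

	ret = binder_inc_ref_for_node(target_proc, node, true /* strong */,
				      target_list, &rdata);
	if (ret)
		return ret;

	*handlep = rdata.desc;	/* descriptor is only valid in target_proc */
	return 0;
}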

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
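
/*
 * Illustrative sketch (not part of the driver): the calling convention for
 * binder_get_txn_from_and_acq_inner(). On success the sender thread's
 * proc->inner_lock is held and a tmp_ref is taken, so the caller must both
 * unlock and drop the reference when done; binder_send_failed_reply() below
 * follows this pattern. The function name is a placeholder.
 */
static void __maybe_unused
binder_poke_sender_sketch(struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from_and_acq_inner(t);
	if (!from) {
		/* Annotation only: the lock is not really held in the NULL case. */
		__release(&t->from->proc->inner_lock);
		return;
	}

	/* ... operate on @from while from->proc->inner_lock is held ... */

	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
}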
|
|
|
|
|
2018-08-29 04:46:25 +08:00
|
|
|
/**
|
|
|
|
* binder_free_txn_fixups() - free unprocessed fd fixups
|
|
|
|
* @t: binder transaction for t->from
|
|
|
|
*
|
|
|
|
* If the transaction is being torn down prior to being
|
|
|
|
* processed by the target process, free all of the
|
|
|
|
* fd fixups and fput the file structs. It is safe to
|
|
|
|
* call this function after the fixups have been
|
|
|
|
* processed -- in that case, the list will be empty.
|
|
|
|
*/
|
|
|
|
static void binder_free_txn_fixups(struct binder_transaction *t)
|
|
|
|
{
|
|
|
|
struct binder_txn_fd_fixup *fixup, *tmp;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
|
|
|
|
fput(fixup->file);
|
2022-03-26 07:24:54 +08:00
|
|
|
if (fixup->target_fd >= 0)
|
|
|
|
put_unused_fd(fixup->target_fd);
|
2018-08-29 04:46:25 +08:00
|
|
|
list_del(&fixup->fixup_entry);
|
|
|
|
kfree(fixup);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-11 11:02:43 +08:00
|
|
|
static void binder_txn_latency_free(struct binder_transaction *t)
|
|
|
|
{
|
|
|
|
int from_proc, from_thread, to_proc, to_thread;
|
|
|
|
|
|
|
|
spin_lock(&t->lock);
|
|
|
|
from_proc = t->from ? t->from->proc->pid : 0;
|
|
|
|
from_thread = t->from ? t->from->pid : 0;
|
|
|
|
to_proc = t->to_proc ? t->to_proc->pid : 0;
|
|
|
|
to_thread = t->to_thread ? t->to_thread->pid : 0;
|
|
|
|
spin_unlock(&t->lock);
|
|
|
|
|
|
|
|
trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
|
|
|
|
}
|
|
|
|
|
2017-06-30 03:01:54 +08:00
|
|
|
static void binder_free_transaction(struct binder_transaction *t)
|
|
|
|
{
|
2019-06-13 04:29:27 +08:00
|
|
|
struct binder_proc *target_proc = t->to_proc;
|
|
|
|
|
|
|
|
if (target_proc) {
|
|
|
|
binder_inner_proc_lock(target_proc);
|
2021-03-16 09:16:28 +08:00
|
|
|
target_proc->outstanding_txns--;
|
|
|
|
if (target_proc->outstanding_txns < 0)
|
|
|
|
pr_warn("%s: Unexpected outstanding_txns %d\n",
|
|
|
|
__func__, target_proc->outstanding_txns);
|
|
|
|
if (!target_proc->outstanding_txns && target_proc->is_frozen)
|
|
|
|
wake_up_interruptible_all(&target_proc->freeze_wait);
|
2019-06-13 04:29:27 +08:00
|
|
|
if (t->buffer)
|
|
|
|
t->buffer->transaction = NULL;
|
|
|
|
binder_inner_proc_unlock(target_proc);
|
|
|
|
}
|
2020-11-11 11:02:43 +08:00
|
|
|
if (trace_binder_txn_latency_free_enabled())
|
|
|
|
binder_txn_latency_free(t);
|
2019-06-13 04:29:27 +08:00
|
|
|
/*
|
|
|
|
* If the transaction has no target_proc, then
|
|
|
|
* t->buffer->transaction has already been cleared.
|
|
|
|
*/
|
2018-08-29 04:46:25 +08:00
|
|
|
binder_free_txn_fixups(t);
|
2011-11-30 19:18:14 +08:00
|
|
|
kfree(t);
|
|
|
|
binder_stats_deleted(BINDER_STAT_TRANSACTION);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void binder_send_failed_reply(struct binder_transaction *t,
|
|
|
|
uint32_t error_code)
|
|
|
|
{
|
|
|
|
struct binder_thread *target_thread;
|
2014-07-14 08:31:05 +08:00
|
|
|
struct binder_transaction *next;
|
2014-05-01 00:30:23 +08:00
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
BUG_ON(t->flags & TF_ONE_WAY);
|
|
|
|
while (1) {
|
2017-06-30 03:02:06 +08:00
|
|
|
target_thread = binder_get_txn_from_and_acq_inner(t);
|
2011-11-30 19:18:14 +08:00
|
|
|
if (target_thread) {
|
2017-06-30 03:01:55 +08:00
|
|
|
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
|
|
|
|
"send failed reply for transaction %d to %d:%d\n",
|
|
|
|
t->debug_id,
|
|
|
|
target_thread->proc->pid,
|
|
|
|
target_thread->pid);
|
|
|
|
|
2017-06-30 03:02:06 +08:00
|
|
|
binder_pop_transaction_ilocked(target_thread, t);
|
2017-06-30 03:01:55 +08:00
|
|
|
if (target_thread->reply_error.cmd == BR_OK) {
|
|
|
|
target_thread->reply_error.cmd = error_code;
|
2017-11-15 16:21:35 +08:00
|
|
|
binder_enqueue_thread_work_ilocked(
|
|
|
|
target_thread,
|
|
|
|
&target_thread->reply_error.work);
|
2011-11-30 19:18:14 +08:00
|
|
|
wake_up_interruptible(&target_thread->wait);
|
|
|
|
} else {
|
|
|
/*
|
|
|
|
* Cannot get here for normal operation, but
|
|
|
|
* we can if multiple synchronous transactions
|
|
|
|
* are sent without blocking for responses.
|
|
|
|
* Just ignore the 2nd error in this case.
|
|
|
|
*/
|
|
|
|
pr_warn("Unexpected reply error: %u\n",
|
|
|
|
target_thread->reply_error.cmd);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
2017-06-30 03:02:06 +08:00
|
|
|
binder_inner_proc_unlock(target_thread->proc);
|
2017-06-30 03:01:57 +08:00
|
|
|
binder_thread_dec_tmpref(target_thread);
|
2017-06-30 03:01:55 +08:00
|
|
|
binder_free_transaction(t);
|
2011-11-30 19:18:14 +08:00
|
|
|
return;
|
2014-07-14 08:31:05 +08:00
|
|
|
}
|
2020-07-24 21:13:48 +08:00
|
|
|
__release(&target_thread->proc->inner_lock);
|
2014-07-14 08:31:05 +08:00
|
|
|
next = t->from_parent;
|
|
|
|
|
|
|
|
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
|
|
|
|
"send failed reply for transaction %d, target dead\n",
|
|
|
|
t->debug_id);
|
|
|
|
|
2017-06-30 03:01:54 +08:00
|
|
|
binder_free_transaction(t);
|
2014-07-14 08:31:05 +08:00
|
|
|
if (next == NULL) {
|
2011-11-30 19:18:14 +08:00
|
|
|
binder_debug(BINDER_DEBUG_DEAD_BINDER,
|
2014-07-14 08:31:05 +08:00
|
|
|
"reply failed, no target thread at root\n");
|
|
|
|
return;
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
2014-07-14 08:31:05 +08:00
|
|
|
t = next;
|
|
|
|
binder_debug(BINDER_DEBUG_DEAD_BINDER,
|
|
|
|
"reply failed, no target thread -- retry %d\n",
|
|
|
|
t->debug_id);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-13 17:06:08 +08:00
|
|
|
/**
|
|
|
|
* binder_cleanup_transaction() - cleans up undelivered transaction
|
|
|
|
* @t: transaction that needs to be cleaned up
|
|
|
|
* @reason: reason the transaction wasn't delivered
|
|
|
|
* @error_code: error to return to caller (if synchronous call)
|
|
|
|
*/
|
|
|
|
static void binder_cleanup_transaction(struct binder_transaction *t,
|
|
|
|
const char *reason,
|
|
|
|
uint32_t error_code)
|
|
|
|
{
|
|
|
|
if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
|
|
|
|
binder_send_failed_reply(t, error_code);
|
|
|
|
} else {
|
|
|
|
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
|
|
|
|
"undelivered transaction %d, %s\n",
|
|
|
|
t->debug_id, reason);
|
|
|
|
binder_free_transaction(t);
|
|
|
|
}
|
|
|
|
}
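
/*
 * Illustrative sketch (not part of the driver): how an undelivered
 * transaction sitting on a todo list is torn down with the helper above.
 * The chosen reason string and error code are placeholders;
 * binder_release_work() elsewhere in this file is the real caller of this
 * pattern.
 */
static void __maybe_unused
binder_drop_undelivered_txn_sketch(struct binder_work *w)
{
	struct binder_transaction *t;

	t = container_of(w, struct binder_transaction, work);
	binder_cleanup_transaction(t, "process died.", BR_DEAD_REPLY);
}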
|
|
|
|
|
2017-02-04 06:40:45 +08:00
|
|
|
/**
|
2019-02-09 02:35:16 +08:00
|
|
|
* binder_get_object() - gets object and checks for valid metadata
|
|
|
|
* @proc: binder_proc owning the buffer
|
2021-12-01 02:51:50 +08:00
|
|
|
* @u: sender's user pointer to base of buffer
|
2017-02-04 06:40:45 +08:00
|
|
|
* @buffer: binder_buffer that we're parsing.
|
2019-02-09 02:35:16 +08:00
|
|
|
* @offset: offset in the @buffer at which to validate an object.
|
|
|
|
* @object: struct binder_object to read into
|
2017-02-04 06:40:45 +08:00
|
|
|
*
|
2021-12-01 02:51:50 +08:00
|
|
|
* Copy the binder object at the given offset into @object. If @u is
|
|
|
|
* provided then the copy is from the sender's buffer. If not, then
|
|
|
|
* it is copied from the target's @buffer.
|
|
|
|
*
|
|
|
|
* Return: If there's a valid metadata object at @offset, the
|
2019-02-09 02:35:16 +08:00
|
|
|
* size of that object. Otherwise, it returns zero. The object
|
|
|
|
* is read into the struct binder_object pointed to by @object.
|
2017-02-04 06:40:45 +08:00
|
|
|
*/
|
2019-02-09 02:35:16 +08:00
|
|
|
static size_t binder_get_object(struct binder_proc *proc,
|
2021-12-01 02:51:50 +08:00
|
|
|
const void __user *u,
|
2019-02-09 02:35:16 +08:00
|
|
|
struct binder_buffer *buffer,
|
|
|
|
unsigned long offset,
|
|
|
|
struct binder_object *object)
|
2017-02-04 06:40:45 +08:00
|
|
|
{
|
2019-02-09 02:35:16 +08:00
|
|
|
size_t read_size;
|
2017-02-04 06:40:45 +08:00
|
|
|
struct binder_object_header *hdr;
|
|
|
|
size_t object_size = 0;
|
|
|
|
|
2019-02-09 02:35:16 +08:00
|
|
|
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
|
2021-12-01 02:51:50 +08:00
|
|
|
if (offset > buffer->data_size || read_size < sizeof(*hdr))
|
2017-02-04 06:40:45 +08:00
|
|
|
return 0;
|
2021-12-01 02:51:50 +08:00
|
|
|
if (u) {
|
|
|
|
if (copy_from_user(object, u + offset, read_size))
|
|
|
|
return 0;
|
|
|
|
} else {
|
|
|
|
if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
|
|
|
|
offset, read_size))
|
|
|
|
return 0;
|
|
|
|
}
|
2017-02-04 06:40:45 +08:00
|
|
|
|
2019-02-09 02:35:16 +08:00
|
|
|
/* Ok, now see if we read a complete object. */
|
|
|
|
hdr = &object->hdr;
|
2017-02-04 06:40:45 +08:00
|
|
|
switch (hdr->type) {
|
|
|
|
case BINDER_TYPE_BINDER:
|
|
|
|
case BINDER_TYPE_WEAK_BINDER:
|
|
|
|
case BINDER_TYPE_HANDLE:
|
|
|
|
case BINDER_TYPE_WEAK_HANDLE:
|
|
|
|
object_size = sizeof(struct flat_binder_object);
|
|
|
|
break;
|
|
|
|
case BINDER_TYPE_FD:
|
|
|
|
object_size = sizeof(struct binder_fd_object);
|
|
|
|
break;
|
2017-02-04 06:40:51 +08:00
|
|
|
case BINDER_TYPE_PTR:
|
|
|
|
object_size = sizeof(struct binder_buffer_object);
|
|
|
|
break;
|
2017-02-04 06:40:52 +08:00
|
|
|
case BINDER_TYPE_FDA:
|
|
|
|
object_size = sizeof(struct binder_fd_array_object);
|
|
|
|
break;
|
2017-02-04 06:40:45 +08:00
|
|
|
default:
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if (offset <= buffer->data_size - object_size &&
|
|
|
|
buffer->data_size >= object_size)
|
|
|
|
return object_size;
|
|
|
|
else
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-02-04 06:40:51 +08:00
|
|
|
/**
|
|
|
|
* binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
|
2019-02-09 02:35:17 +08:00
|
|
|
* @proc: binder_proc owning the buffer
|
2017-02-04 06:40:51 +08:00
|
|
|
* @b: binder_buffer containing the object
|
2019-02-09 02:35:17 +08:00
|
|
|
* @object: struct binder_object to read into
|
2017-02-04 06:40:51 +08:00
|
|
|
* @index: index in offset array at which the binder_buffer_object is
|
|
|
|
* located
|
2019-02-09 02:35:17 +08:00
|
|
|
* @start_offset: points to the start of the offset array
|
|
|
|
* @object_offsetp: offset of @object read from @b
|
2017-02-04 06:40:51 +08:00
|
|
|
* @num_valid: the number of valid offsets in the offset array
|
|
|
|
*
|
|
|
|
* Return: If @index is within the valid range of the offset array
|
|
|
|
* described by @start and @num_valid, and if there's a valid
|
|
|
|
* binder_buffer_object at the offset found in index @index
|
|
|
|
* of the offset array, that object is returned. Otherwise,
|
|
|
|
* %NULL is returned.
|
|
|
|
* Note that the offset found in index @index itself is not
|
|
|
|
* verified; this function assumes that @num_valid elements
|
|
|
|
* from @start were previously verified to have valid offsets.
|
2019-02-09 02:35:17 +08:00
|
|
|
* If @object_offsetp is non-NULL, then the offset within
|
|
|
|
* @b is written to it.
|
2017-02-04 06:40:51 +08:00
|
|
|
*/
|
2019-02-09 02:35:17 +08:00
|
|
|
static struct binder_buffer_object *binder_validate_ptr(
|
|
|
|
struct binder_proc *proc,
|
|
|
|
struct binder_buffer *b,
|
|
|
|
struct binder_object *object,
|
|
|
|
binder_size_t index,
|
|
|
|
binder_size_t start_offset,
|
|
|
|
binder_size_t *object_offsetp,
|
|
|
|
binder_size_t num_valid)
|
2017-02-04 06:40:51 +08:00
|
|
|
{
|
2019-02-09 02:35:17 +08:00
|
|
|
size_t object_size;
|
|
|
|
binder_size_t object_offset;
|
|
|
|
unsigned long buffer_offset;
|
2017-02-04 06:40:51 +08:00
|
|
|
|
|
|
|
if (index >= num_valid)
|
|
|
|
return NULL;
|
|
|
|
|
2019-02-09 02:35:17 +08:00
|
|
|
buffer_offset = start_offset + sizeof(binder_size_t) * index;
|
2019-06-29 00:50:12 +08:00
|
|
|
if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
|
|
|
|
b, buffer_offset,
|
|
|
|
sizeof(object_offset)))
|
|
|
|
return NULL;
|
2021-12-01 02:51:50 +08:00
|
|
|
object_size = binder_get_object(proc, NULL, b, object_offset, object);
|
2019-02-09 02:35:17 +08:00
|
|
|
if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
|
2017-02-04 06:40:51 +08:00
|
|
|
return NULL;
|
2019-02-09 02:35:17 +08:00
|
|
|
if (object_offsetp)
|
|
|
|
*object_offsetp = object_offset;
|
2017-02-04 06:40:51 +08:00
|
|
|
|
2019-02-09 02:35:17 +08:00
|
|
|
return &object->bbo;
|
2017-02-04 06:40:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* binder_validate_fixup() - validates pointer/fd fixups happen in order.
|
2019-02-09 02:35:17 +08:00
|
|
|
* @proc: binder_proc owning the buffer
|
2017-02-04 06:40:51 +08:00
|
|
|
* @b: transaction buffer
|
2019-02-09 02:35:17 +08:00
|
|
|
* @objects_start_offset: offset to start of objects buffer
|
|
|
|
* @buffer_obj_offset: offset to binder_buffer_object in which to fix up
|
|
|
|
* @fixup_offset: start offset in @buffer to fix up
|
|
|
|
* @last_obj_offset: offset to last binder_buffer_object that we fixed
|
|
|
|
* @last_min_offset: minimum fixup offset in object at @last_obj_offset
|
2017-02-04 06:40:51 +08:00
|
|
|
*
|
|
|
|
* Return: %true if a fixup in buffer @buffer at offset @offset is
|
|
|
|
* allowed.
|
|
|
|
*
|
|
|
|
* For safety reasons, we only allow fixups inside a buffer to happen
|
|
|
|
* at increasing offsets; additionally, we only allow fixup on the last
|
|
|
|
* buffer object that was verified, or one of its parents.
|
|
|
|
*
|
|
|
|
* Example of what is allowed:
|
|
|
|
*
|
|
|
|
* A
|
|
|
|
* B (parent = A, offset = 0)
|
|
|
|
* C (parent = A, offset = 16)
|
|
|
|
* D (parent = C, offset = 0)
|
|
|
|
* E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
|
|
|
|
*
|
|
|
|
* Examples of what is not allowed:
|
|
|
|
*
|
|
|
|
* Decreasing offsets within the same parent:
|
|
|
|
* A
|
|
|
|
* C (parent = A, offset = 16)
|
|
|
|
* B (parent = A, offset = 0) // decreasing offset within A
|
|
|
|
*
|
|
|
|
* Referring to a parent that wasn't the last object or any of its parents:
|
|
|
|
* A
|
|
|
|
* B (parent = A, offset = 0)
|
|
|
|
* C (parent = A, offset = 0)
|
|
|
|
* C (parent = A, offset = 16)
|
|
|
|
* D (parent = B, offset = 0) // B is not A or any of A's parents
|
|
|
|
*/
|
2019-02-09 02:35:17 +08:00
|
|
|
static bool binder_validate_fixup(struct binder_proc *proc,
|
|
|
|
struct binder_buffer *b,
|
|
|
|
binder_size_t objects_start_offset,
|
|
|
|
binder_size_t buffer_obj_offset,
|
2017-02-04 06:40:51 +08:00
|
|
|
binder_size_t fixup_offset,
|
2019-02-09 02:35:17 +08:00
|
|
|
binder_size_t last_obj_offset,
|
2017-02-04 06:40:51 +08:00
|
|
|
binder_size_t last_min_offset)
|
|
|
|
{
|
2019-02-09 02:35:17 +08:00
|
|
|
if (!last_obj_offset) {
|
2017-02-04 06:40:51 +08:00
|
|
|
/* Nothing to fix up in */
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-02-09 02:35:17 +08:00
|
|
|
while (last_obj_offset != buffer_obj_offset) {
|
|
|
|
unsigned long buffer_offset;
|
|
|
|
struct binder_object last_object;
|
|
|
|
struct binder_buffer_object *last_bbo;
|
2021-12-01 02:51:50 +08:00
|
|
|
size_t object_size = binder_get_object(proc, NULL, b,
|
|
|
|
last_obj_offset,
|
2019-02-09 02:35:17 +08:00
|
|
|
&last_object);
|
|
|
|
if (object_size != sizeof(*last_bbo))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
last_bbo = &last_object.bbo;
|
2017-02-04 06:40:51 +08:00
|
|
|
/*
|
|
|
|
* Safe to retrieve the parent of last_obj, since it
|
|
|
|
* was already previously verified by the driver.
|
|
|
|
*/
|
2019-02-09 02:35:17 +08:00
|
|
|
if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
|
2017-02-04 06:40:51 +08:00
|
|
|
return false;
|
2019-02-09 02:35:17 +08:00
|
|
|
last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
|
|
|
|
buffer_offset = objects_start_offset +
|
2019-06-29 00:50:12 +08:00
|
|
|
sizeof(binder_size_t) * last_bbo->parent;
|
|
|
|
if (binder_alloc_copy_from_buffer(&proc->alloc,
|
|
|
|
&last_obj_offset,
|
|
|
|
b, buffer_offset,
|
|
|
|
sizeof(last_obj_offset)))
|
|
|
|
return false;
|
2017-02-04 06:40:51 +08:00
|
|
|
}
|
|
|
|
return (fixup_offset >= last_min_offset);
|
|
|
|
}
|
|
|
|
|
/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:	callback_head for task work
 * @file:	file to close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};

/**
 * binder_do_fd_close() - close a file scheduled for deferred close
 * @twork: callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and drops the final reference on the file
 * that was pinned when the close was scheduled.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}
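
/*
 * Illustrative sketch (not part of the driver): the general shape of the
 * task_work deferral used by binder_deferred_fd_close() below. Work is
 * packaged together with a callback_head, queued against the current task
 * with task_work_add(), and the callback runs once the task is on its way
 * back to userspace, i.e. after binder_ioctl() has returned. The struct,
 * function names and the int payload are placeholders.
 */
struct binder_deferred_work_sketch {
	struct callback_head twork;
	int payload;
};

static void binder_deferred_work_cb_sketch(struct callback_head *twork)
{
	struct binder_deferred_work_sketch *w =
		container_of(twork, struct binder_deferred_work_sketch, twork);

	/* ... act on w->payload outside of the ioctl path ... */
	kfree(w);
}

static int __maybe_unused binder_defer_work_sketch(int payload)
{
	struct binder_deferred_work_sketch *w;
	int ret;

	w = kzalloc(sizeof(*w), GFP_KERNEL);
	if (!w)
		return -ENOMEM;
	w->payload = payload;
	init_task_work(&w->twork, binder_deferred_work_cb_sketch);
	ret = task_work_add(current, &w->twork, TWA_RESUME);
	if (ret)
		kfree(w);	/* task is exiting; callback will not run */
	return ret;
}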

/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	twcb->file = close_fd_get_file(fd);
	if (twcb->file) {
		// pin it until binder_do_fd_close(); see comments there
		get_file(twcb->file);
		filp_close(twcb->file, current->files);
		task_work_add(current, &twcb->twork, TWA_RESUME);
	} else {
		kfree(twcb);
	}
}
|
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
static void binder_transaction_buffer_release(struct binder_proc *proc,
|
2021-08-31 03:51:46 +08:00
|
|
|
struct binder_thread *thread,
|
2011-11-30 19:18:14 +08:00
|
|
|
struct binder_buffer *buffer,
|
2019-02-09 02:35:20 +08:00
|
|
|
binder_size_t failed_at,
|
|
|
|
bool is_failure)
|
2011-11-30 19:18:14 +08:00
|
|
|
{
|
|
|
|
int debug_id = buffer->debug_id;
|
2019-02-09 02:35:20 +08:00
|
|
|
binder_size_t off_start_offset, buffer_offset, off_end_offset;
|
2011-11-30 19:18:14 +08:00
|
|
|
|
|
|
|
binder_debug(BINDER_DEBUG_TRANSACTION,
|
2019-02-09 02:35:20 +08:00
|
|
|
"%d buffer release %d, size %zd-%zd, failed at %llx\n",
|
2011-11-30 19:18:14 +08:00
|
|
|
proc->pid, buffer->debug_id,
|
2019-02-09 02:35:20 +08:00
|
|
|
buffer->data_size, buffer->offsets_size,
|
|
|
|
(unsigned long long)failed_at);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
|
|
|
if (buffer->target_node)
|
|
|
|
binder_dec_node(buffer->target_node, 1, 0);
|
|
|
|
|
2019-02-09 02:35:17 +08:00
|
|
|
off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
|
2021-10-16 07:38:11 +08:00
|
|
|
off_end_offset = is_failure && failed_at ? failed_at :
|
2019-02-09 02:35:20 +08:00
|
|
|
off_start_offset + buffer->offsets_size;
|
|
|
|
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
|
|
|
|
buffer_offset += sizeof(binder_size_t)) {
|
2017-02-04 06:40:45 +08:00
|
|
|
struct binder_object_header *hdr;
|
2019-06-29 00:50:12 +08:00
|
|
|
size_t object_size = 0;
|
2019-02-09 02:35:16 +08:00
|
|
|
struct binder_object object;
|
2019-02-09 02:35:15 +08:00
|
|
|
binder_size_t object_offset;
|
|
|
|
|
2019-06-29 00:50:12 +08:00
|
|
|
if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
|
|
|
|
buffer, buffer_offset,
|
|
|
|
sizeof(object_offset)))
|
2021-12-01 02:51:50 +08:00
|
|
|
object_size = binder_get_object(proc, NULL, buffer,
|
2019-06-29 00:50:12 +08:00
|
|
|
object_offset, &object);
|
2017-02-04 06:40:45 +08:00
|
|
|
if (object_size == 0) {
|
|
|
|
pr_err("transaction release %d bad object at offset %lld, size %zd\n",
|
2019-02-09 02:35:15 +08:00
|
|
|
debug_id, (u64)object_offset, buffer->data_size);
|
2011-11-30 19:18:14 +08:00
|
|
|
continue;
|
|
|
|
}
|
2019-02-09 02:35:16 +08:00
|
|
|
hdr = &object.hdr;
|
2017-02-04 06:40:45 +08:00
|
|
|
switch (hdr->type) {
|
2011-11-30 19:18:14 +08:00
|
|
|
case BINDER_TYPE_BINDER:
|
|
|
|
case BINDER_TYPE_WEAK_BINDER: {
|
2017-02-04 06:40:45 +08:00
|
|
|
struct flat_binder_object *fp;
|
|
|
|
struct binder_node *node;
|
2014-05-01 00:30:23 +08:00
|
|
|
|
2017-02-04 06:40:45 +08:00
|
|
|
fp = to_flat_binder_object(hdr);
|
|
|
|
node = binder_get_node(proc, fp->binder);
|
2011-11-30 19:18:14 +08:00
|
|
|
if (node == NULL) {
|
2014-02-22 06:40:26 +08:00
|
|
|
pr_err("transaction release %d bad node %016llx\n",
|
|
|
|
debug_id, (u64)fp->binder);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
binder_debug(BINDER_DEBUG_TRANSACTION,
|
2014-02-22 06:40:26 +08:00
|
|
|
" node %d u%016llx\n",
|
|
|
|
node->debug_id, (u64)node->ptr);
|
2017-02-04 06:40:45 +08:00
|
|
|
binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
|
|
|
|
0);
|
2017-06-30 03:01:59 +08:00
|
|
|
binder_put_node(node);
|
2011-11-30 19:18:14 +08:00
|
|
|
} break;
|
|
|
|
case BINDER_TYPE_HANDLE:
|
|
|
|
case BINDER_TYPE_WEAK_HANDLE: {
|
2017-02-04 06:40:45 +08:00
|
|
|
struct flat_binder_object *fp;
|
2017-06-30 03:01:58 +08:00
|
|
|
struct binder_ref_data rdata;
|
|
|
|
int ret;
|
2016-10-24 21:20:29 +08:00
|
|
|
|
2017-02-04 06:40:45 +08:00
|
|
|
fp = to_flat_binder_object(hdr);
|
2017-06-30 03:01:58 +08:00
|
|
|
ret = binder_dec_ref_for_handle(proc, fp->handle,
|
|
|
|
hdr->type == BINDER_TYPE_HANDLE, &rdata);
|
|
|
|
|
|
|
|
if (ret) {
|
|
|
|
pr_err("transaction release %d bad handle %d, ret = %d\n",
|
|
|
|
debug_id, fp->handle, ret);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
binder_debug(BINDER_DEBUG_TRANSACTION,
|
2017-06-30 03:01:58 +08:00
|
|
|
" ref %d desc %d\n",
|
|
|
|
rdata.debug_id, rdata.desc);
|
2011-11-30 19:18:14 +08:00
|
|
|
} break;
|
|
|
|
|
2017-02-04 06:40:45 +08:00
|
|
|
case BINDER_TYPE_FD: {
|
2018-08-29 04:46:25 +08:00
|
|
|
/*
|
|
|
|
* No need to close the file here since user-space
|
2021-11-25 20:22:18 +08:00
|
|
|
* closes it for successfully delivered
|
2018-08-29 04:46:25 +08:00
|
|
|
* transactions. For transactions that weren't
|
|
|
|
* delivered, the new fd was never allocated so
|
|
|
|
* there is no need to close and the fput on the
|
|
|
|
* file is done when the transaction is torn
|
|
|
|
* down.
|
|
|
|
*/
|
2017-02-04 06:40:45 +08:00
|
|
|
} break;
|
2017-02-04 06:40:51 +08:00
|
|
|
case BINDER_TYPE_PTR:
|
|
|
|
/*
|
|
|
|
* Nothing to do here, this will get cleaned up when the
|
|
|
|
* transaction buffer gets freed
|
|
|
|
*/
|
|
|
|
break;
|
2017-02-04 06:40:52 +08:00
|
|
|
case BINDER_TYPE_FDA: {
|
|
|
|
struct binder_fd_array_object *fda;
|
|
|
|
struct binder_buffer_object *parent;
|
2019-02-09 02:35:17 +08:00
|
|
|
struct binder_object ptr_object;
|
2019-02-09 02:35:20 +08:00
|
|
|
binder_size_t fda_offset;
|
2017-02-04 06:40:52 +08:00
|
|
|
size_t fd_index;
|
|
|
|
binder_size_t fd_buf_size;
|
2019-02-09 02:35:20 +08:00
|
|
|
binder_size_t num_valid;
|
2017-02-04 06:40:52 +08:00
|
|
|
|
2021-10-16 07:38:11 +08:00
|
|
|
if (is_failure) {
|
2018-08-29 04:46:25 +08:00
|
|
|
/*
|
|
|
|
* The fd fixups have not been applied so no
|
|
|
|
* fds need to be closed.
|
|
|
|
*/
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2019-02-09 02:35:20 +08:00
|
|
|
num_valid = (buffer_offset - off_start_offset) /
|
|
|
|
sizeof(binder_size_t);
|
2017-02-04 06:40:52 +08:00
|
|
|
fda = to_binder_fd_array_object(hdr);
|
2019-02-09 02:35:17 +08:00
|
|
|
parent = binder_validate_ptr(proc, buffer, &ptr_object,
|
|
|
|
fda->parent,
|
|
|
|
off_start_offset,
|
|
|
|
NULL,
|
2019-02-09 02:35:20 +08:00
|
|
|
num_valid);
|
2017-02-04 06:40:52 +08:00
|
|
|
if (!parent) {
|
2017-09-25 15:22:11 +08:00
|
|
|
pr_err("transaction release %d bad parent offset\n",
|
2017-02-04 06:40:52 +08:00
|
|
|
debug_id);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
fd_buf_size = sizeof(u32) * fda->num_fds;
|
|
|
|
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
|
|
|
|
pr_err("transaction release %d invalid number of fds (%lld)\n",
|
|
|
|
debug_id, (u64)fda->num_fds);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (fd_buf_size > parent->length ||
|
|
|
|
fda->parent_offset > parent->length - fd_buf_size) {
|
|
|
|
/* No space for all file descriptors here. */
|
|
|
|
pr_err("transaction release %d not enough space for %lld fds in buffer\n",
|
|
|
|
debug_id, (u64)fda->num_fds);
|
|
|
|
continue;
|
|
|
|
}
|
2019-02-09 02:35:20 +08:00
|
|
|
/*
|
|
|
|
* the source data for binder_buffer_object is visible
|
|
|
|
* to user-space and the @buffer element is the user
|
|
|
|
* pointer to the buffer_object containing the fd_array.
|
|
|
|
* Convert the address to an offset relative to
|
|
|
|
* the base of the transaction buffer.
|
|
|
|
*/
|
|
|
|
fda_offset =
|
|
|
|
(parent->buffer - (uintptr_t)buffer->user_data) +
|
|
|
|
fda->parent_offset;
|
2019-02-09 02:35:15 +08:00
|
|
|
for (fd_index = 0; fd_index < fda->num_fds;
|
|
|
|
fd_index++) {
|
|
|
|
u32 fd;
|
2019-06-29 00:50:12 +08:00
|
|
|
int err;
|
2019-02-09 02:35:20 +08:00
|
|
|
binder_size_t offset = fda_offset +
|
|
|
|
fd_index * sizeof(fd);
|
2019-02-09 02:35:15 +08:00
|
|
|
|
2019-06-29 00:50:12 +08:00
|
|
|
err = binder_alloc_copy_from_buffer(
|
|
|
|
&proc->alloc, &fd, buffer,
|
|
|
|
offset, sizeof(fd));
|
|
|
|
WARN_ON(err);
|
2021-08-31 03:51:46 +08:00
|
|
|
if (!err) {
|
2019-06-29 00:50:12 +08:00
|
|
|
binder_deferred_fd_close(fd);
|
2021-08-31 03:51:46 +08:00
|
|
|
/*
|
|
|
|
* Need to make sure the thread goes
|
|
|
|
* back to userspace to complete the
|
|
|
|
* deferred close
|
|
|
|
*/
|
|
|
|
if (thread)
|
|
|
|
thread->looper_need_return = true;
|
|
|
|
}
|
2019-02-09 02:35:15 +08:00
|
|
|
}
|
2017-02-04 06:40:52 +08:00
|
|
|
} break;
|
2011-11-30 19:18:14 +08:00
|
|
|
default:
|
2013-07-04 17:54:48 +08:00
|
|
|
pr_err("transaction release %d bad object type %x\n",
|
2017-02-04 06:40:45 +08:00
|
|
|
debug_id, hdr->type);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-04 06:40:49 +08:00
|
|
|
static int binder_translate_binder(struct flat_binder_object *fp,
|
|
|
|
struct binder_transaction *t,
|
|
|
|
struct binder_thread *thread)
|
|
|
|
{
|
|
|
|
struct binder_node *node;
|
|
|
|
struct binder_proc *proc = thread->proc;
|
|
|
|
struct binder_proc *target_proc = t->to_proc;
|
2017-06-30 03:01:58 +08:00
|
|
|
struct binder_ref_data rdata;
|
2017-06-30 03:01:59 +08:00
|
|
|
int ret = 0;
|
2017-02-04 06:40:49 +08:00
|
|
|
|
|
|
|
node = binder_get_node(proc, fp->binder);
|
|
|
|
if (!node) {
|
2017-06-30 03:02:03 +08:00
|
|
|
node = binder_new_node(proc, fp);
|
2017-02-04 06:40:49 +08:00
|
|
|
if (!node)
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
if (fp->cookie != node->cookie) {
|
|
|
|
binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
|
|
|
|
proc->pid, thread->pid, (u64)fp->binder,
|
|
|
|
node->debug_id, (u64)fp->cookie,
|
|
|
|
(u64)node->cookie);
|
2017-06-30 03:01:59 +08:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto done;
|
|
|
|
}
|
2021-10-13 00:56:13 +08:00
|
|
|
if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
|
2017-06-30 03:01:59 +08:00
|
|
|
ret = -EPERM;
|
|
|
|
goto done;
|
2017-02-04 06:40:49 +08:00
|
|
|
}
|
|
|
|
|
2017-06-30 03:01:58 +08:00
|
|
|
ret = binder_inc_ref_for_node(target_proc, node,
|
|
|
|
fp->hdr.type == BINDER_TYPE_BINDER,
|
|
|
|
&thread->todo, &rdata);
|
|
|
|
if (ret)
|
2017-06-30 03:01:59 +08:00
|
|
|
goto done;
|
2017-02-04 06:40:49 +08:00
|
|
|
|
|
|
|
if (fp->hdr.type == BINDER_TYPE_BINDER)
|
|
|
|
fp->hdr.type = BINDER_TYPE_HANDLE;
|
|
|
|
else
|
|
|
|
fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
|
|
|
|
fp->binder = 0;
|
2017-06-30 03:01:58 +08:00
|
|
|
fp->handle = rdata.desc;
|
2017-02-04 06:40:49 +08:00
|
|
|
fp->cookie = 0;
|
|
|
|
|
2017-06-30 03:01:58 +08:00
|
|
|
trace_binder_transaction_node_to_ref(t, node, &rdata);
|
2017-02-04 06:40:49 +08:00
|
|
|
binder_debug(BINDER_DEBUG_TRANSACTION,
|
|
|
|
" node %d u%016llx -> ref %d desc %d\n",
|
|
|
|
node->debug_id, (u64)node->ptr,
|
2017-06-30 03:01:58 +08:00
|
|
|
rdata.debug_id, rdata.desc);
|
2017-06-30 03:01:59 +08:00
|
|
|
done:
|
|
|
|
binder_put_node(node);
|
|
|
|
return ret;
|
2017-02-04 06:40:49 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int binder_translate_handle(struct flat_binder_object *fp,
|
|
|
|
struct binder_transaction *t,
|
|
|
|
struct binder_thread *thread)
|
|
|
|
{
|
|
|
|
struct binder_proc *proc = thread->proc;
|
|
|
|
struct binder_proc *target_proc = t->to_proc;
|
2017-06-30 03:01:58 +08:00
|
|
|
struct binder_node *node;
|
|
|
|
struct binder_ref_data src_rdata;
|
2017-06-30 03:01:59 +08:00
|
|
|
int ret = 0;
|
2017-02-04 06:40:49 +08:00
|
|
|
|
2017-06-30 03:01:58 +08:00
|
|
|
node = binder_get_node_from_ref(proc, fp->handle,
|
|
|
|
fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
|
|
|
|
if (!node) {
|
2017-02-04 06:40:49 +08:00
|
|
|
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
|
|
|
|
proc->pid, thread->pid, fp->handle);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2021-10-13 00:56:13 +08:00
|
|
|
if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
|
2017-06-30 03:01:59 +08:00
|
|
|
ret = -EPERM;
|
|
|
|
goto done;
|
|
|
|
}
|
2017-02-04 06:40:49 +08:00
|
|
|
|
2017-06-30 03:02:03 +08:00
|
|
|
binder_node_lock(node);
|
2017-06-30 03:01:58 +08:00
|
|
|
if (node->proc == target_proc) {
|
2017-02-04 06:40:49 +08:00
|
|
|
if (fp->hdr.type == BINDER_TYPE_HANDLE)
|
|
|
|
fp->hdr.type = BINDER_TYPE_BINDER;
|
|
|
|
else
|
|
|
|
fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
|
2017-06-30 03:01:58 +08:00
|
|
|
fp->binder = node->ptr;
|
|
|
|
fp->cookie = node->cookie;
|
2017-06-30 03:02:03 +08:00
|
|
|
if (node->proc)
|
|
|
|
binder_inner_proc_lock(node->proc);
|
2018-11-07 07:56:31 +08:00
|
|
|
else
|
|
|
|
__acquire(&node->proc->inner_lock);
|
2017-06-30 03:02:03 +08:00
|
|
|
binder_inc_node_nilocked(node,
|
|
|
|
fp->hdr.type == BINDER_TYPE_BINDER,
|
|
|
|
0, NULL);
|
|
|
|
if (node->proc)
|
|
|
|
binder_inner_proc_unlock(node->proc);
|
2018-11-07 07:56:31 +08:00
|
|
|
else
|
|
|
|
__release(&node->proc->inner_lock);
|
2017-06-30 03:01:58 +08:00
|
|
|
trace_binder_transaction_ref_to_node(t, node, &src_rdata);
|
2017-02-04 06:40:49 +08:00
|
|
|
binder_debug(BINDER_DEBUG_TRANSACTION,
|
|
|
|
" ref %d desc %d -> node %d u%016llx\n",
|
2017-06-30 03:01:58 +08:00
|
|
|
src_rdata.debug_id, src_rdata.desc, node->debug_id,
|
|
|
|
(u64)node->ptr);
|
2017-06-30 03:02:03 +08:00
|
|
|
binder_node_unlock(node);
|
2017-02-04 06:40:49 +08:00
|
|
|
} else {
|
2017-06-30 03:01:58 +08:00
|
|
|
struct binder_ref_data dest_rdata;
|
2017-02-04 06:40:49 +08:00
|
|
|
|
2017-06-30 03:02:03 +08:00
|
|
|
binder_node_unlock(node);
|
2017-06-30 03:01:58 +08:00
|
|
|
ret = binder_inc_ref_for_node(target_proc, node,
|
|
|
|
fp->hdr.type == BINDER_TYPE_HANDLE,
|
|
|
|
NULL, &dest_rdata);
|
|
|
|
if (ret)
|
2017-06-30 03:01:59 +08:00
|
|
|
goto done;
|
2017-02-04 06:40:49 +08:00
|
|
|
|
|
|
|
fp->binder = 0;
|
2017-06-30 03:01:58 +08:00
|
|
|
fp->handle = dest_rdata.desc;
|
2017-02-04 06:40:49 +08:00
|
|
|
fp->cookie = 0;
|
2017-06-30 03:01:58 +08:00
|
|
|
trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
|
|
|
|
&dest_rdata);
|
2017-02-04 06:40:49 +08:00
|
|
|
binder_debug(BINDER_DEBUG_TRANSACTION,
|
|
|
|
" ref %d desc %d -> ref %d desc %d (node %d)\n",
|
2017-06-30 03:01:58 +08:00
|
|
|
src_rdata.debug_id, src_rdata.desc,
|
|
|
|
dest_rdata.debug_id, dest_rdata.desc,
|
|
|
|
node->debug_id);
|
2017-02-04 06:40:49 +08:00
|
|
|
}
|
2017-06-30 03:01:59 +08:00
|
|
|
done:
|
|
|
|
binder_put_node(node);
|
|
|
|
return ret;
|
2017-02-04 06:40:49 +08:00
|
|
|
}
|
|
|
|
|
2019-02-09 02:35:15 +08:00
|
|
|
static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
|
2017-02-04 06:40:49 +08:00
|
|
|
struct binder_transaction *t,
|
|
|
|
struct binder_thread *thread,
|
|
|
|
struct binder_transaction *in_reply_to)
|
|
|
|
{
|
|
|
|
struct binder_proc *proc = thread->proc;
|
|
|
|
struct binder_proc *target_proc = t->to_proc;
|
2018-08-29 04:46:25 +08:00
|
|
|
struct binder_txn_fd_fixup *fixup;
|
2017-02-04 06:40:49 +08:00
|
|
|
struct file *file;
|
2018-08-29 04:46:25 +08:00
|
|
|
int ret = 0;
|
2017-02-04 06:40:49 +08:00
|
|
|
bool target_allows_fd;
|
|
|
|
|
|
|
|
if (in_reply_to)
|
|
|
|
target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
|
|
|
|
else
|
|
|
|
target_allows_fd = t->buffer->target_node->accept_fds;
|
|
|
|
if (!target_allows_fd) {
|
|
|
|
binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
|
|
|
|
proc->pid, thread->pid,
|
|
|
|
in_reply_to ? "reply" : "transaction",
|
|
|
|
fd);
|
|
|
|
ret = -EPERM;
|
|
|
|
goto err_fd_not_accepted;
|
|
|
|
}
|
|
|
|
|
|
|
|
file = fget(fd);
|
|
|
|
if (!file) {
|
|
|
|
binder_user_error("%d:%d got transaction with invalid fd, %d\n",
|
|
|
|
proc->pid, thread->pid, fd);
|
|
|
|
ret = -EBADF;
|
|
|
|
goto err_fget;
|
|
|
|
}
|
2021-10-13 00:56:13 +08:00
|
|
|
ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
|
2017-02-04 06:40:49 +08:00
|
|
|
if (ret < 0) {
|
|
|
|
ret = -EPERM;
|
|
|
|
goto err_security;
|
|
|
|
}
|
|
|
|
|
2018-08-29 04:46:25 +08:00
|
|
|
/*
|
|
|
|
* Add fixup record for this transaction. The allocation
|
|
|
|
* of the fd in the target needs to be done from a
|
|
|
|
* target thread.
|
|
|
|
*/
|
|
|
|
fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
|
|
|
|
if (!fixup) {
|
2017-02-04 06:40:49 +08:00
|
|
|
ret = -ENOMEM;
|
2018-08-29 04:46:25 +08:00
|
|
|
goto err_alloc;
|
2017-02-04 06:40:49 +08:00
|
|
|
}
|
2018-08-29 04:46:25 +08:00
|
|
|
fixup->file = file;
|
2019-02-09 02:35:15 +08:00
|
|
|
fixup->offset = fd_offset;
|
2022-03-26 07:24:54 +08:00
|
|
|
fixup->target_fd = -1;
|
2018-08-29 04:46:25 +08:00
|
|
|
trace_binder_transaction_fd_send(t, fd, fixup->offset);
|
|
|
|
list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
|
2017-02-04 06:40:49 +08:00
|
|
|
|
2018-08-29 04:46:25 +08:00
|
|
|
return ret;
|
2017-02-04 06:40:49 +08:00
|
|
|
|
2018-08-29 04:46:25 +08:00
|
|
|
err_alloc:
|
2017-02-04 06:40:49 +08:00
|
|
|
err_security:
|
|
|
|
fput(file);
|
|
|
|
err_fget:
|
|
|
|
err_fd_not_accepted:
|
|
|
|
return ret;
|
|
|
|
}
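/*
 * Illustrative userspace-side sketch (not part of the driver), assuming only
 * the standard UAPI in <linux/android/binder.h>. It shows the data layout
 * that drives binder_translate_fd() above: the sender embeds a
 * BINDER_TYPE_FD object in the transaction buffer and records its position
 * in the offsets array; the driver then installs a corresponding fd in the
 * target from a target-process context. The helper name send_one_fd() and
 * the single-object layout are hypothetical.
 */
#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

static void send_one_fd(struct binder_transaction_data *tr,
			void *txn_buf, binder_size_t *offsets, int fd)
{
	struct binder_fd_object obj;

	memset(&obj, 0, sizeof(obj));
	obj.hdr.type = BINDER_TYPE_FD;	/* handled by binder_translate_fd() */
	obj.fd = fd;			/* sender's fd; target gets its own */

	memcpy(txn_buf, &obj, sizeof(obj));
	offsets[0] = 0;			/* object starts at buffer offset 0 */

	tr->data.ptr.buffer = (binder_uintptr_t)(uintptr_t)txn_buf;
	tr->data.ptr.offsets = (binder_uintptr_t)(uintptr_t)offsets;
	tr->data_size = sizeof(obj);
	tr->offsets_size = sizeof(binder_size_t);
}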
|
|
|
|
|
2021-12-01 02:51:52 +08:00
|
|
|
/**
|
|
|
|
* struct binder_ptr_fixup - data to be fixed-up in target buffer
|
|
|
|
* @offset:     offset in target buffer to fixup
|
|
|
|
* @skip_size:  bytes to skip in copy (fixup will be written later)
|
|
|
|
* @fixup_data: data to write at fixup offset
|
|
|
|
* @node:       list node
|
|
|
|
*
|
|
|
|
* This is used for the pointer fixup list (pf) which is created and consumed
|
|
|
|
* during binder_transaction() and is only accessed locally. No
|
|
|
|
* locking is necessary.
|
|
|
|
*
|
|
|
|
* The list is ordered by @offset.
|
|
|
|
*/
|
|
|
|
struct binder_ptr_fixup {
|
|
|
|
binder_size_t offset;
|
|
|
|
size_t skip_size;
|
|
|
|
binder_uintptr_t fixup_data;
|
|
|
|
struct list_head node;
|
|
|
|
};
|
|
|
|
|
|
|
|
/**
|
|
|
|
* struct binder_sg_copy - scatter-gather data to be copied
|
|
|
|
* @offset:       offset in target buffer
|
|
|
|
* @sender_uaddr: user address in source buffer
|
|
|
|
* @length:       bytes to copy
|
|
|
|
* @node:         list node
|
|
|
|
*
|
|
|
|
* This is used for the sg copy list (sgc) which is created and consumed
|
|
|
|
* during binder_transaction() and is only accessed locally. No
|
|
|
|
* locking is necessary.
|
|
|
|
*
|
|
|
|
* The list is ordered by @offset.
|
|
|
|
*/
|
|
|
|
struct binder_sg_copy {
|
|
|
|
binder_size_t offset;
|
|
|
|
const void __user *sender_uaddr;
|
|
|
|
size_t length;
|
|
|
|
struct list_head node;
|
|
|
|
};
|
|
|
|
|
|
|
|
/**
|
|
|
|
* binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
|
|
|
|
* @alloc: binder_alloc associated with @buffer
|
|
|
|
* @buffer: binder buffer in target process
|
|
|
|
* @sgc_head: list_head of scatter-gather copy list
|
|
|
|
* @pf_head: list_head of pointer fixup list
|
|
|
|
*
|
|
|
|
* Processes all elements of @sgc_head, applying fixups from @pf_head
|
|
|
|
* and copying the scatter-gather data from the source process' user
|
|
|
|
* buffer to the target's buffer. It is expected that the list creation
|
|
|
|
* and processing all occurs during binder_transaction() so these lists
|
|
|
|
* are only accessed in local context.
|
|
|
|
*
|
|
|
|
* Return: 0=success, else -errno
|
|
|
|
*/
|
|
|
|
static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
|
|
|
|
struct binder_buffer *buffer,
|
|
|
|
struct list_head *sgc_head,
|
|
|
|
struct list_head *pf_head)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
struct binder_sg_copy *sgc, *tmpsgc;
|
2022-04-15 20:00:15 +08:00
|
|
|
struct binder_ptr_fixup *tmppf;
|
2021-12-01 02:51:52 +08:00
|
|
|
struct binder_ptr_fixup *pf =
|
|
|
|
list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
|
|
|
|
node);
|
|
|
|
|
|
|
|
list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
|
|
|
|
size_t bytes_copied = 0;
|
|
|
|
|
|
|
|
while (bytes_copied < sgc->length) {
|
|
|
|
size_t copy_size;
|
|
|
|
size_t bytes_left = sgc->length - bytes_copied;
|
|
|
|
size_t offset = sgc->offset + bytes_copied;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We copy up to the fixup (pointed to by pf)
|
|
|
|
*/
|
|
|
|
copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
|
|
|
|
: bytes_left;
|
|
|
|
if (!ret && copy_size)
|
|
|
|
ret = binder_alloc_copy_user_to_buffer(
|
|
|
|
alloc, buffer,
|
|
|
|
offset,
|
|
|
|
sgc->sender_uaddr + bytes_copied,
|
|
|
|
copy_size);
|
|
|
|
bytes_copied += copy_size;
|
|
|
|
if (copy_size != bytes_left) {
|
|
|
|
BUG_ON(!pf);
|
|
|
|
/* we stopped at a fixup offset */
|
|
|
|
if (pf->skip_size) {
|
|
|
|
/*
|
|
|
|
* we are just skipping. This is for
|
|
|
|
* BINDER_TYPE_FDA where the translated
|
|
|
|
* fds will be fixed up when we get
|
|
|
|
* to target context.
|
|
|
|
*/
|
|
|
|
bytes_copied += pf->skip_size;
|
|
|
|
} else {
|
|
|
|
/* apply the fixup indicated by pf */
|
|
|
|
if (!ret)
|
|
|
|
ret = binder_alloc_copy_to_buffer(
|
|
|
|
alloc, buffer,
|
|
|
|
pf->offset,
|
|
|
|
&pf->fixup_data,
|
|
|
|
sizeof(pf->fixup_data));
|
|
|
|
bytes_copied += sizeof(pf->fixup_data);
|
|
|
|
}
|
|
|
|
list_del(&pf->node);
|
|
|
|
kfree(pf);
|
|
|
|
pf = list_first_entry_or_null(pf_head,
|
|
|
|
struct binder_ptr_fixup, node);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
list_del(&sgc->node);
|
|
|
|
kfree(sgc);
|
|
|
|
}
|
2022-04-15 20:00:15 +08:00
|
|
|
list_for_each_entry_safe(pf, tmppf, pf_head, node) {
|
|
|
|
BUG_ON(pf->skip_size == 0);
|
|
|
|
list_del(&pf->node);
|
|
|
|
kfree(pf);
|
|
|
|
}
|
2021-12-01 02:51:52 +08:00
|
|
|
BUG_ON(!list_empty(sgc_head));
|
|
|
|
|
|
|
|
return ret > 0 ? -EINVAL : ret;
|
|
|
|
}
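/*
 * Minimal self-contained sketch (hypothetical, not driver code) of the
 * interleaving performed by binder_do_deferred_txn_copies() above: source
 * bytes are copied up to the next fixup offset, then the 8-byte translated
 * value is written in place of the source bytes at that offset. Plain
 * memcpy() stands in for the binder_alloc_copy_*() helpers, and the fixup
 * array is assumed to be sorted by offset, as the pf list is.
 */
#include <stdint.h>
#include <string.h>

struct demo_fixup {
	size_t offset;		/* offset of the fixup in the destination */
	uint64_t value;		/* translated value to write there */
};

static void demo_copy_with_fixups(uint8_t *dst, const uint8_t *src, size_t len,
				  const struct demo_fixup *pf, size_t nr_pf)
{
	size_t copied = 0, i = 0;

	while (copied < len) {
		/* copy plain data up to the next fixup (or to the end) */
		size_t stop = (i < nr_pf) ? pf[i].offset : len;

		memcpy(dst + copied, src + copied, stop - copied);
		copied = stop;

		if (i < nr_pf) {
			/* write the fixup instead of the source bytes */
			memcpy(dst + copied, &pf[i].value, sizeof(pf[i].value));
			copied += sizeof(pf[i].value);
			i++;
		}
	}
}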
|
|
|
|
|
|
|
|
/**
|
|
|
|
* binder_cleanup_deferred_txn_lists() - free specified lists
|
|
|
|
* @sgc_head: list_head of scatter-gather copy list
|
|
|
|
* @pf_head: list_head of pointer fixup list
|
|
|
|
*
|
|
|
|
* Called to clean up @sgc_head and @pf_head if there is an
|
|
|
|
* error.
|
|
|
|
*/
|
|
|
|
static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
|
|
|
|
struct list_head *pf_head)
|
|
|
|
{
|
|
|
|
struct binder_sg_copy *sgc, *tmpsgc;
|
|
|
|
struct binder_ptr_fixup *pf, *tmppf;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
|
|
|
|
list_del(&sgc->node);
|
|
|
|
kfree(sgc);
|
|
|
|
}
|
|
|
|
list_for_each_entry_safe(pf, tmppf, pf_head, node) {
|
|
|
|
list_del(&pf->node);
|
|
|
|
kfree(pf);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* binder_defer_copy() - queue a scatter-gather buffer for copy
|
|
|
|
* @sgc_head: list_head of scatter-gather copy list
|
|
|
|
* @offset: binder buffer offset in target process
|
|
|
|
* @sender_uaddr: user address in source process
|
|
|
|
* @length: bytes to copy
|
|
|
|
*
|
|
|
|
* Specify a scatter-gather block to be copied. The actual copy must
|
|
|
|
* be deferred until all the needed fixups are identified and queued.
|
|
|
|
* Then the copy and fixups are done together so un-translated values
|
|
|
|
* from the source are never visible in the target buffer.
|
|
|
|
*
|
|
|
|
* We are guaranteed that repeated calls to this function will have
|
|
|
|
* monotonically increasing @offset values so the list will naturally
|
|
|
|
* be ordered.
|
|
|
|
*
|
|
|
|
* Return: 0=success, else -errno
|
|
|
|
*/
|
|
|
|
static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
|
|
|
|
const void __user *sender_uaddr, size_t length)
|
|
|
|
{
|
|
|
|
struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
|
|
|
|
|
|
|
|
if (!bc)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
bc->offset = offset;
|
|
|
|
bc->sender_uaddr = sender_uaddr;
|
|
|
|
bc->length = length;
|
|
|
|
INIT_LIST_HEAD(&bc->node);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We are guaranteed that the deferred copies are in-order
|
|
|
|
* so just add to the tail.
|
|
|
|
*/
|
|
|
|
list_add_tail(&bc->node, sgc_head);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* binder_add_fixup() - queue a fixup to be applied to sg copy
|
|
|
|
* @pf_head: list_head of binder ptr fixup list
|
|
|
|
* @offset: binder buffer offset in target process
|
|
|
|
* @fixup: bytes to be copied for fixup
|
|
|
|
* @skip_size: bytes to skip when copying (fixup will be applied later)
|
|
|
|
*
|
|
|
|
* Add the specified fixup to a list ordered by @offset. When copying
|
|
|
|
* the scatter-gather buffers, the fixup will be copied instead of
|
|
|
|
* data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
|
|
|
|
* will be applied later (in target process context), so we just skip
|
|
|
|
* the bytes specified by @skip_size. If @skip_size is 0, we copy the
|
|
|
|
* value in @fixup.
|
|
|
|
*
|
|
|
|
* This function is called *mostly* in @offset order, but there are
|
|
|
|
* exceptions. Since out-of-order inserts are relatively uncommon,
|
|
|
|
* we insert the new element by searching backward from the tail of
|
|
|
|
* the list.
|
|
|
|
*
|
|
|
|
* Return: 0=success, else -errno
|
|
|
|
*/
|
|
|
|
static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
|
|
|
|
binder_uintptr_t fixup, size_t skip_size)
|
|
|
|
{
|
|
|
|
struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
|
|
|
|
struct binder_ptr_fixup *tmppf;
|
|
|
|
|
|
|
|
if (!pf)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
pf->offset = offset;
|
|
|
|
pf->fixup_data = fixup;
|
|
|
|
pf->skip_size = skip_size;
|
|
|
|
INIT_LIST_HEAD(&pf->node);
|
|
|
|
|
|
|
|
/* Fixups are *mostly* added in-order, but there are some
|
|
|
|
* exceptions. Look backwards through list for insertion point.
|
|
|
|
*/
|
|
|
|
list_for_each_entry_reverse(tmppf, pf_head, node) {
|
|
|
|
if (tmppf->offset < pf->offset) {
|
|
|
|
list_add(&pf->node, &tmppf->node);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* if we get here, then the new offset is the lowest so
|
|
|
|
* insert at the head
|
|
|
|
*/
|
|
|
|
list_add(&pf->node, pf_head);
|
|
|
|
return 0;
|
|
|
|
}
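/*
 * Hedged sketch of how binder_transaction() is expected to drive the helpers
 * above (an outline, not the actual call sites): a scatter-gather payload is
 * queued with binder_defer_copy(), the pointer patch for its parent is queued
 * with binder_add_fixup(), and both lists are consumed in one ordered pass by
 * binder_do_deferred_txn_copies(). The wrapper demo_deferred_sg_flow() and
 * its parameters are hypothetical.
 */
static int demo_deferred_sg_flow(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t sg_buf_offset,
				 const void __user *sender_uaddr,
				 size_t length,
				 binder_size_t parent_fixup_offset,
				 binder_uintptr_t translated_ptr)
{
	LIST_HEAD(sgc_head);	/* deferred scatter-gather copies */
	LIST_HEAD(pf_head);	/* deferred pointer fixups, ordered by offset */
	int ret;

	ret = binder_defer_copy(&sgc_head, sg_buf_offset, sender_uaddr, length);
	if (!ret)
		ret = binder_add_fixup(&pf_head, parent_fixup_offset,
				       translated_ptr, 0 /* copy, don't skip */);
	if (!ret)
		ret = binder_do_deferred_txn_copies(alloc, buffer,
						    &sgc_head, &pf_head);
	if (ret)
		binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
	return ret;
}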
|
|
|
|
|
|
|
|
static int binder_translate_fd_array(struct list_head *pf_head,
|
|
|
|
struct binder_fd_array_object *fda,
|
2021-12-01 02:51:51 +08:00
|
|
|
const void __user *sender_ubuffer,
|
2017-02-04 06:40:52 +08:00
|
|
|
struct binder_buffer_object *parent,
|
2021-12-01 02:51:51 +08:00
|
|
|
struct binder_buffer_object *sender_uparent,
|
2017-02-04 06:40:52 +08:00
|
|
|
struct binder_transaction *t,
|
|
|
|
struct binder_thread *thread,
|
|
|
|
struct binder_transaction *in_reply_to)
|
|
|
|
{
|
2018-08-29 04:46:25 +08:00
|
|
|
binder_size_t fdi, fd_buf_size;
|
2019-02-09 02:35:20 +08:00
|
|
|
binder_size_t fda_offset;
|
2021-12-01 02:51:51 +08:00
|
|
|
const void __user *sender_ufda_base;
|
2017-02-04 06:40:52 +08:00
|
|
|
struct binder_proc *proc = thread->proc;
|
2021-12-01 02:51:52 +08:00
|
|
|
int ret;
|
2017-02-04 06:40:52 +08:00
|
|
|
|
2022-04-15 20:00:14 +08:00
|
|
|
if (fda->num_fds == 0)
|
|
|
|
return 0;
|
|
|
|
|
2017-02-04 06:40:52 +08:00
|
|
|
fd_buf_size = sizeof(u32) * fda->num_fds;
|
|
|
|
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
|
|
|
|
binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
|
|
|
|
proc->pid, thread->pid, (u64)fda->num_fds);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
if (fd_buf_size > parent->length ||
|
|
|
|
fda->parent_offset > parent->length - fd_buf_size) {
|
|
|
|
/* No space for all file descriptors here. */
|
|
|
|
binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
|
|
|
|
proc->pid, thread->pid, (u64)fda->num_fds);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2019-02-09 02:35:20 +08:00
|
|
|
/*
|
|
|
|
* the source data for binder_buffer_object is visible
|
|
|
|
* to user-space and the @buffer element is the user
|
|
|
|
* pointer to the buffer_object containing the fd_array.
|
|
|
|
* Convert the address to an offset relative to
|
|
|
|
* the base of the transaction buffer.
|
|
|
|
*/
|
|
|
|
fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
|
|
|
|
fda->parent_offset;
|
2021-12-07 20:24:42 +08:00
|
|
|
sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
|
|
|
|
fda->parent_offset;
|
2021-12-01 02:51:51 +08:00
|
|
|
|
|
|
|
if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
|
|
|
|
!IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
|
2017-02-04 06:40:52 +08:00
|
|
|
binder_user_error("%d:%d parent offset not aligned correctly.\n",
|
|
|
|
proc->pid, thread->pid);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2021-12-01 02:51:52 +08:00
|
|
|
ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2017-02-04 06:40:52 +08:00
|
|
|
for (fdi = 0; fdi < fda->num_fds; fdi++) {
|
2019-02-09 02:35:15 +08:00
|
|
|
u32 fd;
|
2019-02-09 02:35:20 +08:00
|
|
|
binder_size_t offset = fda_offset + fdi * sizeof(fd);
|
2021-12-01 02:51:51 +08:00
|
|
|
binder_size_t sender_uoffset = fdi * sizeof(fd);
|
2019-02-09 02:35:15 +08:00
|
|
|
|
2021-12-01 02:51:51 +08:00
|
|
|
ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
|
2019-06-29 00:50:12 +08:00
|
|
|
if (!ret)
|
|
|
|
ret = binder_translate_fd(fd, offset, t, thread,
|
|
|
|
in_reply_to);
|
2021-12-01 02:51:49 +08:00
|
|
|
if (ret)
|
|
|
|
return ret > 0 ? -EINVAL : ret;
|
2017-02-04 06:40:52 +08:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
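/*
 * Illustrative userspace-side sketch (not driver code), assuming the standard
 * UAPI in <linux/android/binder.h>: a BINDER_TYPE_FDA object carries no fds
 * itself; it points into a previously sent BINDER_TYPE_PTR parent buffer,
 * which is exactly the layout binder_translate_fd_array() above walks. The
 * demo_parcel layout, helper name and index handling are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

struct demo_parcel {			/* hypothetical parent payload */
	__u32 fds[2];			/* fd slots fixed up in the target */
};

static void demo_fill_fd_array(struct binder_buffer_object *parent,
			       struct binder_fd_array_object *fda,
			       const struct demo_parcel *payload,
			       binder_size_t parent_index)
{
	memset(parent, 0, sizeof(*parent));
	parent->hdr.type = BINDER_TYPE_PTR;
	parent->buffer = (binder_uintptr_t)(uintptr_t)payload;
	parent->length = sizeof(*payload);

	memset(fda, 0, sizeof(*fda));
	fda->hdr.type = BINDER_TYPE_FDA;
	fda->num_fds = 2;
	/* index of the parent object in the transaction's offsets array */
	fda->parent = parent_index;
	fda->parent_offset = offsetof(struct demo_parcel, fds);
}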
|
|
|
|
|
2021-12-01 02:51:52 +08:00
|
|
|
static int binder_fixup_parent(struct list_head *pf_head,
|
|
|
|
struct binder_transaction *t,
|
2017-02-04 06:40:51 +08:00
|
|
|
struct binder_thread *thread,
|
|
|
|
struct binder_buffer_object *bp,
|
2019-02-09 02:35:17 +08:00
|
|
|
binder_size_t off_start_offset,
|
2017-02-04 06:40:51 +08:00
|
|
|
binder_size_t num_valid,
|
2019-02-09 02:35:17 +08:00
|
|
|
binder_size_t last_fixup_obj_off,
|
2017-02-04 06:40:51 +08:00
|
|
|
binder_size_t last_fixup_min_off)
|
|
|
|
{
|
|
|
|
struct binder_buffer_object *parent;
|
|
|
|
struct binder_buffer *b = t->buffer;
|
|
|
|
struct binder_proc *proc = thread->proc;
|
|
|
|
struct binder_proc *target_proc = t->to_proc;
|
2019-02-09 02:35:17 +08:00
|
|
|
struct binder_object object;
|
|
|
|
binder_size_t buffer_offset;
|
|
|
|
binder_size_t parent_offset;
|
2017-02-04 06:40:51 +08:00
|
|
|
|
|
|
|
if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
|
|
|
|
return 0;
|
|
|
|
|
2019-02-09 02:35:17 +08:00
|
|
|
parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
|
|
|
|
off_start_offset, &parent_offset,
|
|
|
|
num_valid);
|
2017-02-04 06:40:51 +08:00
|
|
|
if (!parent) {
|
|
|
|
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
|
|
|
|
proc->pid, thread->pid);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2019-02-09 02:35:17 +08:00
|
|
|
if (!binder_validate_fixup(target_proc, b, off_start_offset,
|
|
|
|
parent_offset, bp->parent_offset,
|
|
|
|
last_fixup_obj_off,
|
2017-02-04 06:40:51 +08:00
|
|
|
last_fixup_min_off)) {
|
|
|
|
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
|
|
|
|
proc->pid, thread->pid);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (parent->length < sizeof(binder_uintptr_t) ||
|
|
|
|
bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
|
|
|
|
/* No space for a pointer here! */
|
|
|
|
binder_user_error("%d:%d got transaction with invalid parent offset\n",
|
|
|
|
proc->pid, thread->pid);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2019-02-09 02:35:17 +08:00
|
|
|
buffer_offset = bp->parent_offset +
|
2019-02-09 02:35:20 +08:00
|
|
|
(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
|
2021-12-01 02:51:52 +08:00
|
|
|
return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
|
2017-02-04 06:40:51 +08:00
|
|
|
}
|
|
|
|
|
Binder: add TF_UPDATE_TXN to replace outdated txn
When the target process is busy, incoming oneway transactions are
queued in the async_todo list. If clients continue sending extra
oneway transactions while the target process is frozen, this queue can
become too large to accommodate new transactions. That is why the
binder driver introduced ONEWAY_SPAM_DETECTION to detect this
situation. It helps debug the async binder buffer exhaustion issue,
but it does not solve the issue itself.
In real cases, applications are designed to send oneway transactions
repeatedly, delivering updated information to the target process.
Typical examples are Wi-Fi signal strength and some real-time sensor
data. Even if the apps might only care about the latest information,
all outdated oneway transactions still accumulate there until the
frozen process is thawed later. For this kind of situation, there is
no existing method to skip the outdated transactions and deliver only
the latest one.
This patch introduces a new transaction flag, TF_UPDATE_TXN. To use it,
user apps set this new flag along with TF_ONE_WAY. When such a oneway
transaction is about to be queued onto the async_todo list of a frozen
process, the binder driver checks whether any previously pending
transaction can be superseded by comparing their code, flags and target
node. If such an outdated pending transaction is found, the latest
transaction supersedes the outdated one. This effectively prevents the
async binder buffer from running out and saves unnecessary binder read
workloads.
Acked-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Li Li <dualli@google.com>
Link: https://lore.kernel.org/r/20220526220018.3334775-2-dualli@chromium.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2022-05-27 06:00:18 +08:00
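/*
 * Illustrative userspace-side sketch (not driver code), assuming only the
 * standard UAPI in <linux/android/binder.h>: a client repeatedly publishes
 * its latest reading as a oneway transaction and sets TF_UPDATE_TXN so that
 * a frozen receiver keeps only the newest pending copy, as described in the
 * commit message above. The handle, method code and payload layout are
 * hypothetical.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

#define DEMO_CODE_PUBLISH_READING	1	/* hypothetical method code */

static int demo_publish_reading(int binder_fd, uint32_t handle, uint32_t value)
{
	struct {
		uint32_t cmd;
		struct binder_transaction_data tr;
	} __attribute__((packed)) writebuf;
	struct binder_write_read bwr;

	memset(&writebuf, 0, sizeof(writebuf));
	writebuf.cmd = BC_TRANSACTION;
	writebuf.tr.target.handle = handle;
	writebuf.tr.code = DEMO_CODE_PUBLISH_READING;
	/* async, and allowed to supersede an older queued update */
	writebuf.tr.flags = TF_ONE_WAY | TF_UPDATE_TXN;
	writebuf.tr.data.ptr.buffer = (binder_uintptr_t)(uintptr_t)&value;
	writebuf.tr.data_size = sizeof(value);

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&writebuf;
	bwr.write_size = sizeof(writebuf);

	return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}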
|
|
|
/**
|
|
|
|
* binder_can_update_transaction() - Can a txn be superseded by an updated one?
|
|
|
|
* @t1: the pending async txn in the frozen process
|
|
|
|
* @t2: the new async txn to supersede the outdated pending one
|
|
|
|
*
|
|
|
|
* Return: true if t2 can supersede t1
|
|
|
|
* false if t2 cannot supersede t1
|
|
|
|
*/
|
|
|
|
static bool binder_can_update_transaction(struct binder_transaction *t1,
|
|
|
|
struct binder_transaction *t2)
|
|
|
|
{
|
|
|
|
if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
|
|
|
|
(TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
|
|
|
|
return false;
|
|
|
|
if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
|
|
|
|
t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
|
|
|
|
t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
|
|
|
|
t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* binder_find_outdated_transaction_ilocked() - Find the outdated transaction
|
|
|
|
* @t: new async transaction
|
|
|
|
* @target_list: list to find outdated transaction
|
|
|
|
*
|
|
|
|
* Return: the outdated transaction if found
|
|
|
|
* NULL if no outdated transaction can be found
|
|
|
|
*
|
|
|
|
* Requires the proc->inner_lock to be held.
|
|
|
|
*/
|
|
|
|
static struct binder_transaction *
|
|
|
|
binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
|
|
|
|
struct list_head *target_list)
|
|
|
|
{
|
|
|
|
struct binder_work *w;
|
|
|
|
|
|
|
|
list_for_each_entry(w, target_list, entry) {
|
|
|
|
struct binder_transaction *t_queued;
|
|
|
|
|
|
|
|
if (w->type != BINDER_WORK_TRANSACTION)
|
|
|
|
continue;
|
|
|
|
t_queued = container_of(w, struct binder_transaction, work);
|
|
|
|
if (binder_can_update_transaction(t_queued, t))
|
|
|
|
return t_queued;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-08-31 16:04:19 +08:00
|
|
|
/**
|
|
|
|
* binder_proc_transaction() - sends a transaction to a process and wakes it up
|
|
|
|
* @t: transaction to send
|
|
|
|
* @proc: process to send the transaction to
|
|
|
|
* @thread: thread in @proc to send the transaction to (may be NULL)
|
|
|
|
*
|
|
|
|
* This function queues a transaction to the specified process. It will try
|
|
|
|
* to find a thread in the target process to handle the transaction and
|
|
|
|
* wake it up. If no thread is found, the work is queued to the proc
|
|
|
|
* waitqueue.
|
|
|
|
*
|
|
|
|
* If the @thread parameter is not NULL, the transaction is always queued
|
|
|
|
* to the waitlist of that specific thread.
|
|
|
|
*
|
2021-03-16 09:16:28 +08:00
|
|
|
* Return: 0 if the transaction was successfully queued
|
|
|
|
* BR_DEAD_REPLY if the target process or thread is dead
|
2022-11-24 04:16:54 +08:00
|
|
|
* BR_FROZEN_REPLY if the target process or thread is frozen and
|
|
|
|
* the sync transaction was rejected
|
|
|
|
* BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
|
|
|
|
* and the async transaction was successfully queued
|
2017-08-31 16:04:19 +08:00
|
|
|
*/
|
2021-03-16 09:16:28 +08:00
|
|
|
static int binder_proc_transaction(struct binder_transaction *t,
|
2017-08-31 16:04:19 +08:00
|
|
|
struct binder_proc *proc,
|
|
|
|
struct binder_thread *thread)
|
|
|
|
{
|
|
|
|
struct binder_node *node = t->buffer->target_node;
|
|
|
|
bool oneway = !!(t->flags & TF_ONE_WAY);
|
2017-11-15 16:21:35 +08:00
|
|
|
bool pending_async = false;
|
Binder: add TF_UPDATE_TXN to replace outdated txn
2022-05-27 06:00:18 +08:00
|
|
|
struct binder_transaction *t_outdated = NULL;
|
2022-11-24 04:16:54 +08:00
|
|
|
bool frozen = false;
|
2017-08-31 16:04:19 +08:00
|
|
|
|
|
|
|
BUG_ON(!node);
|
|
|
|
binder_node_lock(node);
|
|
|
|
if (oneway) {
|
|
|
|
BUG_ON(thread);
|
2020-07-24 21:14:03 +08:00
|
|
|
if (node->has_async_transaction)
|
2017-11-15 16:21:35 +08:00
|
|
|
pending_async = true;
|
2020-07-24 21:14:03 +08:00
|
|
|
else
|
2018-01-24 02:04:27 +08:00
|
|
|
node->has_async_transaction = true;
|
2017-08-31 16:04:19 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
binder_inner_proc_lock(proc);
|
2021-03-16 09:16:30 +08:00
|
|
|
if (proc->is_frozen) {
|
2022-11-24 04:16:54 +08:00
|
|
|
frozen = true;
|
2021-03-16 09:16:30 +08:00
|
|
|
proc->sync_recv |= !oneway;
|
|
|
|
proc->async_recv |= oneway;
|
|
|
|
}
|
2017-08-31 16:04:19 +08:00
|
|
|
|
2022-11-24 04:16:54 +08:00
|
|
|
if ((frozen && !oneway) || proc->is_dead ||
|
2021-03-16 09:16:28 +08:00
|
|
|
(thread && thread->is_dead)) {
|
2017-08-31 16:04:19 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
|
|
|
binder_node_unlock(node);
|
2022-11-24 04:16:54 +08:00
|
|
|
return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
|
2017-08-31 16:04:19 +08:00
|
|
|
}
|
|
|
|
|
2017-11-15 16:21:35 +08:00
|
|
|
if (!thread && !pending_async)
|
2017-08-31 16:04:19 +08:00
|
|
|
thread = binder_select_thread_ilocked(proc);
|
|
|
|
|
Binder: add TF_UPDATE_TXN to replace outdated txn
2022-05-27 06:00:18 +08:00
|
|
|
if (thread) {
|
2017-11-15 16:21:35 +08:00
|
|
|
binder_enqueue_thread_work_ilocked(thread, &t->work);
|
Binder: add TF_UPDATE_TXN to replace outdated txn
2022-05-27 06:00:18 +08:00
|
|
|
} else if (!pending_async) {
|
2017-11-15 16:21:35 +08:00
|
|
|
binder_enqueue_work_ilocked(&t->work, &proc->todo);
|
Binder: add TF_UPDATE_TXN to replace outdated txn
2022-05-27 06:00:18 +08:00
|
|
|
} else {
|
2022-11-24 04:16:54 +08:00
|
|
|
if ((t->flags & TF_UPDATE_TXN) && frozen) {
|
Binder: add TF_UPDATE_TXN to replace outdated txn
2022-05-27 06:00:18 +08:00
|
|
|
t_outdated = binder_find_outdated_transaction_ilocked(t,
|
|
|
|
&node->async_todo);
|
|
|
|
if (t_outdated) {
|
|
|
|
binder_debug(BINDER_DEBUG_TRANSACTION,
|
|
|
|
"txn %d supersedes %d\n",
|
|
|
|
t->debug_id, t_outdated->debug_id);
|
|
|
|
list_del_init(&t_outdated->work.entry);
|
|
|
|
proc->outstanding_txns--;
|
|
|
|
}
|
|
|
|
}
|
2017-11-15 16:21:35 +08:00
|
|
|
binder_enqueue_work_ilocked(&t->work, &node->async_todo);
|
Binder: add TF_UPDATE_TXN to replace outdated txn
2022-05-27 06:00:18 +08:00
|
|
|
}
|
2017-08-31 16:04:19 +08:00
|
|
|
|
2017-11-15 16:21:35 +08:00
|
|
|
if (!pending_async)
|
2017-08-31 16:04:19 +08:00
|
|
|
binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
|
|
|
|
|
2021-03-16 09:16:28 +08:00
|
|
|
proc->outstanding_txns++;
|
2017-08-31 16:04:19 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
|
|
|
binder_node_unlock(node);
|
|
|
|
|
Binder: add TF_UPDATE_TXN to replace outdated txn
2022-05-27 06:00:18 +08:00
|
|
|
/*
|
|
|
|
* To reduce potential contention, free the outdated transaction and
|
|
|
|
* buffer after releasing the locks.
|
|
|
|
*/
|
|
|
|
if (t_outdated) {
|
|
|
|
struct binder_buffer *buffer = t_outdated->buffer;
|
|
|
|
|
|
|
|
t_outdated->buffer = NULL;
|
|
|
|
buffer->transaction = NULL;
|
|
|
|
trace_binder_transaction_update_buffer_release(buffer);
|
|
|
|
binder_transaction_buffer_release(proc, NULL, buffer, 0, 0);
|
|
|
|
binder_alloc_free_buf(&proc->alloc, buffer);
|
|
|
|
kfree(t_outdated);
|
|
|
|
binder_stats_deleted(BINDER_STAT_TRANSACTION);
|
|
|
|
}
|
|
|
|
|
2022-11-24 04:16:54 +08:00
|
|
|
if (oneway && frozen)
|
|
|
|
return BR_TRANSACTION_PENDING_FROZEN;
|
|
|
|
|
2021-03-16 09:16:28 +08:00
|
|
|
return 0;
|
2017-08-31 16:04:19 +08:00
|
|
|
}
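/*
 * Hedged sketch (not the actual call site) of how a caller such as
 * binder_transaction() is expected to interpret the return value of
 * binder_proc_transaction() above: a pending-frozen oneway transaction is
 * still a (deferred) success, while the other non-zero codes mean the
 * transaction was not delivered and must be reported back to the sender.
 * The wrapper name demo_txn_delivered() is hypothetical.
 */
static bool demo_txn_delivered(struct binder_transaction *t,
			       struct binder_proc *target_proc,
			       struct binder_thread *target_thread,
			       u32 *return_error)
{
	int err = binder_proc_transaction(t, target_proc, target_thread);

	switch (err) {
	case 0:
		return true;	/* queued and a target thread was woken */
	case BR_TRANSACTION_PENDING_FROZEN:
		return true;	/* queued; delivered after the target thaws */
	case BR_FROZEN_REPLY:	/* sync transaction rejected: target frozen */
	case BR_DEAD_REPLY:	/* target process or thread is dead */
	default:
		*return_error = err;
		return false;
	}
}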
|
|
|
|
|
2017-09-30 06:39:49 +08:00
|
|
|
/**
|
|
|
|
* binder_get_node_refs_for_txn() - Get required refs on node for txn
|
|
|
|
* @node: struct binder_node for which to get refs
|
2023-01-18 02:37:45 +08:00
|
|
|
* @procp: returns @node->proc if valid
|
|
|
|
* @error: set to BR_DEAD_REPLY if @node->proc is NULL
|
2017-09-30 06:39:49 +08:00
|
|
|
*
|
|
|
|
* User-space normally keeps the node alive when creating a transaction
|
|
|
|
* since it has a reference to the target. The local strong ref keeps it
|
|
|
|
* alive if the sending process dies before the target process processes
|
|
|
|
* the transaction. If the source process is malicious or has a reference
|
|
|
|
* counting bug, relying on the local strong ref can fail.
|
|
|
|
*
|
|
|
|
* Since user-space can cause the local strong ref to go away, we also take
|
|
|
|
* a tmpref on the node to ensure it survives while we are constructing
|
|
|
|
* the transaction. We also need a tmpref on the proc while we are
|
|
|
|
* constructing the transaction, so we take that here as well.
|
|
|
|
*
|
|
|
|
* Return: The target_node with refs taken, or NULL if @node->proc is NULL.
|
2023-01-18 02:37:45 +08:00
|
|
|
* Also sets @procp if valid. If the @node->proc is NULL indicating that the
|
|
|
|
* target proc has died, @error is set to BR_DEAD_REPLY.
|
2017-09-30 06:39:49 +08:00
|
|
|
*/
|
|
|
|
static struct binder_node *binder_get_node_refs_for_txn(
|
|
|
|
struct binder_node *node,
|
|
|
|
struct binder_proc **procp,
|
|
|
|
uint32_t *error)
|
|
|
|
{
|
|
|
|
struct binder_node *target_node = NULL;
|
|
|
|
|
|
|
|
binder_node_inner_lock(node);
|
|
|
|
if (node->proc) {
|
|
|
|
target_node = node;
|
|
|
|
binder_inc_node_nilocked(node, 1, 0, NULL);
|
|
|
|
binder_inc_node_tmpref_ilocked(node);
|
|
|
|
node->proc->tmp_ref++;
|
|
|
|
*procp = node->proc;
|
|
|
|
} else
|
|
|
|
*error = BR_DEAD_REPLY;
|
|
|
|
binder_node_inner_unlock(node);
|
|
|
|
|
|
|
|
return target_node;
|
|
|
|
}
|
|
|
|
|
2022-04-30 07:56:41 +08:00
|
|
|
static void binder_set_txn_from_error(struct binder_transaction *t, int id,
|
|
|
|
uint32_t command, int32_t param)
|
|
|
|
{
|
|
|
|
struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
|
|
|
|
|
|
|
|
if (!from) {
|
|
|
|
/* annotation for sparse */
|
|
|
|
__release(&from->proc->inner_lock);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* don't override existing errors */
|
|
|
|
if (from->ee.command == BR_OK)
|
|
|
|
binder_set_extended_error(&from->ee, id, command, param);
|
|
|
|
binder_inner_proc_unlock(from->proc);
|
|
|
|
binder_thread_dec_tmpref(from);
|
|
|
|
}
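/*
 * Illustrative userspace-side sketch (not driver code), assuming the
 * BINDER_GET_EXTENDED_ERROR ioctl and struct binder_extended_error from
 * <linux/android/binder.h>: after a failed transaction, the sending thread
 * can fetch the extended error that binder_set_extended_error() /
 * binder_set_txn_from_error() recorded for it above.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static void demo_dump_extended_error(int binder_fd)
{
	struct binder_extended_error ee;

	if (ioctl(binder_fd, BINDER_GET_EXTENDED_ERROR, &ee) < 0)
		return;

	/* ee.id matches the failing transaction's debug id; BR_OK means none */
	printf("txn %u: command 0x%x param %d\n", ee.id, ee.command, ee.param);
}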
|
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
static void binder_transaction(struct binder_proc *proc,
|
|
|
|
struct binder_thread *thread,
|
2017-02-04 06:40:50 +08:00
|
|
|
struct binder_transaction_data *tr, int reply,
|
|
|
|
binder_size_t extra_buffers_size)
|
2011-11-30 19:18:14 +08:00
|
|
|
{
|
2017-02-04 06:40:49 +08:00
|
|
|
int ret;
|
2011-11-30 19:18:14 +08:00
|
|
|
struct binder_transaction *t;
|
android: binder: no outgoing transaction when thread todo has transaction
When a process dies, a failed reply is sent to the sender of any transaction
queued on a dead thread's todo list. The sender asserts that the
received failed reply corresponds to the head of the transaction stack.
This assert can fail if the dead thread is allowed to send outgoing
transactions when there is already a transaction on its todo list,
because this new transaction can end up on the transaction stack of the
original sender. The following steps illustrate how this assertion can
fail.
1. Thread1 sends txn19 to Thread2
(T1->transaction_stack=txn19, T2->todo+=txn19)
2. Without processing todo list, Thread2 sends txn20 to Thread1
(T1->todo+=txn20, T2->transaction_stack=txn20)
3. T1 processes txn20 on its todo list
(T1->transaction_stack=txn20->txn19, T1->todo=<empty>)
4. T2 dies, T2->todo cleanup attempts to send failed reply for txn19, but
T1->transaction_stack points to txn20 -- assertion fails
Step 2. is the incorrect behavior. When there is a transaction on a
thread's todo list, this thread should not be able to send any outgoing
synchronous transactions. Only the head of the todo list needs to be
checked because only threads that are waiting for proc work can directly
receive work from another thread, and no work is allowed to be queued
on such a thread without waking up the thread. This patch also enforces
that a thread is not waiting for proc work when a work is directly
enqueued to its todo list.
Acked-by: Arve Hjønnevåg <arve@android.com>
Signed-off-by: Sherry Yang <sherryy@android.com>
Reviewed-by: Martijn Coenen <maco@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2018-08-14 08:28:53 +08:00
|
|
|
struct binder_work *w;
|
2011-11-30 19:18:14 +08:00
|
|
|
struct binder_work *tcomplete;
|
2019-02-09 02:35:20 +08:00
|
|
|
binder_size_t buffer_offset = 0;
|
|
|
|
binder_size_t off_start_offset, off_end_offset;
|
2016-02-10 13:05:32 +08:00
|
|
|
binder_size_t off_min;
|
2019-02-09 02:35:20 +08:00
|
|
|
binder_size_t sg_buf_offset, sg_buf_end_offset;
|
2021-12-01 02:51:50 +08:00
|
|
|
binder_size_t user_offset = 0;
|
2017-06-30 03:01:57 +08:00
|
|
|
struct binder_proc *target_proc = NULL;
|
2011-11-30 19:18:14 +08:00
|
|
|
struct binder_thread *target_thread = NULL;
|
|
|
|
struct binder_node *target_node = NULL;
|
|
|
|
struct binder_transaction *in_reply_to = NULL;
|
|
|
|
struct binder_transaction_log_entry *e;
|
2017-06-30 03:01:46 +08:00
|
|
|
uint32_t return_error = 0;
|
|
|
|
uint32_t return_error_param = 0;
|
|
|
|
uint32_t return_error_line = 0;
|
2019-02-09 02:35:17 +08:00
|
|
|
binder_size_t last_fixup_obj_off = 0;
|
2017-02-04 06:40:51 +08:00
|
|
|
binder_size_t last_fixup_min_off = 0;
|
2017-02-04 06:40:46 +08:00
|
|
|
struct binder_context *context = proc->context;
|
2017-06-30 03:01:53 +08:00
|
|
|
int t_debug_id = atomic_inc_return(&binder_last_id);
|
2019-01-15 01:10:21 +08:00
|
|
|
char *secctx = NULL;
|
|
|
|
u32 secctx_sz = 0;
|
2021-12-01 02:51:52 +08:00
|
|
|
struct list_head sgc_head;
|
|
|
|
struct list_head pf_head;
|
2021-12-01 02:51:50 +08:00
|
|
|
const void __user *user_buffer = (const void __user *)
|
|
|
|
(uintptr_t)tr->data.ptr.buffer;
|
2021-12-01 02:51:52 +08:00
|
|
|
INIT_LIST_HEAD(&sgc_head);
|
|
|
|
INIT_LIST_HEAD(&pf_head);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
|
|
|
e = binder_transaction_log_add(&binder_transaction_log);
|
2017-06-30 03:01:53 +08:00
|
|
|
e->debug_id = t_debug_id;
|
2011-11-30 19:18:14 +08:00
|
|
|
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
|
|
|
|
e->from_proc = proc->pid;
|
|
|
|
e->from_thread = thread->pid;
|
|
|
|
e->target_handle = tr->target.handle;
|
|
|
|
e->data_size = tr->data_size;
|
|
|
|
e->offsets_size = tr->offsets_size;
|
2019-10-08 21:01:59 +08:00
|
|
|
strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2022-04-30 07:56:41 +08:00
|
|
|
binder_inner_proc_lock(proc);
|
|
|
|
binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
|
|
|
|
binder_inner_proc_unlock(proc);
|
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
if (reply) {
|
2017-06-30 03:02:06 +08:00
|
|
|
binder_inner_proc_lock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
in_reply_to = thread->transaction_stack;
|
|
|
|
if (in_reply_to == NULL) {
|
2017-06-30 03:02:06 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
2012-10-31 01:05:43 +08:00
|
|
|
binder_user_error("%d:%d got reply transaction with no transaction stack\n",
|
2011-11-30 19:18:14 +08:00
|
|
|
proc->pid, thread->pid);
|
|
|
|
return_error = BR_FAILED_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = -EPROTO;
|
|
|
|
return_error_line = __LINE__;
|
2011-11-30 19:18:14 +08:00
|
|
|
goto err_empty_call_stack;
|
|
|
|
}
|
|
|
|
if (in_reply_to->to_thread != thread) {
|
2017-06-30 03:01:57 +08:00
|
|
|
spin_lock(&in_reply_to->lock);
|
2012-10-31 01:05:43 +08:00
|
|
|
binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
|
2011-11-30 19:18:14 +08:00
|
|
|
proc->pid, thread->pid, in_reply_to->debug_id,
|
|
|
|
in_reply_to->to_proc ?
|
|
|
|
in_reply_to->to_proc->pid : 0,
|
|
|
|
in_reply_to->to_thread ?
|
|
|
|
in_reply_to->to_thread->pid : 0);
|
2017-06-30 03:01:57 +08:00
|
|
|
spin_unlock(&in_reply_to->lock);
|
2017-06-30 03:02:06 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = -EPROTO;
|
|
|
|
return_error_line = __LINE__;
|
2011-11-30 19:18:14 +08:00
|
|
|
in_reply_to = NULL;
|
|
|
|
goto err_bad_call_stack;
|
|
|
|
}
|
|
|
|
thread->transaction_stack = in_reply_to->to_parent;
|
2017-06-30 03:02:06 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
|
|
|
binder_set_nice(in_reply_to->saved_priority);
|
|
|
|
target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
|
2011-11-30 19:18:14 +08:00
|
|
|
if (target_thread == NULL) {
|
2018-11-07 07:56:31 +08:00
|
|
|
/* annotation for sparse */
|
|
|
|
__release(&target_thread->proc->inner_lock);
|
2022-04-30 07:56:44 +08:00
|
|
|
binder_txn_error("%d:%d reply target not found\n",
|
|
|
|
thread->pid, proc->pid);
|
2011-11-30 19:18:14 +08:00
|
|
|
return_error = BR_DEAD_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_line = __LINE__;
|
2011-11-30 19:18:14 +08:00
|
|
|
goto err_dead_binder;
|
|
|
|
}
|
|
|
|
if (target_thread->transaction_stack != in_reply_to) {
|
2012-10-31 01:05:43 +08:00
|
|
|
binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
|
2011-11-30 19:18:14 +08:00
|
|
|
proc->pid, thread->pid,
|
|
|
|
target_thread->transaction_stack ?
|
|
|
|
target_thread->transaction_stack->debug_id : 0,
|
|
|
|
in_reply_to->debug_id);
|
2017-06-30 03:02:06 +08:00
|
|
|
binder_inner_proc_unlock(target_thread->proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = -EPROTO;
|
|
|
|
return_error_line = __LINE__;
|
2011-11-30 19:18:14 +08:00
|
|
|
in_reply_to = NULL;
|
|
|
|
target_thread = NULL;
|
|
|
|
goto err_dead_binder;
|
|
|
|
}
|
|
|
|
target_proc = target_thread->proc;
|
2017-06-30 03:01:57 +08:00
|
|
|
target_proc->tmp_ref++;
|
2017-06-30 03:02:06 +08:00
|
|
|
binder_inner_proc_unlock(target_thread->proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
} else {
|
|
|
|
if (tr->target.handle) {
|
|
|
|
struct binder_ref *ref;
|
2014-05-01 00:30:23 +08:00
|
|
|
|
2017-06-30 03:01:56 +08:00
|
|
|
/*
|
|
|
|
* There must already be a strong ref
|
|
|
|
* on this node. If so, do a strong
|
|
|
|
* increment on the node to ensure it
|
|
|
|
* stays alive until the transaction is
|
|
|
|
* done.
|
|
|
|
*/
|
2017-06-30 03:02:08 +08:00
|
|
|
binder_proc_lock(proc);
|
|
|
|
ref = binder_get_ref_olocked(proc, tr->target.handle,
|
|
|
|
true);
|
2017-06-30 03:01:56 +08:00
|
|
|
if (ref) {
|
2017-09-30 06:39:49 +08:00
|
|
|
target_node = binder_get_node_refs_for_txn(
|
|
|
|
ref->node, &target_proc,
|
|
|
|
&return_error);
|
|
|
|
} else {
|
2021-08-03 06:04:45 +08:00
|
|
|
binder_user_error("%d:%d got transaction to invalid handle, %u\n",
|
|
|
|
proc->pid, thread->pid, tr->target.handle);
|
2011-11-30 19:18:14 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
|
|
|
}
|
2017-09-30 06:39:49 +08:00
|
|
|
binder_proc_unlock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
} else {
|
2017-06-30 03:01:43 +08:00
|
|
|
mutex_lock(&context->context_mgr_node_lock);
|
2017-02-04 06:40:46 +08:00
|
|
|
target_node = context->binder_context_mgr_node;
|
2017-09-30 06:39:49 +08:00
|
|
|
if (target_node)
|
|
|
|
target_node = binder_get_node_refs_for_txn(
|
|
|
|
target_node, &target_proc,
|
|
|
|
&return_error);
|
|
|
|
else
|
2011-11-30 19:18:14 +08:00
|
|
|
return_error = BR_DEAD_REPLY;
|
2017-06-30 03:01:43 +08:00
|
|
|
mutex_unlock(&context->context_mgr_node_lock);
|
2019-07-16 03:18:04 +08:00
|
|
|
if (target_node && target_proc->pid == proc->pid) {
|
2018-03-28 17:14:50 +08:00
|
|
|
binder_user_error("%d:%d got transaction to context manager from process owning it\n",
|
|
|
|
proc->pid, thread->pid);
|
|
|
|
return_error = BR_FAILED_REPLY;
|
|
|
|
return_error_param = -EINVAL;
|
|
|
|
return_error_line = __LINE__;
|
|
|
|
goto err_invalid_target_handle;
|
|
|
|
}
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
2017-09-30 06:39:49 +08:00
|
|
|
if (!target_node) {
|
2022-04-30 07:56:44 +08:00
|
|
|
binder_txn_error("%d:%d cannot find target node\n",
|
|
|
|
thread->pid, proc->pid);
|
2017-09-30 06:39:49 +08:00
|
|
|
/*
|
|
|
|
* return_error is set above
|
|
|
|
*/
|
|
|
|
return_error_param = -EINVAL;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_line = __LINE__;
|
2011-11-30 19:18:14 +08:00
|
|
|
goto err_dead_binder;
|
|
|
|
}
|
2017-09-30 06:39:49 +08:00
|
|
|
e->to_node = target_node->debug_id;
|
binder: Prevent context manager from incrementing ref 0
Binder is designed such that a binder_proc never has references to
itself. If this rule is violated, memory corruption can occur when a
process sends a transaction to itself; see e.g.
<https://syzkaller.appspot.com/bug?extid=09e05aba06723a94d43d>.
There is a remaining edge case through which such a transaction-to-self
can still occur from the context of a task with BINDER_SET_CONTEXT_MGR
access:
- task A opens /dev/binder twice, creating binder_proc instances P1
and P2
- P1 becomes context manager
- P2 calls ACQUIRE on the magic handle 0, allocating index 0 in its
handle table
- P1 dies (by closing the /dev/binder fd and waiting a bit)
- P2 becomes context manager
- P2 calls ACQUIRE on the magic handle 0, allocating index 1 in its
handle table
[this triggers a warning: "binder: 1974:1974 tried to acquire
reference to desc 0, got 1 instead"]
- task B opens /dev/binder once, creating binder_proc instance P3
- P3 calls P2 (via magic handle 0) with (void*)1 as argument (two-way
transaction)
- P2 receives the handle and uses it to call P3 (two-way transaction)
- P3 calls P2 (via magic handle 0) (two-way transaction)
- P2 calls P2 (via handle 1) (two-way transaction)
And then, if P2 does *NOT* accept the incoming transaction work, but
instead closes the binder fd, we get a crash.
Solve it by preventing the context manager from using ACQUIRE on ref 0.
There shouldn't be any legitimate reason for the context manager to do
that.
Additionally, print a warning if someone manages to find another way to
trigger a transaction-to-self bug in the future.
Cc: stable@vger.kernel.org
Fixes: 457b9a6f09f0 ("Staging: android: add binder driver")
Acked-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Jann Horn <jannh@google.com>
Reviewed-by: Martijn Coenen <maco@android.com>
Link: https://lore.kernel.org/r/20200727120424.1627555-1-jannh@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2020-07-27 20:04:24 +08:00
|
|
|
if (WARN_ON(proc == target_proc)) {
|
2022-04-30 07:56:44 +08:00
|
|
|
binder_txn_error("%d:%d self transactions not allowed\n",
|
|
|
|
thread->pid, proc->pid);
|
binder: Prevent context manager from incrementing ref 0
2020-07-27 20:04:24 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
|
|
|
return_error_param = -EINVAL;
|
|
|
|
return_error_line = __LINE__;
|
|
|
|
goto err_invalid_target_handle;
|
|
|
|
}
|
2021-10-13 00:56:13 +08:00
|
|
|
if (security_binder_transaction(proc->cred,
|
|
|
|
target_proc->cred) < 0) {
|
2022-04-30 07:56:44 +08:00
|
|
|
binder_txn_error("%d:%d transaction credentials failed\n",
|
|
|
|
thread->pid, proc->pid);
|
2015-01-21 23:54:10 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = -EPERM;
|
|
|
|
return_error_line = __LINE__;
|
2015-01-21 23:54:10 +08:00
|
|
|
goto err_invalid_target_handle;
|
|
|
|
}
|
2017-06-30 03:02:06 +08:00
|
|
|
binder_inner_proc_lock(proc);
|
android: binder: no outgoing transaction when thread todo has transaction
2018-08-14 08:28:53 +08:00
|
|
|
|
|
|
|
w = list_first_entry_or_null(&thread->todo,
|
|
|
|
struct binder_work, entry);
|
|
|
|
if (!(tr->flags & TF_ONE_WAY) && w &&
|
|
|
|
w->type == BINDER_WORK_TRANSACTION) {
|
|
|
|
/*
|
|
|
|
* Do not allow new outgoing transaction from a
|
|
|
|
* thread that has a transaction at the head of
|
|
|
|
* its todo list. Only need to check the head
|
|
|
|
* because binder_select_thread_ilocked picks a
|
|
|
|
* thread from proc->waiting_threads to enqueue
|
|
|
|
* the transaction, and nothing is queued to the
|
|
|
|
* todo list while the thread is on waiting_threads.
|
|
|
|
*/
|
|
|
|
binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
|
|
|
|
proc->pid, thread->pid);
|
|
|
|
binder_inner_proc_unlock(proc);
|
|
|
|
return_error = BR_FAILED_REPLY;
|
|
|
|
return_error_param = -EPROTO;
|
|
|
|
return_error_line = __LINE__;
|
|
|
|
goto err_bad_todo_list;
|
|
|
|
}
|
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
|
|
|
|
struct binder_transaction *tmp;
|
2014-05-01 00:30:23 +08:00
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
tmp = thread->transaction_stack;
|
|
|
|
if (tmp->to_thread != thread) {
|
2017-06-30 03:01:57 +08:00
|
|
|
spin_lock(&tmp->lock);
|
2012-10-31 01:05:43 +08:00
|
|
|
binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
|
2011-11-30 19:18:14 +08:00
|
|
|
proc->pid, thread->pid, tmp->debug_id,
|
|
|
|
tmp->to_proc ? tmp->to_proc->pid : 0,
|
|
|
|
tmp->to_thread ?
|
|
|
|
tmp->to_thread->pid : 0);
|
2017-06-30 03:01:57 +08:00
|
|
|
spin_unlock(&tmp->lock);
|
2017-06-30 03:02:06 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = -EPROTO;
|
|
|
|
return_error_line = __LINE__;
|
2011-11-30 19:18:14 +08:00
|
|
|
goto err_bad_call_stack;
|
|
|
|
}
|
|
|
|
while (tmp) {
|
2017-06-30 03:01:57 +08:00
|
|
|
struct binder_thread *from;
|
|
|
|
|
|
|
|
spin_lock(&tmp->lock);
|
|
|
|
from = tmp->from;
|
|
|
|
if (from && from->proc == target_proc) {
|
|
|
|
atomic_inc(&from->tmp_ref);
|
|
|
|
target_thread = from;
|
|
|
|
spin_unlock(&tmp->lock);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
spin_unlock(&tmp->lock);
|
2011-11-30 19:18:14 +08:00
|
|
|
tmp = tmp->from_parent;
|
|
|
|
}
|
|
|
|
}
|
2017-06-30 03:02:06 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
2017-08-31 16:04:19 +08:00
|
|
|
if (target_thread)
|
2011-11-30 19:18:14 +08:00
|
|
|
e->to_thread = target_thread->pid;
|
|
|
|
e->to_proc = target_proc->pid;
|
|
|
|
|
|
|
|
/* TODO: reuse incoming transaction for reply */
|
|
|
|
t = kzalloc(sizeof(*t), GFP_KERNEL);
|
|
|
|
if (t == NULL) {
|
2022-04-30 07:56:44 +08:00
|
|
|
binder_txn_error("%d:%d cannot allocate transaction\n",
|
|
|
|
thread->pid, proc->pid);
|
2011-11-30 19:18:14 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = -ENOMEM;
|
|
|
|
return_error_line = __LINE__;
|
2011-11-30 19:18:14 +08:00
|
|
|
goto err_alloc_t_failed;
|
|
|
|
}
|
2018-08-29 04:46:25 +08:00
|
|
|
INIT_LIST_HEAD(&t->fd_fixups);
|
2011-11-30 19:18:14 +08:00
|
|
|
binder_stats_created(BINDER_STAT_TRANSACTION);
|
2017-06-30 03:01:57 +08:00
|
|
|
spin_lock_init(&t->lock);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
|
|
|
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
|
|
|
|
if (tcomplete == NULL) {
|
2022-04-30 07:56:44 +08:00
|
|
|
binder_txn_error("%d:%d cannot allocate work for transaction\n",
|
|
|
|
thread->pid, proc->pid);
|
2011-11-30 19:18:14 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = -ENOMEM;
|
|
|
|
return_error_line = __LINE__;
|
2011-11-30 19:18:14 +08:00
|
|
|
goto err_alloc_tcomplete_failed;
|
|
|
|
}
|
|
|
|
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
|
|
|
|
|
2017-06-30 03:01:53 +08:00
|
|
|
t->debug_id = t_debug_id;
|
2011-11-30 19:18:14 +08:00
|
|
|
|
|
|
|
if (reply)
|
|
|
|
binder_debug(BINDER_DEBUG_TRANSACTION,
|
2017-02-04 06:40:50 +08:00
|
|
|
"%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
|
2011-11-30 19:18:14 +08:00
|
|
|
proc->pid, thread->pid, t->debug_id,
|
|
|
|
target_proc->pid, target_thread->pid,
|
2014-02-22 06:40:26 +08:00
|
|
|
(u64)tr->data.ptr.buffer,
|
|
|
|
(u64)tr->data.ptr.offsets,
|
2017-02-04 06:40:50 +08:00
|
|
|
(u64)tr->data_size, (u64)tr->offsets_size,
|
|
|
|
(u64)extra_buffers_size);
|
2011-11-30 19:18:14 +08:00
|
|
|
else
|
|
|
|
binder_debug(BINDER_DEBUG_TRANSACTION,
|
2017-02-04 06:40:50 +08:00
|
|
|
"%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
|
2011-11-30 19:18:14 +08:00
|
|
|
proc->pid, thread->pid, t->debug_id,
|
|
|
|
target_proc->pid, target_node->debug_id,
|
2014-02-22 06:40:26 +08:00
|
|
|
(u64)tr->data.ptr.buffer,
|
|
|
|
(u64)tr->data.ptr.offsets,
|
2017-02-04 06:40:50 +08:00
|
|
|
(u64)tr->data_size, (u64)tr->offsets_size,
|
|
|
|
(u64)extra_buffers_size);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
|
|
|
if (!reply && !(tr->flags & TF_ONE_WAY))
|
|
|
|
t->from = thread;
|
|
|
|
else
|
|
|
|
t->from = NULL;
|
2021-11-13 02:07:20 +08:00
|
|
|
t->sender_euid = task_euid(proc->tsk);
|
2011-11-30 19:18:14 +08:00
|
|
|
t->to_proc = target_proc;
|
|
|
|
t->to_thread = target_thread;
|
|
|
|
t->code = tr->code;
|
|
|
|
t->flags = tr->flags;
|
|
|
|
t->priority = task_nice(current);
|
2012-10-17 06:29:53 +08:00
|
|
|
|
2019-01-15 01:10:21 +08:00
|
|
|
if (target_node && target_node->txn_security_ctx) {
|
|
|
|
u32 secid;
|
2019-04-25 03:31:18 +08:00
|
|
|
size_t added_size;
|
2019-01-15 01:10:21 +08:00
|
|
|
|
2021-10-13 00:56:14 +08:00
|
|
|
security_cred_getsecid(proc->cred, &secid);
|
2019-01-15 01:10:21 +08:00
|
|
|
ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
|
|
|
|
if (ret) {
|
2022-04-30 07:56:44 +08:00
|
|
|
binder_txn_error("%d:%d failed to get security context\n",
|
|
|
|
thread->pid, proc->pid);
|
2019-01-15 01:10:21 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
|
|
|
return_error_param = ret;
|
|
|
|
return_error_line = __LINE__;
|
|
|
|
goto err_get_secctx_failed;
|
|
|
|
}
|
2019-04-25 03:31:18 +08:00
|
|
|
added_size = ALIGN(secctx_sz, sizeof(u64));
|
|
|
|
extra_buffers_size += added_size;
|
|
|
|
if (extra_buffers_size < added_size) {
|
2022-04-30 07:56:44 +08:00
|
|
|
binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
|
|
|
|
thread->pid, proc->pid);
|
2019-04-25 03:31:18 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
2020-10-26 19:03:14 +08:00
|
|
|
return_error_param = -EINVAL;
|
2019-04-25 03:31:18 +08:00
|
|
|
return_error_line = __LINE__;
|
|
|
|
goto err_bad_extra_size;
|
|
|
|
}
|
2019-01-15 01:10:21 +08:00
|
|
|
}
|
|
|
|
|
2012-10-17 06:29:53 +08:00
|
|
|
trace_binder_transaction(reply, t, target_node);
|
|
|
|
|
2017-06-30 03:01:40 +08:00
|
|
|
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
|
2017-02-04 06:40:50 +08:00
|
|
|
tr->offsets_size, extra_buffers_size,
|
2020-08-21 20:25:44 +08:00
|
|
|
!reply && (t->flags & TF_ONE_WAY), current->tgid);
|
2017-06-30 03:01:46 +08:00
|
|
|
if (IS_ERR(t->buffer)) {
|
2022-04-30 07:56:44 +08:00
|
|
|
char *s;
|
|
|
|
|
|
|
|
ret = PTR_ERR(t->buffer);
|
|
|
|
s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
|
|
|
|
: (ret == -ENOSPC) ? ": no space left"
|
|
|
|
: (ret == -ENOMEM) ? ": memory allocation failed"
|
|
|
|
: "";
|
|
|
|
binder_txn_error("cannot allocate buffer%s", s);
|
|
|
|
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = PTR_ERR(t->buffer);
|
|
|
|
return_error = return_error_param == -ESRCH ?
|
|
|
|
BR_DEAD_REPLY : BR_FAILED_REPLY;
|
|
|
|
return_error_line = __LINE__;
|
|
|
|
t->buffer = NULL;
|
2011-11-30 19:18:14 +08:00
|
|
|
goto err_binder_alloc_buf_failed;
|
|
|
|
}
|
2019-01-15 01:10:21 +08:00
|
|
|
if (secctx) {
|
2019-06-29 00:50:12 +08:00
|
|
|
int err;
|
2019-01-15 01:10:21 +08:00
|
|
|
size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
|
|
|
|
ALIGN(tr->offsets_size, sizeof(void *)) +
|
|
|
|
ALIGN(extra_buffers_size, sizeof(void *)) -
|
|
|
|
ALIGN(secctx_sz, sizeof(u64));
|
|
|
|
|
2019-02-09 02:35:20 +08:00
|
|
|
t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
|
2019-06-29 00:50:12 +08:00
|
|
|
err = binder_alloc_copy_to_buffer(&target_proc->alloc,
|
|
|
|
t->buffer, buf_offset,
|
|
|
|
secctx, secctx_sz);
|
|
|
|
if (err) {
|
|
|
|
t->security_ctx = 0;
|
|
|
|
WARN_ON(1);
|
|
|
|
}
|
2019-01-15 01:10:21 +08:00
|
|
|
security_release_secctx(secctx, secctx_sz);
|
|
|
|
secctx = NULL;
|
|
|
|
}
|
2011-11-30 19:18:14 +08:00
|
|
|
t->buffer->debug_id = t->debug_id;
|
|
|
|
t->buffer->transaction = t;
|
|
|
|
t->buffer->target_node = target_node;
|
2020-11-21 07:37:43 +08:00
|
|
|
t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
|
2012-10-17 06:29:53 +08:00
|
|
|
trace_binder_transaction_alloc_buf(t->buffer);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2019-02-09 02:35:14 +08:00
|
|
|
if (binder_alloc_copy_user_to_buffer(
|
|
|
|
&target_proc->alloc,
|
|
|
|
t->buffer,
|
|
|
|
ALIGN(tr->data_size, sizeof(void *)),
|
|
|
|
(const void __user *)
|
|
|
|
(uintptr_t)tr->data.ptr.offsets,
|
|
|
|
tr->offsets_size)) {
|
2012-10-31 01:05:43 +08:00
|
|
|
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
|
|
|
|
proc->pid, thread->pid);
|
2011-11-30 19:18:14 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = -EFAULT;
|
|
|
|
return_error_line = __LINE__;
|
2011-11-30 19:18:14 +08:00
|
|
|
goto err_copy_data_failed;
|
|
|
|
}
|
2014-02-22 06:40:26 +08:00
|
|
|
if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
|
|
|
|
binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
|
|
|
|
proc->pid, thread->pid, (u64)tr->offsets_size);
|
2011-11-30 19:18:14 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = -EINVAL;
|
|
|
|
return_error_line = __LINE__;
|
2011-11-30 19:18:14 +08:00
|
|
|
goto err_bad_offset;
|
|
|
|
}
|
2017-02-04 06:40:51 +08:00
|
|
|
if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
|
|
|
|
binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
|
|
|
|
proc->pid, thread->pid,
|
|
|
|
(u64)extra_buffers_size);
|
|
|
|
return_error = BR_FAILED_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = -EINVAL;
|
|
|
|
return_error_line = __LINE__;
|
2017-02-04 06:40:51 +08:00
|
|
|
goto err_bad_offset;
|
|
|
|
}
|
2019-02-09 02:35:20 +08:00
|
|
|
off_start_offset = ALIGN(tr->data_size, sizeof(void *));
|
|
|
|
buffer_offset = off_start_offset;
|
|
|
|
off_end_offset = off_start_offset + tr->offsets_size;
|
|
|
|
sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
|
2019-07-09 19:09:23 +08:00
|
|
|
sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
|
|
|
|
ALIGN(secctx_sz, sizeof(u64));
|
2016-02-10 13:05:32 +08:00
|
|
|
off_min = 0;
|
2019-02-09 02:35:20 +08:00
|
|
|
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
|
|
|
|
buffer_offset += sizeof(binder_size_t)) {
|
2017-02-04 06:40:45 +08:00
|
|
|
struct binder_object_header *hdr;
|
2019-02-09 02:35:15 +08:00
|
|
|
size_t object_size;
|
2019-02-09 02:35:16 +08:00
|
|
|
struct binder_object object;
|
2019-02-09 02:35:15 +08:00
|
|
|
binder_size_t object_offset;
|
2021-12-01 02:51:50 +08:00
|
|
|
binder_size_t copy_size;
|
2019-02-09 02:35:15 +08:00
|
|
|
|
2019-06-29 00:50:12 +08:00
|
|
|
if (binder_alloc_copy_from_buffer(&target_proc->alloc,
|
|
|
|
&object_offset,
|
|
|
|
t->buffer,
|
|
|
|
buffer_offset,
|
|
|
|
sizeof(object_offset))) {
|
2022-04-30 07:56:44 +08:00
|
|
|
binder_txn_error("%d:%d copy offset from buffer failed\n",
|
|
|
|
thread->pid, proc->pid);
|
2019-06-29 00:50:12 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
|
|
|
return_error_param = -EINVAL;
|
|
|
|
return_error_line = __LINE__;
|
|
|
|
goto err_bad_offset;
|
|
|
|
}
|
2021-12-01 02:51:50 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Copy the source user buffer up to the next object
|
|
|
|
* that will be processed.
|
|
|
|
*/
|
|
|
|
copy_size = object_offset - user_offset;
|
|
|
|
if (copy_size && (user_offset > object_offset ||
|
|
|
|
binder_alloc_copy_user_to_buffer(
|
|
|
|
&target_proc->alloc,
|
|
|
|
t->buffer, user_offset,
|
|
|
|
user_buffer + user_offset,
|
|
|
|
copy_size))) {
|
|
|
|
binder_user_error("%d:%d got transaction with invalid data ptr\n",
|
|
|
|
proc->pid, thread->pid);
|
|
|
|
return_error = BR_FAILED_REPLY;
|
|
|
|
return_error_param = -EFAULT;
|
|
|
|
return_error_line = __LINE__;
|
|
|
|
goto err_copy_data_failed;
|
|
|
|
}
|
|
|
|
object_size = binder_get_object(target_proc, user_buffer,
|
|
|
|
t->buffer, object_offset, &object);
|
2019-02-09 02:35:15 +08:00
|
|
|
if (object_size == 0 || object_offset < off_min) {
|
2017-02-04 06:40:45 +08:00
|
|
|
binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
|
2019-02-09 02:35:15 +08:00
|
|
|
proc->pid, thread->pid,
|
|
|
|
(u64)object_offset,
|
2016-02-10 13:05:32 +08:00
|
|
|
(u64)off_min,
|
2017-02-04 06:40:45 +08:00
|
|
|
(u64)t->buffer->data_size);
|
2011-11-30 19:18:14 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = -EINVAL;
|
|
|
|
return_error_line = __LINE__;
|
2011-11-30 19:18:14 +08:00
|
|
|
goto err_bad_offset;
|
|
|
|
}
|
2021-12-01 02:51:50 +08:00
|
|
|
/*
|
|
|
|
* Set offset to the next buffer fragment to be
|
|
|
|
* copied
|
|
|
|
*/
|
|
|
|
user_offset = object_offset + object_size;
|
2017-02-04 06:40:45 +08:00
|
|
|
|
2019-02-09 02:35:16 +08:00
|
|
|
hdr = &object.hdr;
|
2019-02-09 02:35:15 +08:00
|
|
|
off_min = object_offset + object_size;
|
2017-02-04 06:40:45 +08:00
|
|
|
switch (hdr->type) {
|
2011-11-30 19:18:14 +08:00
|
|
|
case BINDER_TYPE_BINDER:
|
|
|
|
case BINDER_TYPE_WEAK_BINDER: {
|
2017-02-04 06:40:45 +08:00
|
|
|
struct flat_binder_object *fp;
|
2014-05-01 00:30:23 +08:00
|
|
|
|
2017-02-04 06:40:45 +08:00
|
|
|
fp = to_flat_binder_object(hdr);
|
2017-02-04 06:40:49 +08:00
|
|
|
ret = binder_translate_binder(fp, t, thread);
|
2019-06-29 00:50:12 +08:00
|
|
|
|
|
|
|
if (ret < 0 ||
|
|
|
|
binder_alloc_copy_to_buffer(&target_proc->alloc,
|
|
|
|
t->buffer,
|
|
|
|
object_offset,
|
|
|
|
fp, sizeof(*fp))) {
|
2022-04-30 07:56:44 +08:00
|
|
|
binder_txn_error("%d:%d translate binder failed\n",
|
|
|
|
thread->pid, proc->pid);
|
2011-11-30 19:18:14 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = ret;
|
|
|
|
return_error_line = __LINE__;
|
2017-02-04 06:40:49 +08:00
|
|
|
goto err_translate_failed;
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
} break;
|
|
|
|
case BINDER_TYPE_HANDLE:
|
|
|
|
case BINDER_TYPE_WEAK_HANDLE: {
|
2017-02-04 06:40:45 +08:00
|
|
|
struct flat_binder_object *fp;
|
2016-10-24 21:20:29 +08:00
|
|
|
|
2017-02-04 06:40:45 +08:00
|
|
|
fp = to_flat_binder_object(hdr);
|
2017-02-04 06:40:49 +08:00
|
|
|
ret = binder_translate_handle(fp, t, thread);
|
2019-06-29 00:50:12 +08:00
|
|
|
if (ret < 0 ||
|
|
|
|
binder_alloc_copy_to_buffer(&target_proc->alloc,
|
|
|
|
t->buffer,
|
|
|
|
object_offset,
|
|
|
|
fp, sizeof(*fp))) {
|
2022-04-30 07:56:44 +08:00
|
|
|
binder_txn_error("%d:%d translate handle failed\n",
|
|
|
|
thread->pid, proc->pid);
|
2015-01-21 23:54:10 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = ret;
|
|
|
|
return_error_line = __LINE__;
|
2017-02-04 06:40:49 +08:00
|
|
|
goto err_translate_failed;
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
} break;
|
|
|
|
|
|
|
|
case BINDER_TYPE_FD: {
|
2017-02-04 06:40:45 +08:00
|
|
|
struct binder_fd_object *fp = to_binder_fd_object(hdr);
|
2019-02-09 02:35:15 +08:00
|
|
|
binder_size_t fd_offset = object_offset +
|
|
|
|
(uintptr_t)&fp->fd - (uintptr_t)fp;
|
|
|
|
int ret = binder_translate_fd(fp->fd, fd_offset, t,
|
|
|
|
thread, in_reply_to);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2019-06-29 00:50:12 +08:00
|
|
|
fp->pad_binder = 0;
|
|
|
|
if (ret < 0 ||
|
|
|
|
binder_alloc_copy_to_buffer(&target_proc->alloc,
|
|
|
|
t->buffer,
|
|
|
|
object_offset,
|
|
|
|
fp, sizeof(*fp))) {
|
2022-04-30 07:56:44 +08:00
|
|
|
binder_txn_error("%d:%d translate fd failed\n",
|
|
|
|
thread->pid, proc->pid);
|
2011-11-30 19:18:14 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
2018-08-29 04:46:25 +08:00
|
|
|
return_error_param = ret;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_line = __LINE__;
|
2017-02-04 06:40:49 +08:00
|
|
|
goto err_translate_failed;
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
} break;
|
2017-02-04 06:40:52 +08:00
|
|
|
case BINDER_TYPE_FDA: {
|
2019-02-09 02:35:17 +08:00
|
|
|
struct binder_object ptr_object;
|
|
|
|
binder_size_t parent_offset;
|
2021-12-01 02:51:51 +08:00
|
|
|
struct binder_object user_object;
|
|
|
|
size_t user_parent_size;
|
2017-02-04 06:40:52 +08:00
|
|
|
struct binder_fd_array_object *fda =
|
|
|
|
to_binder_fd_array_object(hdr);
|
2019-12-14 04:25:31 +08:00
|
|
|
size_t num_valid = (buffer_offset - off_start_offset) /
|
2019-02-09 02:35:20 +08:00
|
|
|
sizeof(binder_size_t);
|
2017-02-04 06:40:52 +08:00
|
|
|
struct binder_buffer_object *parent =
|
2019-02-09 02:35:17 +08:00
|
|
|
binder_validate_ptr(target_proc, t->buffer,
|
|
|
|
&ptr_object, fda->parent,
|
|
|
|
off_start_offset,
|
|
|
|
&parent_offset,
|
2019-02-09 02:35:20 +08:00
|
|
|
num_valid);
|
2017-02-04 06:40:52 +08:00
|
|
|
if (!parent) {
|
|
|
|
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
|
|
|
|
proc->pid, thread->pid);
|
|
|
|
return_error = BR_FAILED_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = -EINVAL;
|
|
|
|
return_error_line = __LINE__;
|
2017-02-04 06:40:52 +08:00
|
|
|
goto err_bad_parent;
|
|
|
|
}
|
2019-02-09 02:35:17 +08:00
|
|
|
if (!binder_validate_fixup(target_proc, t->buffer,
|
|
|
|
off_start_offset,
|
|
|
|
parent_offset,
|
|
|
|
fda->parent_offset,
|
|
|
|
last_fixup_obj_off,
|
2017-02-04 06:40:52 +08:00
|
|
|
last_fixup_min_off)) {
|
|
|
|
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
|
|
|
|
proc->pid, thread->pid);
|
|
|
|
return_error = BR_FAILED_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = -EINVAL;
|
|
|
|
return_error_line = __LINE__;
|
2017-02-04 06:40:52 +08:00
|
|
|
goto err_bad_parent;
|
|
|
|
}
|
2021-12-01 02:51:51 +08:00
|
|
|
/*
|
|
|
|
* We need to read the user version of the parent
|
|
|
|
* object to get the original user offset
|
|
|
|
*/
|
|
|
|
user_parent_size =
|
|
|
|
binder_get_object(proc, user_buffer, t->buffer,
|
|
|
|
parent_offset, &user_object);
|
|
|
|
if (user_parent_size != sizeof(user_object.bbo)) {
|
|
|
|
binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
|
|
|
|
proc->pid, thread->pid,
|
|
|
|
user_parent_size,
|
|
|
|
sizeof(user_object.bbo));
|
|
|
|
return_error = BR_FAILED_REPLY;
|
|
|
|
return_error_param = -EINVAL;
|
|
|
|
return_error_line = __LINE__;
|
|
|
|
goto err_bad_parent;
|
|
|
|
}
|
2021-12-01 02:51:52 +08:00
|
|
|
ret = binder_translate_fd_array(&pf_head, fda,
|
|
|
|
user_buffer, parent,
|
2021-12-01 02:51:51 +08:00
|
|
|
&user_object.bbo, t,
|
|
|
|
thread, in_reply_to);
|
2021-12-01 02:51:50 +08:00
|
|
|
if (!ret)
|
|
|
|
ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
|
|
|
|
t->buffer,
|
|
|
|
object_offset,
|
|
|
|
fda, sizeof(*fda));
|
|
|
|
if (ret) {
|
2022-04-30 07:56:44 +08:00
|
|
|
binder_txn_error("%d:%d translate fd array failed\n",
|
|
|
|
thread->pid, proc->pid);
|
2017-02-04 06:40:52 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
2021-12-01 02:51:50 +08:00
|
|
|
return_error_param = ret > 0 ? -EINVAL : ret;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_line = __LINE__;
|
2017-02-04 06:40:52 +08:00
|
|
|
goto err_translate_failed;
|
|
|
|
}
|
2019-02-09 02:35:17 +08:00
|
|
|
last_fixup_obj_off = parent_offset;
|
2017-02-04 06:40:52 +08:00
|
|
|
last_fixup_min_off =
|
|
|
|
fda->parent_offset + sizeof(u32) * fda->num_fds;
|
|
|
|
} break;
|
2017-02-04 06:40:51 +08:00
|
|
|
case BINDER_TYPE_PTR: {
|
|
|
|
struct binder_buffer_object *bp =
|
|
|
|
to_binder_buffer_object(hdr);
|
2019-02-09 02:35:20 +08:00
|
|
|
size_t buf_left = sg_buf_end_offset - sg_buf_offset;
|
|
|
|
size_t num_valid;
|
2017-02-04 06:40:51 +08:00
|
|
|
|
|
|
|
if (bp->length > buf_left) {
|
|
|
|
binder_user_error("%d:%d got transaction with too large buffer\n",
|
|
|
|
proc->pid, thread->pid);
|
|
|
|
return_error = BR_FAILED_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = -EINVAL;
|
|
|
|
return_error_line = __LINE__;
|
2017-02-04 06:40:51 +08:00
|
|
|
goto err_bad_offset;
|
|
|
|
}
|
2021-12-01 02:51:52 +08:00
|
|
|
ret = binder_defer_copy(&sgc_head, sg_buf_offset,
|
|
|
|
(const void __user *)(uintptr_t)bp->buffer,
|
|
|
|
bp->length);
|
|
|
|
if (ret) {
|
2022-04-30 07:56:44 +08:00
|
|
|
binder_txn_error("%d:%d deferred copy failed\n",
|
|
|
|
thread->pid, proc->pid);
|
2017-02-04 06:40:51 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
2021-12-01 02:51:52 +08:00
|
|
|
return_error_param = ret;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_line = __LINE__;
|
2021-12-01 02:51:52 +08:00
|
|
|
goto err_translate_failed;
|
2017-02-04 06:40:51 +08:00
|
|
|
}
|
|
|
|
/* Fixup buffer pointer to target proc address space */
|
2019-02-09 02:35:20 +08:00
|
|
|
bp->buffer = (uintptr_t)
|
|
|
|
t->buffer->user_data + sg_buf_offset;
|
|
|
|
sg_buf_offset += ALIGN(bp->length, sizeof(u64));
|
2017-02-04 06:40:51 +08:00
|
|
|
|
2019-12-14 04:25:31 +08:00
|
|
|
num_valid = (buffer_offset - off_start_offset) /
|
2019-02-09 02:35:20 +08:00
|
|
|
sizeof(binder_size_t);
|
2021-12-01 02:51:52 +08:00
|
|
|
ret = binder_fixup_parent(&pf_head, t,
|
|
|
|
thread, bp,
|
2019-02-09 02:35:17 +08:00
|
|
|
off_start_offset,
|
2019-02-09 02:35:20 +08:00
|
|
|
num_valid,
|
2019-02-09 02:35:17 +08:00
|
|
|
last_fixup_obj_off,
|
2017-02-04 06:40:51 +08:00
|
|
|
last_fixup_min_off);
|
2019-06-29 00:50:12 +08:00
|
|
|
if (ret < 0 ||
|
|
|
|
binder_alloc_copy_to_buffer(&target_proc->alloc,
|
|
|
|
t->buffer,
|
|
|
|
object_offset,
|
|
|
|
bp, sizeof(*bp))) {
|
2022-04-30 07:56:44 +08:00
|
|
|
binder_txn_error("%d:%d failed to fixup parent\n",
|
|
|
|
thread->pid, proc->pid);
|
2017-02-04 06:40:51 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = ret;
|
|
|
|
return_error_line = __LINE__;
|
2017-02-04 06:40:51 +08:00
|
|
|
goto err_translate_failed;
|
|
|
|
}
|
2019-02-09 02:35:17 +08:00
|
|
|
last_fixup_obj_off = object_offset;
|
2017-02-04 06:40:51 +08:00
|
|
|
last_fixup_min_off = 0;
|
|
|
|
} break;
|
2011-11-30 19:18:14 +08:00
|
|
|
default:
|
2013-07-04 17:54:48 +08:00
|
|
|
binder_user_error("%d:%d got transaction with invalid object type, %x\n",
|
2017-02-04 06:40:45 +08:00
|
|
|
proc->pid, thread->pid, hdr->type);
|
2011-11-30 19:18:14 +08:00
|
|
|
return_error = BR_FAILED_REPLY;
|
2017-06-30 03:01:46 +08:00
|
|
|
return_error_param = -EINVAL;
|
|
|
|
return_error_line = __LINE__;
|
2011-11-30 19:18:14 +08:00
|
|
|
goto err_bad_object_type;
|
|
|
|
}
|
|
|
|
}
|
2021-12-01 02:51:50 +08:00
|
|
|
/* Done processing objects, copy the rest of the buffer */
|
|
|
|
if (binder_alloc_copy_user_to_buffer(
|
|
|
|
&target_proc->alloc,
|
|
|
|
t->buffer, user_offset,
|
|
|
|
user_buffer + user_offset,
|
|
|
|
tr->data_size - user_offset)) {
|
|
|
|
binder_user_error("%d:%d got transaction with invalid data ptr\n",
|
|
|
|
proc->pid, thread->pid);
|
|
|
|
return_error = BR_FAILED_REPLY;
|
|
|
|
return_error_param = -EFAULT;
|
|
|
|
return_error_line = __LINE__;
|
|
|
|
goto err_copy_data_failed;
|
|
|
|
}
|
2021-12-01 02:51:52 +08:00
|
|
|
|
|
|
|
ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
|
|
|
|
&sgc_head, &pf_head);
|
|
|
|
if (ret) {
|
|
|
|
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
|
|
|
|
proc->pid, thread->pid);
|
|
|
|
return_error = BR_FAILED_REPLY;
|
|
|
|
return_error_param = ret;
|
|
|
|
return_error_line = __LINE__;
|
|
|
|
goto err_copy_data_failed;
|
|
|
|
}
|
2021-04-09 17:40:46 +08:00
|
|
|
if (t->buffer->oneway_spam_suspect)
|
|
|
|
tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
|
|
|
|
else
|
|
|
|
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
|
2017-06-30 03:02:03 +08:00
|
|
|
t->work.type = BINDER_WORK_TRANSACTION;
|
2017-06-30 03:01:48 +08:00
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
if (reply) {
|
2017-11-15 16:21:35 +08:00
|
|
|
binder_enqueue_thread_work(thread, tcomplete);
|
2017-06-30 03:02:06 +08:00
|
|
|
binder_inner_proc_lock(target_proc);
|
binder: fix freeze race
Currently the cgroup freezer is used to freeze the application threads, and
BINDER_FREEZE is used to freeze the corresponding binder interface.
There's already a mechanism in ioctl(BINDER_FREEZE) to wait for any
existing transactions to drain out before actually freezing the binder
interface.
But freezing an app requires two steps: freezing the binder interface with
ioctl(BINDER_FREEZE) and then freezing the application's main threads with
cgroupfs. This is not an atomic operation, so the following race
might happen.
1) Binder interface is frozen by ioctl(BINDER_FREEZE);
2) Main thread A initiates a new sync binder transaction to process B;
3) Main thread A is frozen by "echo 1 > cgroup.freeze";
4) The response from process B reaches the frozen thread, which will
unexpectedly fail.
This patch provides a mechanism to check if there's any new pending
transaction happening between ioctl(BINDER_FREEZE) and freezing the
main thread. If there's any, the main thread freezing operation can
be rolled back to finish the pending transaction.
Furthermore, the response might reach the binder driver before the
rollback actually happens. That will still cause a failed transaction.
Since the other process doesn't wait for a response to the response, the
failure can be fixed by treating the response transaction like a
oneway/async one, allowing it to reach the frozen thread. It will then
be consumed when the thread gets unfrozen later.
NOTE: This patch reuses the existing definition of struct
binder_frozen_status_info but expands the bit assignments of __u32
member sync_recv.
To ensure backward compatibility, bit 0 of sync_recv still indicates
there's an outstanding sync binder transaction. This patch adds new
information to bit 1 of sync_recv, indicating the binder transaction
happens exactly when there's a race.
If an existing userspace app runs on a new kernel, a sync binder call
will set bit 0 of sync_recv so ioctl(BINDER_GET_FROZEN_INFO) still
returns the expected value (true). The app simply doesn't check bit 1
intentionally so it doesn't have the ability to tell if there's a race.
This behavior is aligned with what happens on an old kernel which
doesn't set bit 1 at all.
A new userspace app can 1) check bit 0 to know if there's a sync binder
transaction happened when being frozen - same as before; and 2) check
bit 1 to know if that sync binder transaction happened exactly when
there's a race - new information for the rollback decision.
Fixes: 432ff1e91694 ("binder: BINDER_FREEZE ioctl")
Acked-by: Todd Kjos <tkjos@google.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Li Li <dualli@google.com>
Test: stress test with apps being frozen and initiating binder calls at
the same time, confirmed the pending transactions succeeded.
Link: https://lore.kernel.org/r/20210910164210.2282716-2-dualli@chromium.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2021-09-11 00:42:10 +08:00
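A minimal sketch of how a userspace freezer might consume the new bit (assumptions: the freezer holds an open fd on the binder device and must_unfreeze() is a hypothetical helper; the struct and ioctl names are the uapi ones mentioned above):

#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/*
 * Hypothetical freezer-side check, run after ioctl(BINDER_FREEZE) and
 * "echo 1 > cgroup.freeze": should the freeze be rolled back because a
 * sync transaction raced with it?
 */
static bool must_unfreeze(int binder_fd, uint32_t pid)
{
	struct binder_frozen_status_info info = { .pid = pid };

	if (ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &info) < 0)
		return true;	/* be conservative if the query fails */

	/*
	 * Bit 0 of sync_recv: a sync transaction was received while frozen
	 * (the pre-existing meaning). Bit 1: that transaction hit the freeze
	 * race exactly, so the pending call can only finish if the process
	 * is unfrozen again.
	 */
	return info.sync_recv & 0x2;
}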
|
|
|
if (target_thread->is_dead) {
|
|
|
|
return_error = BR_DEAD_REPLY;
|
2017-06-30 03:02:06 +08:00
|
|
|
binder_inner_proc_unlock(target_proc);
|
2017-06-30 03:01:57 +08:00
|
|
|
goto err_dead_proc_or_thread;
|
2017-06-30 03:02:06 +08:00
|
|
|
}
|
2011-11-30 19:18:14 +08:00
|
|
|
BUG_ON(t->buffer->async_transaction != 0);
|
2017-06-30 03:02:06 +08:00
|
|
|
binder_pop_transaction_ilocked(target_thread, in_reply_to);
|
2017-11-15 16:21:35 +08:00
|
|
|
binder_enqueue_thread_work_ilocked(target_thread, &t->work);
|
2021-03-16 09:16:28 +08:00
|
|
|
target_proc->outstanding_txns++;
|
2017-06-30 03:02:06 +08:00
|
|
|
binder_inner_proc_unlock(target_proc);
|
2017-08-31 16:04:19 +08:00
|
|
|
wake_up_interruptible_sync(&target_thread->wait);
|
2017-06-30 03:01:54 +08:00
|
|
|
binder_free_transaction(in_reply_to);
|
2011-11-30 19:18:14 +08:00
|
|
|
} else if (!(t->flags & TF_ONE_WAY)) {
|
|
|
|
BUG_ON(t->buffer->async_transaction != 0);
|
2017-06-30 03:02:06 +08:00
|
|
|
binder_inner_proc_lock(proc);
|
2017-11-15 16:21:35 +08:00
|
|
|
/*
|
|
|
|
* Defer the TRANSACTION_COMPLETE, so we don't return to
|
|
|
|
* userspace immediately; this allows the target process to
|
|
|
|
* immediately start processing this transaction, reducing
|
|
|
|
* latency. We will then return the TRANSACTION_COMPLETE when
|
|
|
|
* the target replies (or there is an error).
|
|
|
|
*/
|
|
|
|
binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
|
2011-11-30 19:18:14 +08:00
|
|
|
t->need_reply = 1;
|
|
|
|
t->from_parent = thread->transaction_stack;
|
|
|
|
thread->transaction_stack = t;
|
2017-06-30 03:02:06 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
2021-03-16 09:16:28 +08:00
|
|
|
return_error = binder_proc_transaction(t,
|
|
|
|
target_proc, target_thread);
|
|
|
|
if (return_error) {
|
2017-06-30 03:02:06 +08:00
|
|
|
binder_inner_proc_lock(proc);
|
|
|
|
binder_pop_transaction_ilocked(thread, t);
|
|
|
|
binder_inner_proc_unlock(proc);
|
2017-06-30 03:01:57 +08:00
|
|
|
goto err_dead_proc_or_thread;
|
|
|
|
}
|
2011-11-30 19:18:14 +08:00
|
|
|
} else {
|
|
|
|
BUG_ON(target_node == NULL);
|
|
|
|
BUG_ON(t->buffer->async_transaction != 1);
|
2021-03-16 09:16:28 +08:00
|
|
|
return_error = binder_proc_transaction(t, target_proc, NULL);
|
2022-11-24 04:16:54 +08:00
|
|
|
/*
|
|
|
|
* Let the caller know when async transaction reaches a frozen
|
|
|
|
* process and is put in a pending queue, waiting for the target
|
|
|
|
* process to be unfrozen.
|
|
|
|
*/
|
|
|
|
if (return_error == BR_TRANSACTION_PENDING_FROZEN)
|
|
|
|
tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
|
|
|
|
binder_enqueue_thread_work(thread, tcomplete);
|
|
|
|
if (return_error &&
|
|
|
|
return_error != BR_TRANSACTION_PENDING_FROZEN)
|
2017-06-30 03:01:57 +08:00
|
|
|
goto err_dead_proc_or_thread;
|
2017-06-30 03:01:37 +08:00
|
|
|
}
|
2017-06-30 03:01:57 +08:00
|
|
|
if (target_thread)
|
|
|
|
binder_thread_dec_tmpref(target_thread);
|
|
|
|
binder_proc_dec_tmpref(target_proc);
|
2017-09-30 06:39:49 +08:00
|
|
|
if (target_node)
|
|
|
|
binder_dec_node_tmpref(target_node);
|
2017-06-30 03:01:53 +08:00
|
|
|
/*
|
|
|
|
* write barrier to synchronize with initialization
|
|
|
|
* of log entry
|
|
|
|
*/
|
|
|
|
smp_wmb();
|
|
|
|
WRITE_ONCE(e->debug_id_done, t_debug_id);
|
2011-11-30 19:18:14 +08:00
|
|
|
return;
|
|
|
|
|
2017-06-30 03:01:57 +08:00
|
|
|
err_dead_proc_or_thread:
|
2022-04-30 07:56:44 +08:00
|
|
|
binder_txn_error("%d:%d dead process or thread\n",
|
|
|
|
thread->pid, proc->pid);
|
2017-06-30 03:01:57 +08:00
|
|
|
return_error_line = __LINE__;
|
2017-09-06 01:21:52 +08:00
|
|
|
binder_dequeue_work(proc, tcomplete);
|
2017-02-04 06:40:49 +08:00
|
|
|
err_translate_failed:
|
2011-11-30 19:18:14 +08:00
|
|
|
err_bad_object_type:
|
|
|
|
err_bad_offset:
|
2017-02-04 06:40:52 +08:00
|
|
|
err_bad_parent:
|
2011-11-30 19:18:14 +08:00
|
|
|
err_copy_data_failed:
|
2021-12-01 02:51:52 +08:00
|
|
|
binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
|
2018-08-29 04:46:25 +08:00
|
|
|
binder_free_txn_fixups(t);
|
2012-10-17 06:29:53 +08:00
|
|
|
trace_binder_transaction_failed_buffer_release(t->buffer);
|
2021-08-31 03:51:46 +08:00
|
|
|
binder_transaction_buffer_release(target_proc, NULL, t->buffer,
|
2019-02-09 02:35:20 +08:00
|
|
|
buffer_offset, true);
|
2017-09-30 06:39:49 +08:00
|
|
|
if (target_node)
|
|
|
|
binder_dec_node_tmpref(target_node);
|
2017-06-30 03:01:56 +08:00
|
|
|
target_node = NULL;
|
2011-11-30 19:18:14 +08:00
|
|
|
t->buffer->transaction = NULL;
|
2017-06-30 03:01:40 +08:00
|
|
|
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
|
2011-11-30 19:18:14 +08:00
|
|
|
err_binder_alloc_buf_failed:
|
2019-04-25 03:31:18 +08:00
|
|
|
err_bad_extra_size:
|
2019-01-15 01:10:21 +08:00
|
|
|
if (secctx)
|
|
|
|
security_release_secctx(secctx, secctx_sz);
|
|
|
|
err_get_secctx_failed:
|
2011-11-30 19:18:14 +08:00
|
|
|
kfree(tcomplete);
|
|
|
|
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
|
|
|
|
err_alloc_tcomplete_failed:
|
2020-11-11 11:02:43 +08:00
|
|
|
if (trace_binder_txn_latency_free_enabled())
|
|
|
|
binder_txn_latency_free(t);
|
2011-11-30 19:18:14 +08:00
|
|
|
kfree(t);
|
|
|
|
binder_stats_deleted(BINDER_STAT_TRANSACTION);
|
|
|
|
err_alloc_t_failed:
|
2018-08-14 08:28:53 +08:00
|
|
|
err_bad_todo_list:
|
2011-11-30 19:18:14 +08:00
|
|
|
err_bad_call_stack:
|
|
|
|
err_empty_call_stack:
|
|
|
|
err_dead_binder:
|
|
|
|
err_invalid_target_handle:
|
2017-09-30 06:39:49 +08:00
|
|
|
if (target_node) {
|
2017-06-30 03:01:56 +08:00
|
|
|
binder_dec_node(target_node, 1, 0);
|
2017-09-30 06:39:49 +08:00
|
|
|
binder_dec_node_tmpref(target_node);
|
|
|
|
}
|
2017-06-30 03:01:56 +08:00
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
|
2022-04-30 07:56:40 +08:00
|
|
|
"%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
|
|
|
|
proc->pid, thread->pid, reply ? "reply" :
|
|
|
|
(tr->flags & TF_ONE_WAY ? "async" : "call"),
|
|
|
|
target_proc ? target_proc->pid : 0,
|
|
|
|
target_thread ? target_thread->pid : 0,
|
|
|
|
t_debug_id, return_error, return_error_param,
|
2017-06-30 03:01:46 +08:00
|
|
|
(u64)tr->data_size, (u64)tr->offsets_size,
|
|
|
|
return_error_line);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2022-05-18 02:58:17 +08:00
|
|
|
if (target_thread)
|
|
|
|
binder_thread_dec_tmpref(target_thread);
|
|
|
|
if (target_proc)
|
|
|
|
binder_proc_dec_tmpref(target_proc);
|
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
{
|
|
|
|
struct binder_transaction_log_entry *fe;
|
2014-05-01 00:30:23 +08:00
|
|
|
|
2017-06-30 03:01:46 +08:00
|
|
|
e->return_error = return_error;
|
|
|
|
e->return_error_param = return_error_param;
|
|
|
|
e->return_error_line = return_error_line;
|
2011-11-30 19:18:14 +08:00
|
|
|
fe = binder_transaction_log_add(&binder_transaction_log_failed);
|
|
|
|
*fe = *e;
|
2017-06-30 03:01:53 +08:00
|
|
|
/*
|
|
|
|
* write barrier to synchronize with initialization
|
|
|
|
* of log entry
|
|
|
|
*/
|
|
|
|
smp_wmb();
|
|
|
|
WRITE_ONCE(e->debug_id_done, t_debug_id);
|
|
|
|
WRITE_ONCE(fe->debug_id_done, t_debug_id);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
2017-06-30 03:01:55 +08:00
|
|
|
BUG_ON(thread->return_error.cmd != BR_OK);
|
2011-11-30 19:18:14 +08:00
|
|
|
if (in_reply_to) {
|
2022-04-30 07:56:41 +08:00
|
|
|
binder_set_txn_from_error(in_reply_to, t_debug_id,
|
|
|
|
return_error, return_error_param);
|
2017-06-30 03:01:55 +08:00
|
|
|
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
|
2017-11-15 16:21:35 +08:00
|
|
|
binder_enqueue_thread_work(thread, &thread->return_error.work);
|
2011-11-30 19:18:14 +08:00
|
|
|
binder_send_failed_reply(in_reply_to, return_error);
|
2017-06-30 03:01:55 +08:00
|
|
|
} else {
|
2022-04-30 07:56:41 +08:00
|
|
|
binder_inner_proc_lock(proc);
|
|
|
|
binder_set_extended_error(&thread->ee, t_debug_id,
|
|
|
|
return_error, return_error_param);
|
|
|
|
binder_inner_proc_unlock(proc);
|
2017-06-30 03:01:55 +08:00
|
|
|
thread->return_error.cmd = return_error;
|
2017-11-15 16:21:35 +08:00
|
|
|
binder_enqueue_thread_work(thread, &thread->return_error.work);
|
2017-06-30 03:01:55 +08:00
|
|
|
}
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
2018-08-29 04:46:25 +08:00
|
|
|
/**
|
|
|
|
* binder_free_buf() - free the specified buffer
|
|
|
|
* @proc: binder proc that owns buffer
|
|
|
|
* @buffer: buffer to be freed
|
2021-10-16 07:38:11 +08:00
|
|
|
* @is_failure: failed to send transaction
|
2018-08-29 04:46:25 +08:00
|
|
|
*
|
|
|
|
* If the buffer is for an async transaction, enqueue the next async
|
|
|
|
* transaction from the node.
|
|
|
|
*
|
|
|
|
* Clean up the buffer and free it.
|
|
|
|
*/
|
2018-09-25 22:30:36 +08:00
|
|
|
static void
|
2021-08-31 03:51:46 +08:00
|
|
|
binder_free_buf(struct binder_proc *proc,
|
|
|
|
struct binder_thread *thread,
|
2021-10-16 07:38:11 +08:00
|
|
|
struct binder_buffer *buffer, bool is_failure)
|
2018-08-29 04:46:25 +08:00
|
|
|
{
|
2019-06-13 04:29:27 +08:00
|
|
|
binder_inner_proc_lock(proc);
|
2018-08-29 04:46:25 +08:00
|
|
|
if (buffer->transaction) {
|
|
|
|
buffer->transaction->buffer = NULL;
|
|
|
|
buffer->transaction = NULL;
|
|
|
|
}
|
2019-06-13 04:29:27 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
2018-08-29 04:46:25 +08:00
|
|
|
if (buffer->async_transaction && buffer->target_node) {
|
|
|
|
struct binder_node *buf_node;
|
|
|
|
struct binder_work *w;
|
|
|
|
|
|
|
|
buf_node = buffer->target_node;
|
|
|
|
binder_node_inner_lock(buf_node);
|
|
|
|
BUG_ON(!buf_node->has_async_transaction);
|
|
|
|
BUG_ON(buf_node->proc != proc);
|
|
|
|
w = binder_dequeue_work_head_ilocked(
|
|
|
|
&buf_node->async_todo);
|
|
|
|
if (!w) {
|
|
|
|
buf_node->has_async_transaction = false;
|
|
|
|
} else {
|
|
|
|
binder_enqueue_work_ilocked(
|
|
|
|
w, &proc->todo);
|
|
|
|
binder_wakeup_proc_ilocked(proc);
|
|
|
|
}
|
|
|
|
binder_node_inner_unlock(buf_node);
|
|
|
|
}
|
|
|
|
trace_binder_transaction_buffer_release(buffer);
|
2021-10-16 07:38:11 +08:00
|
|
|
binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
|
2018-08-29 04:46:25 +08:00
|
|
|
binder_alloc_free_buf(&proc->alloc, buffer);
|
|
|
|
}
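For context, a sketch of the userspace side that ends up in binder_free_buf() above via the BC_FREE_BUFFER case further below (illustrative only; free_txn_buffer() is a made-up helper and buffer_ptr is the data.ptr.buffer value delivered with the consumed BR_TRANSACTION):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* Hypothetical helper: hand a consumed transaction buffer back to the driver. */
static int free_txn_buffer(int binder_fd, binder_uintptr_t buffer_ptr)
{
	struct {
		uint32_t cmd;
		binder_uintptr_t ptr;
	} __attribute__((packed)) writebuf = { BC_FREE_BUFFER, buffer_ptr };
	struct binder_write_read bwr;

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_buffer = (uintptr_t)&writebuf;
	bwr.write_size = sizeof(writebuf);

	return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}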
|
|
|
|
|
2013-09-02 14:18:40 +08:00
|
|
|
static int binder_thread_write(struct binder_proc *proc,
|
|
|
|
struct binder_thread *thread,
|
2014-02-22 06:40:26 +08:00
|
|
|
binder_uintptr_t binder_buffer, size_t size,
|
|
|
|
binder_size_t *consumed)
|
2011-11-30 19:18:14 +08:00
|
|
|
{
|
|
|
|
uint32_t cmd;
|
2017-02-04 06:40:46 +08:00
|
|
|
struct binder_context *context = proc->context;
|
2014-02-22 06:40:26 +08:00
|
|
|
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
|
2011-11-30 19:18:14 +08:00
|
|
|
void __user *ptr = buffer + *consumed;
|
|
|
|
void __user *end = buffer + size;
|
|
|
|
|
2017-06-30 03:01:55 +08:00
|
|
|
while (ptr < end && thread->return_error.cmd == BR_OK) {
|
2017-06-30 03:01:58 +08:00
|
|
|
int ret;
|
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
if (get_user(cmd, (uint32_t __user *)ptr))
|
|
|
|
return -EFAULT;
|
|
|
|
ptr += sizeof(uint32_t);
|
2012-10-17 06:29:53 +08:00
|
|
|
trace_binder_command(cmd);
|
2011-11-30 19:18:14 +08:00
|
|
|
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
|
2017-06-30 03:01:44 +08:00
|
|
|
atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
|
|
|
|
atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
|
|
|
|
atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
switch (cmd) {
|
|
|
|
case BC_INCREFS:
|
|
|
|
case BC_ACQUIRE:
|
|
|
|
case BC_RELEASE:
|
|
|
|
case BC_DECREFS: {
|
|
|
|
uint32_t target;
|
|
|
|
const char *debug_string;
|
2017-06-30 03:01:58 +08:00
|
|
|
bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
|
|
|
|
bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
|
|
|
|
struct binder_ref_data rdata;
|
2011-11-30 19:18:14 +08:00
|
|
|
|
|
|
|
if (get_user(target, (uint32_t __user *)ptr))
|
|
|
|
return -EFAULT;
|
2017-06-30 03:01:43 +08:00
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
ptr += sizeof(uint32_t);
|
2017-06-30 03:01:58 +08:00
|
|
|
ret = -1;
|
|
|
|
if (increment && !target) {
|
2017-06-30 03:01:43 +08:00
|
|
|
struct binder_node *ctx_mgr_node;
|
2020-10-28 06:56:55 +08:00
|
|
|
|
2017-06-30 03:01:43 +08:00
|
|
|
mutex_lock(&context->context_mgr_node_lock);
|
|
|
|
ctx_mgr_node = context->binder_context_mgr_node;
|
binder: Prevent context manager from incrementing ref 0
Binder is designed such that a binder_proc never has references to
itself. If this rule is violated, memory corruption can occur when a
process sends a transaction to itself; see e.g.
<https://syzkaller.appspot.com/bug?extid=09e05aba06723a94d43d>.
There is a remaining edge case through which such a transaction-to-self
can still occur from the context of a task with BINDER_SET_CONTEXT_MGR
access:
- task A opens /dev/binder twice, creating binder_proc instances P1
and P2
- P1 becomes context manager
- P2 calls ACQUIRE on the magic handle 0, allocating index 0 in its
handle table
- P1 dies (by closing the /dev/binder fd and waiting a bit)
- P2 becomes context manager
- P2 calls ACQUIRE on the magic handle 0, allocating index 1 in its
handle table
[this triggers a warning: "binder: 1974:1974 tried to acquire
reference to desc 0, got 1 instead"]
- task B opens /dev/binder once, creating binder_proc instance P3
- P3 calls P2 (via magic handle 0) with (void*)1 as argument (two-way
transaction)
- P2 receives the handle and uses it to call P3 (two-way transaction)
- P3 calls P2 (via magic handle 0) (two-way transaction)
- P2 calls P2 (via handle 1) (two-way transaction)
And then, if P2 does *NOT* accept the incoming transaction work, but
instead closes the binder fd, we get a crash.
Solve it by preventing the context manager from using ACQUIRE on ref 0.
There shouldn't be any legitimate reason for the context manager to do
that.
Additionally, print a warning if someone manages to find another way to
trigger a transaction-to-self bug in the future.
Cc: stable@vger.kernel.org
Fixes: 457b9a6f09f0 ("Staging: android: add binder driver")
Acked-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Jann Horn <jannh@google.com>
Reviewed-by: Martijn Coenen <maco@android.com>
Link: https://lore.kernel.org/r/20200727120424.1627555-1-jannh@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2020-07-27 20:04:24 +08:00
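A minimal userspace sketch of the sequence this check refuses (illustrative only; acquire_handle0() is a made-up helper and binder_fd is assumed to be the context manager's own open /dev/binder descriptor):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/*
 * Hypothetical helper: issue BC_ACQUIRE on the magic handle 0. From any
 * other process this takes a strong reference on the context manager node;
 * from the context manager itself it would create a reference back to its
 * own process, which the check below now rejects with -EINVAL.
 */
static int acquire_handle0(int binder_fd)
{
	uint32_t writebuf[2] = { BC_ACQUIRE, 0 /* magic handle 0 */ };
	struct binder_write_read bwr;

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_buffer = (uintptr_t)writebuf;
	bwr.write_size = sizeof(writebuf);

	return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}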
|
|
|
if (ctx_mgr_node) {
|
|
|
|
if (ctx_mgr_node->proc == proc) {
|
|
|
|
binder_user_error("%d:%d context manager tried to acquire desc 0\n",
|
|
|
|
proc->pid, thread->pid);
|
|
|
|
mutex_unlock(&context->context_mgr_node_lock);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2017-06-30 03:01:58 +08:00
|
|
|
ret = binder_inc_ref_for_node(
|
|
|
|
proc, ctx_mgr_node,
|
|
|
|
strong, NULL, &rdata);
|
2020-07-27 20:04:24 +08:00
|
|
|
}
|
2017-06-30 03:01:43 +08:00
|
|
|
mutex_unlock(&context->context_mgr_node_lock);
|
|
|
|
}
|
2017-06-30 03:01:58 +08:00
|
|
|
if (ret)
|
|
|
|
ret = binder_update_ref_for_handle(
|
|
|
|
proc, target, increment, strong,
|
|
|
|
&rdata);
|
|
|
|
if (!ret && rdata.desc != target) {
|
|
|
|
binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
|
|
|
|
proc->pid, thread->pid,
|
|
|
|
target, rdata.desc);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
switch (cmd) {
|
|
|
|
case BC_INCREFS:
|
|
|
|
debug_string = "IncRefs";
|
|
|
|
break;
|
|
|
|
case BC_ACQUIRE:
|
|
|
|
debug_string = "Acquire";
|
|
|
|
break;
|
|
|
|
case BC_RELEASE:
|
|
|
|
debug_string = "Release";
|
|
|
|
break;
|
|
|
|
case BC_DECREFS:
|
|
|
|
default:
|
|
|
|
debug_string = "DecRefs";
|
2017-06-30 03:01:58 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (ret) {
|
|
|
|
binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
|
|
|
|
proc->pid, thread->pid, debug_string,
|
|
|
|
strong, target, ret);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
binder_debug(BINDER_DEBUG_USER_REFS,
|
2017-06-30 03:01:58 +08:00
|
|
|
"%d:%d %s ref %d desc %d s %d w %d\n",
|
|
|
|
proc->pid, thread->pid, debug_string,
|
|
|
|
rdata.debug_id, rdata.desc, rdata.strong,
|
|
|
|
rdata.weak);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case BC_INCREFS_DONE:
|
|
|
|
case BC_ACQUIRE_DONE: {
|
2014-02-22 06:40:26 +08:00
|
|
|
binder_uintptr_t node_ptr;
|
|
|
|
binder_uintptr_t cookie;
|
2011-11-30 19:18:14 +08:00
|
|
|
struct binder_node *node;
|
2017-06-30 03:02:03 +08:00
|
|
|
bool free_node;
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2014-02-22 06:40:26 +08:00
|
|
|
if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
|
2011-11-30 19:18:14 +08:00
|
|
|
return -EFAULT;
|
2014-02-22 06:40:26 +08:00
|
|
|
ptr += sizeof(binder_uintptr_t);
|
|
|
|
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
|
2011-11-30 19:18:14 +08:00
|
|
|
return -EFAULT;
|
2014-02-22 06:40:26 +08:00
|
|
|
ptr += sizeof(binder_uintptr_t);
|
2011-11-30 19:18:14 +08:00
|
|
|
node = binder_get_node(proc, node_ptr);
|
|
|
|
if (node == NULL) {
|
2014-02-22 06:40:26 +08:00
|
|
|
binder_user_error("%d:%d %s u%016llx no match\n",
|
2011-11-30 19:18:14 +08:00
|
|
|
proc->pid, thread->pid,
|
|
|
|
cmd == BC_INCREFS_DONE ?
|
|
|
|
"BC_INCREFS_DONE" :
|
|
|
|
"BC_ACQUIRE_DONE",
|
2014-02-22 06:40:26 +08:00
|
|
|
(u64)node_ptr);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (cookie != node->cookie) {
|
2014-02-22 06:40:26 +08:00
|
|
|
binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
|
2011-11-30 19:18:14 +08:00
|
|
|
proc->pid, thread->pid,
|
|
|
|
cmd == BC_INCREFS_DONE ?
|
|
|
|
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
|
2014-02-22 06:40:26 +08:00
|
|
|
(u64)node_ptr, node->debug_id,
|
|
|
|
(u64)cookie, (u64)node->cookie);
|
2017-06-30 03:01:59 +08:00
|
|
|
binder_put_node(node);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
}
|
2017-06-30 03:02:03 +08:00
|
|
|
binder_node_inner_lock(node);
|
2011-11-30 19:18:14 +08:00
|
|
|
if (cmd == BC_ACQUIRE_DONE) {
|
|
|
|
if (node->pending_strong_ref == 0) {
|
2012-10-31 01:05:43 +08:00
|
|
|
binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
|
2011-11-30 19:18:14 +08:00
|
|
|
proc->pid, thread->pid,
|
|
|
|
node->debug_id);
|
2017-06-30 03:02:03 +08:00
|
|
|
binder_node_inner_unlock(node);
|
2017-06-30 03:01:59 +08:00
|
|
|
binder_put_node(node);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
node->pending_strong_ref = 0;
|
|
|
|
} else {
|
|
|
|
if (node->pending_weak_ref == 0) {
|
2012-10-31 01:05:43 +08:00
|
|
|
binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
|
2011-11-30 19:18:14 +08:00
|
|
|
proc->pid, thread->pid,
|
|
|
|
node->debug_id);
|
2017-06-30 03:02:03 +08:00
|
|
|
binder_node_inner_unlock(node);
|
2017-06-30 03:01:59 +08:00
|
|
|
binder_put_node(node);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
node->pending_weak_ref = 0;
|
|
|
|
}
|
2017-06-30 03:02:03 +08:00
|
|
|
free_node = binder_dec_node_nilocked(node,
|
|
|
|
cmd == BC_ACQUIRE_DONE, 0);
|
|
|
|
WARN_ON(free_node);
|
2011-11-30 19:18:14 +08:00
|
|
|
binder_debug(BINDER_DEBUG_USER_REFS,
|
2017-06-30 03:01:59 +08:00
|
|
|
"%d:%d %s node %d ls %d lw %d tr %d\n",
|
2011-11-30 19:18:14 +08:00
|
|
|
proc->pid, thread->pid,
|
|
|
|
cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
|
2017-06-30 03:01:59 +08:00
|
|
|
node->debug_id, node->local_strong_refs,
|
|
|
|
node->local_weak_refs, node->tmp_refs);
|
2017-06-30 03:02:03 +08:00
|
|
|
binder_node_inner_unlock(node);
|
2017-06-30 03:01:59 +08:00
|
|
|
binder_put_node(node);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case BC_ATTEMPT_ACQUIRE:
|
2012-10-31 01:05:43 +08:00
|
|
|
pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
|
2011-11-30 19:18:14 +08:00
|
|
|
return -EINVAL;
|
|
|
|
case BC_ACQUIRE_RESULT:
|
2012-10-31 01:05:43 +08:00
|
|
|
pr_err("BC_ACQUIRE_RESULT not supported\n");
|
2011-11-30 19:18:14 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
case BC_FREE_BUFFER: {
|
2014-02-22 06:40:26 +08:00
|
|
|
binder_uintptr_t data_ptr;
|
2011-11-30 19:18:14 +08:00
|
|
|
struct binder_buffer *buffer;
|
|
|
|
|
2014-02-22 06:40:26 +08:00
|
|
|
if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
|
2011-11-30 19:18:14 +08:00
|
|
|
return -EFAULT;
|
2014-02-22 06:40:26 +08:00
|
|
|
ptr += sizeof(binder_uintptr_t);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2017-06-30 03:01:51 +08:00
|
|
|
buffer = binder_alloc_prepare_to_free(&proc->alloc,
|
|
|
|
data_ptr);
|
2018-11-07 07:55:32 +08:00
|
|
|
if (IS_ERR_OR_NULL(buffer)) {
|
|
|
|
if (PTR_ERR(buffer) == -EPERM) {
|
|
|
|
binder_user_error(
|
|
|
|
"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
|
|
|
|
proc->pid, thread->pid,
|
|
|
|
(u64)data_ptr);
|
|
|
|
} else {
|
|
|
|
binder_user_error(
|
|
|
|
"%d:%d BC_FREE_BUFFER u%016llx no match\n",
|
|
|
|
proc->pid, thread->pid,
|
|
|
|
(u64)data_ptr);
|
|
|
|
}
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
binder_debug(BINDER_DEBUG_FREE_BUFFER,
|
2014-02-22 06:40:26 +08:00
|
|
|
"%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
|
|
|
|
proc->pid, thread->pid, (u64)data_ptr,
|
|
|
|
buffer->debug_id,
|
2011-11-30 19:18:14 +08:00
|
|
|
buffer->transaction ? "active" : "finished");
|
2021-10-16 07:38:11 +08:00
|
|
|
binder_free_buf(proc, thread, buffer, false);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2017-02-04 06:40:51 +08:00
|
|
|
case BC_TRANSACTION_SG:
|
|
|
|
case BC_REPLY_SG: {
|
|
|
|
struct binder_transaction_data_sg tr;
|
|
|
|
|
|
|
|
if (copy_from_user(&tr, ptr, sizeof(tr)))
|
|
|
|
return -EFAULT;
|
|
|
|
ptr += sizeof(tr);
|
|
|
|
binder_transaction(proc, thread, &tr.transaction_data,
|
|
|
|
cmd == BC_REPLY_SG, tr.buffers_size);
|
|
|
|
break;
|
|
|
|
}
|
2011-11-30 19:18:14 +08:00
|
|
|
case BC_TRANSACTION:
|
|
|
|
case BC_REPLY: {
|
|
|
|
struct binder_transaction_data tr;
|
|
|
|
|
|
|
|
if (copy_from_user(&tr, ptr, sizeof(tr)))
|
|
|
|
return -EFAULT;
|
|
|
|
ptr += sizeof(tr);
|
2017-02-04 06:40:50 +08:00
|
|
|
binder_transaction(proc, thread, &tr,
|
|
|
|
cmd == BC_REPLY, 0);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
						thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %u\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
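
/*
 * Note on the looper protocol handled above: a thread that the driver asked
 * for via BR_SPAWN_LOOPER announces itself with BC_REGISTER_LOOPER (which
 * consumes proc->requested_threads), while a thread that userspace dedicates
 * on its own uses BC_ENTER_LOOPER. Issuing both on one thread marks the
 * looper state invalid, as the error paths above show.
 */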

static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}
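
/*
 * binder_put_node_cmd() below writes one node reference-count command (the
 * BR_* code followed by the node's ptr and cookie) into the userspace read
 * buffer and advances *ptrp past it.
 */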

static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}
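
/*
 * binder_wait_for_work() sleeps until this thread has work to do (or, when
 * do_proc_work is true, until the process has work), using a freezable,
 * interruptible wait. While it may handle process work the thread is parked
 * on proc->waiting_threads so a wakeup can select it.
 */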

static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);

	return ret;
}

/**
 * binder_apply_fd_fixups() - finish fd translation
 * @proc:	binder_proc associated with @t->buffer
 * @t:	binder transaction with list of fd fixups
 *
 * Now that we are in the context of the transaction target
 * process, we can allocate and install fds. Process the
 * list of fds to translate and fixup the buffer with the
 * new fds first and only then install the files.
 *
 * If we fail to allocate an fd, skip the install and release
 * any fds that have already been allocated.
 */
static int binder_apply_fd_fixups(struct binder_proc *proc,
				  struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;
	int ret = 0;

	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
		int fd = get_unused_fd_flags(O_CLOEXEC);

		if (fd < 0) {
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "failed fd fixup txn %d fd %d\n",
				     t->debug_id, fd);
			ret = -ENOMEM;
			goto err;
		}
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "fd fixup txn %d fd %d\n",
			     t->debug_id, fd);
		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
		fixup->target_fd = fd;
		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
						fixup->offset, &fd,
						sizeof(u32))) {
			ret = -EINVAL;
			goto err;
		}
	}
	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fd_install(fixup->target_fd, fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}

	return ret;

err:
	binder_free_txn_fixups(t);
	return ret;
}
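
/*
 * binder_thread_read() drains work items into the userspace read buffer as
 * BR_* commands. It starts the buffer with BR_NOOP, blocks in
 * binder_wait_for_work() unless the fd was opened O_NONBLOCK, and on the way
 * out may append BR_SPAWN_LOOPER to ask userspace for another looper thread.
 */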

static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data_secctx tr;
		struct binder_transaction_data *trd = &tr.transaction_data;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;
		size_t trsize = sizeof(*trd);

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			 wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE:
		case BINDER_WORK_TRANSACTION_PENDING:
		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
			if (proc->oneway_spam_detection_enabled &&
			    w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
				cmd = BR_ONEWAY_SPAM_SUSPECT;
			else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
				cmd = BR_TRANSACTION_PENDING_FROZEN;
			else
				cmd = BR_TRANSACTION_COMPLETE;
			binder_inner_proc_unlock(proc);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		default:
			binder_inner_proc_unlock(proc);
			pr_err("%d:%d: bad work type %d\n",
			       proc->pid, thread->pid, w->type);
			break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			trd->target.ptr = target_node->ptr;
			trd->cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			trd->target.ptr = 0;
			trd->cookie = 0;
			cmd = BR_REPLY;
		}
		trd->code = t->code;
		trd->flags = t->flags;
		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			trd->sender_pid =
				task_tgid_nr_ns(sender,
						task_active_pid_ns(current));
		} else {
			trd->sender_pid = 0;
		}

		ret = binder_apply_fd_fixups(proc, t);
		if (ret) {
			struct binder_buffer *buffer = t->buffer;
			bool oneway = !!(t->flags & TF_ONE_WAY);
			int tid = t->debug_id;

			if (t_from)
				binder_thread_dec_tmpref(t_from);
			buffer->transaction = NULL;
			binder_cleanup_transaction(t, "fd fixups failed",
						   BR_FAILED_REPLY);
			binder_free_buf(proc, thread, buffer, true);
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
				     proc->pid, thread->pid,
				     oneway ? "async " :
					(cmd == BR_REPLY ? "reply " : ""),
				     tid, BR_FAILED_REPLY, ret, __LINE__);
			if (cmd == BR_REPLY) {
				cmd = BR_FAILED_REPLY;
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				binder_stat_br(proc, thread, cmd);
				break;
			}
			continue;
		}
		trd->data_size = t->buffer->data_size;
		trd->offsets_size = t->buffer->offsets_size;
		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
		trd->data.ptr.offsets = trd->data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		tr.secctx = t->security_ctx;
		if (t->security_ctx) {
			cmd = BR_TRANSACTION_SEC_CTX;
			trsize = sizeof(tr);
		}
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, trsize)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += trsize;

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
				(cmd == BR_TRANSACTION_SEC_CTX) ?
				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)trd->data.ptr.buffer,
			     (u64)trd->data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to
					    * spawn a new thread if we leave
					    * this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}
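
/*
 * binder_release_work() flushes a todo list that can no longer be delivered
 * (thread or process teardown), cleaning up transactions and freeing the
 * undelivered work items.
 */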

static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;
	enum binder_work_type wtype;

	while (1) {
		binder_inner_proc_lock(proc);
		w = binder_dequeue_work_head_ilocked(list);
		wtype = w ? w->type : 0;
		binder_inner_proc_unlock(proc);
		if (!w)
			return;

		switch (wtype) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		case BINDER_WORK_NODE:
			break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       wtype);
			break;
		}
	}
}
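
/*
 * binder_get_thread_ilocked() looks up the binder_thread for the current task
 * in the proc->threads rbtree (keyed by pid). If it is not found and
 * new_thread is supplied, new_thread is initialized and inserted instead.
 */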

static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	thread->ee.command = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}
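
/*
 * binder_get_thread() allocates the thread struct outside the inner lock,
 * then retries the lookup under the lock; if another task raced and inserted
 * a thread for this pid first, the spare allocation is freed.
 */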

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}

static void binder_free_proc(struct binder_proc *proc)
{
	struct binder_device *device;

	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	if (proc->outstanding_txns)
		pr_warn("%s: Unexpected outstanding_txns %d\n",
			__func__, proc->outstanding_txns);
	device = container_of(proc->context, struct binder_device, context);
	if (refcount_dec_and_test(&device->ref)) {
		kfree(proc->context->name);
		kfree(device);
	}
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	put_cred(proc->cred);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}
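
/*
 * binder_thread_release() unlinks a thread from its proc, walks the
 * transaction stack to detach any transactions still referencing the thread,
 * and returns how many of them were still active.
 */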

static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	} else {
		__acquire(&t->lock);
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			thread->proc->outstanding_txns--;
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
		else
			__acquire(&t->lock);
	}
	/* annotation for sparse, lock not acquired in last iteration above */
	__release(&t->lock);

	/*
	 * If this thread used poll, make sure we remove the waitqueue from any
	 * poll data structures holding it.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		wake_up_pollfree(&thread->wait);

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_pollfree() above and
	 * someone else removing the last entry from the queue for other reasons
	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
	 * descriptor being closed). Such other users hold an RCU read lock, so
	 * we can be sure they're done after we call synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}

static __poll_t binder_poll(struct file *filp,
			    struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return EPOLLIN;

	return 0;
}
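
/*
 * For reference, the write/read path below is driven from userspace with the
 * BINDER_WRITE_READ ioctl. A minimal illustrative sketch (userspace side,
 * error handling omitted; not part of the driver):
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	uint32_t rbuf[32];
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *		.write_size = sizeof(cmd),
 *		.read_buffer = (binder_uintptr_t)rbuf,
 *		.read_size = sizeof(rbuf),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * bwr.write_consumed and bwr.read_consumed report progress, and rbuf then
 * starts with BR_NOOP followed by any BR_* work that was pending.
 */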

static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
				   struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
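
/*
 * binder_ioctl_set_ctx_mgr() registers the calling process as the single
 * context manager for this binder context: it fails with -EBUSY if a manager
 * node already exists, checks security_binder_set_context_mgr() against the
 * opener's credentials, and requires a matching euid when a manager uid was
 * set previously.
 */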

static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->cred);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, fbo);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}

static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
		struct binder_node_info_for_ref *info)
{
	struct binder_node *node;
	struct binder_context *context = proc->context;
	__u32 handle = info->handle;

	if (info->strong_count || info->weak_count || info->reserved1 ||
	    info->reserved2 || info->reserved3) {
		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
				  proc->pid);
		return -EINVAL;
	}

	/* This ioctl may only be used by the context manager */
	mutex_lock(&context->context_mgr_node_lock);
	if (!context->binder_context_mgr_node ||
	    context->binder_context_mgr_node->proc != proc) {
		mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
	mutex_unlock(&context->context_mgr_node_lock);

	node = binder_get_node_from_ref(proc, handle, true, NULL);
	if (!node)
		return -EINVAL;

	info->strong_count = node->local_strong_refs +
		node->internal_strong_refs;
	info->weak_count = node->local_weak_refs;

	binder_put_node(node);

	return 0;
}

static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}

static bool binder_txns_pending_ilocked(struct binder_proc *proc)
{
	struct rb_node *n;
	struct binder_thread *thread;

	if (proc->outstanding_txns > 0)
		return true;

	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->transaction_stack)
			return true;
	}
	return false;
}
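
/*
 * binder_ioctl_freeze() below marks the target frozen before optionally
 * waiting (up to timeout_ms) for outstanding transactions to drain. If a
 * transaction waiting for a reply is still pending afterwards, the freeze is
 * undone and -EAGAIN is returned so userspace can roll back.
 */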

static int binder_ioctl_freeze(struct binder_freeze_info *info,
			       struct binder_proc *target_proc)
{
	int ret = 0;

	if (!info->enable) {
		binder_inner_proc_lock(target_proc);
		target_proc->sync_recv = false;
		target_proc->async_recv = false;
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
		return 0;
	}

	/*
	 * Freezing the target. Prevent new transactions by
	 * setting frozen state. If timeout specified, wait
	 * for transactions to drain.
	 */
	binder_inner_proc_lock(target_proc);
	target_proc->sync_recv = false;
	target_proc->async_recv = false;
	target_proc->is_frozen = true;
	binder_inner_proc_unlock(target_proc);

	if (info->timeout_ms > 0)
		ret = wait_event_interruptible_timeout(
			target_proc->freeze_wait,
			(!target_proc->outstanding_txns),
			msecs_to_jiffies(info->timeout_ms));

	/* Check pending transactions that wait for reply */
	if (ret >= 0) {
		binder_inner_proc_lock(target_proc);
		if (binder_txns_pending_ilocked(target_proc))
			ret = -EAGAIN;
		binder_inner_proc_unlock(target_proc);
	}

	if (ret < 0) {
		binder_inner_proc_lock(target_proc);
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
	}

	return ret;
}
|
|
|
|
|
2021-03-16 09:16:30 +08:00
|
|
|
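/*
 * Report the frozen-state flags for every binder_proc matching
 * info->pid. Bit 0 of sync_recv is set if a sync transaction was
 * received while frozen; bit 1 is set if transactions are still
 * pending a reply.
 */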
static int binder_ioctl_get_freezer_info(
|
|
|
|
struct binder_frozen_status_info *info)
|
|
|
|
{
|
|
|
|
struct binder_proc *target_proc;
|
|
|
|
bool found = false;
|
|
|
|
__u32 txns_pending;
|
2021-03-16 09:16:30 +08:00
|
|
|
|
|
|
|
info->sync_recv = 0;
|
|
|
|
info->async_recv = 0;
|
|
|
|
|
|
|
|
mutex_lock(&binder_procs_lock);
|
|
|
|
hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
|
|
|
|
if (target_proc->pid == info->pid) {
|
|
|
|
found = true;
|
|
|
|
binder_inner_proc_lock(target_proc);
|
|
|
|
txns_pending = binder_txns_pending_ilocked(target_proc);
|
|
|
|
info->sync_recv |= target_proc->sync_recv |
|
|
|
|
(txns_pending << 1);
|
2021-03-16 09:16:30 +08:00
|
|
|
info->async_recv |= target_proc->async_recv;
|
|
|
|
binder_inner_proc_unlock(target_proc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
mutex_unlock(&binder_procs_lock);
|
|
|
|
|
|
|
|
if (!found)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-04-30 07:56:41 +08:00
|
|
|
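/*
 * Copy the last extended error recorded for this thread to userspace
 * and reset it to BR_OK so the next failure can be reported.
 */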
static int binder_ioctl_get_extended_error(struct binder_thread *thread,
|
|
|
|
void __user *ubuf)
|
|
|
|
{
|
2022-05-18 09:17:54 +08:00
|
|
|
struct binder_extended_error ee;
|
2022-04-30 07:56:41 +08:00
|
|
|
|
|
|
|
binder_inner_proc_lock(thread->proc);
|
2022-05-18 09:17:54 +08:00
|
|
|
ee = thread->ee;
|
|
|
|
binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
|
2022-04-30 07:56:41 +08:00
|
|
|
binder_inner_proc_unlock(thread->proc);
|
|
|
|
|
2022-05-18 09:17:54 +08:00
|
|
|
if (copy_to_user(ubuf, &ee, sizeof(ee)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
2022-04-30 07:56:41 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
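/*
 * Main ioctl entry point: resolves the calling binder_thread, then
 * dispatches BINDER_WRITE_READ, context manager setup, freeze control
 * and the assorted information queries.
 */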
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct binder_proc *proc = filp->private_data;
|
|
|
|
struct binder_thread *thread;
|
|
|
|
void __user *ubuf = (void __user *)arg;
|
|
|
|
|
2014-06-04 03:27:21 +08:00
|
|
|
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
|
|
|
|
proc->pid, current->pid, cmd, arg);*/
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2017-08-23 23:46:40 +08:00
|
|
|
binder_selftest_alloc(&proc->alloc);
|
|
|
|
|
2012-10-17 06:29:53 +08:00
|
|
|
trace_binder_ioctl(cmd, arg);
|
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
|
|
|
|
if (ret)
|
2012-10-17 06:29:53 +08:00
|
|
|
goto err_unlocked;
|
2011-11-30 19:18:14 +08:00
|
|
|
|
|
|
|
thread = binder_get_thread(proc);
|
|
|
|
if (thread == NULL) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (cmd) {
|
2014-06-04 03:27:21 +08:00
|
|
|
case BINDER_WRITE_READ:
|
2022-11-15 20:03:51 +08:00
|
|
|
ret = binder_ioctl_write_read(filp, arg, thread);
|
2014-06-04 03:27:21 +08:00
|
|
|
if (ret)
|
2011-11-30 19:18:14 +08:00
|
|
|
goto err;
|
|
|
|
break;
|
2017-06-30 03:02:07 +08:00
|
|
|
case BINDER_SET_MAX_THREADS: {
|
|
|
|
int max_threads;
|
|
|
|
|
|
|
|
if (copy_from_user(&max_threads, ubuf,
|
|
|
|
sizeof(max_threads))) {
|
2011-11-30 19:18:14 +08:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto err;
|
|
|
|
}
|
2017-06-30 03:02:07 +08:00
|
|
|
binder_inner_proc_lock(proc);
|
|
|
|
proc->max_threads = max_threads;
|
|
|
|
binder_inner_proc_unlock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
2017-06-30 03:02:07 +08:00
|
|
|
}
|
2019-01-15 01:10:21 +08:00
|
|
|
case BINDER_SET_CONTEXT_MGR_EXT: {
|
|
|
|
struct flat_binder_object fbo;
|
|
|
|
|
|
|
|
if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
break;
|
|
|
|
}
|
2011-11-30 19:18:14 +08:00
|
|
|
case BINDER_SET_CONTEXT_MGR:
|
2019-01-15 01:10:21 +08:00
|
|
|
ret = binder_ioctl_set_ctx_mgr(filp, NULL);
|
2014-06-04 03:27:21 +08:00
|
|
|
if (ret)
|
2011-11-30 19:18:14 +08:00
|
|
|
goto err;
|
|
|
|
break;
|
|
|
|
case BINDER_THREAD_EXIT:
|
2012-10-31 01:05:43 +08:00
|
|
|
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
|
2011-11-30 19:18:14 +08:00
|
|
|
proc->pid, thread->pid);
|
2017-06-30 03:01:57 +08:00
|
|
|
binder_thread_release(proc, thread);
|
2011-11-30 19:18:14 +08:00
|
|
|
thread = NULL;
|
|
|
|
break;
|
2014-04-15 18:03:05 +08:00
|
|
|
case BINDER_VERSION: {
|
|
|
|
struct binder_version __user *ver = ubuf;
|
|
|
|
|
|
|
|
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
|
|
|
|
&ver->protocol_version)) {
|
2011-11-30 19:18:14 +08:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
break;
|
2014-04-15 18:03:05 +08:00
|
|
|
}
|
2018-09-07 21:38:37 +08:00
|
|
|
case BINDER_GET_NODE_INFO_FOR_REF: {
|
|
|
|
struct binder_node_info_for_ref info;
|
|
|
|
|
|
|
|
if (copy_from_user(&info, ubuf, sizeof(info))) {
|
|
|
|
ret = -EFAULT;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = binder_ioctl_get_node_info_for_ref(proc, &info);
|
|
|
|
if (ret < 0)
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
if (copy_to_user(ubuf, &info, sizeof(info))) {
|
|
|
|
ret = -EFAULT;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
2017-08-31 16:04:24 +08:00
|
|
|
case BINDER_GET_NODE_DEBUG_INFO: {
|
|
|
|
struct binder_node_debug_info info;
|
|
|
|
|
|
|
|
if (copy_from_user(&info, ubuf, sizeof(info))) {
|
|
|
|
ret = -EFAULT;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = binder_ioctl_get_node_debug_info(proc, &info);
|
|
|
|
if (ret < 0)
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
if (copy_to_user(ubuf, &info, sizeof(info))) {
|
|
|
|
ret = -EFAULT;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2021-03-16 09:16:28 +08:00
|
|
|
case BINDER_FREEZE: {
|
|
|
|
struct binder_freeze_info info;
|
|
|
|
struct binder_proc **target_procs = NULL, *target_proc;
|
|
|
|
int target_procs_count = 0, i = 0;
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
if (copy_from_user(&info, ubuf, sizeof(info))) {
|
|
|
|
ret = -EFAULT;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
mutex_lock(&binder_procs_lock);
|
|
|
|
hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
|
|
|
|
if (target_proc->pid == info.pid)
|
|
|
|
target_procs_count++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (target_procs_count == 0) {
|
|
|
|
mutex_unlock(&binder_procs_lock);
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
target_procs = kcalloc(target_procs_count,
|
|
|
|
sizeof(struct binder_proc *),
|
|
|
|
GFP_KERNEL);
|
|
|
|
|
|
|
|
if (!target_procs) {
|
|
|
|
mutex_unlock(&binder_procs_lock);
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
|
|
|
|
if (target_proc->pid != info.pid)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
binder_inner_proc_lock(target_proc);
|
|
|
|
target_proc->tmp_ref++;
|
|
|
|
binder_inner_proc_unlock(target_proc);
|
|
|
|
|
|
|
|
target_procs[i++] = target_proc;
|
|
|
|
}
|
|
|
|
mutex_unlock(&binder_procs_lock);
|
|
|
|
|
|
|
|
for (i = 0; i < target_procs_count; i++) {
|
|
|
|
if (ret >= 0)
|
|
|
|
ret = binder_ioctl_freeze(&info,
|
|
|
|
target_procs[i]);
|
|
|
|
|
|
|
|
binder_proc_dec_tmpref(target_procs[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
kfree(target_procs);
|
|
|
|
|
|
|
|
if (ret < 0)
|
|
|
|
goto err;
|
|
|
|
break;
|
|
|
|
}
|
2021-03-16 09:16:30 +08:00
|
|
|
case BINDER_GET_FROZEN_INFO: {
|
|
|
|
struct binder_frozen_status_info info;
|
|
|
|
|
|
|
|
if (copy_from_user(&info, ubuf, sizeof(info))) {
|
|
|
|
ret = -EFAULT;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = binder_ioctl_get_freezer_info(&info);
|
|
|
|
if (ret < 0)
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
if (copy_to_user(ubuf, &info, sizeof(info))) {
|
|
|
|
ret = -EFAULT;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2021-04-09 17:40:46 +08:00
|
|
|
case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
|
|
|
|
uint32_t enable;
|
|
|
|
|
|
|
|
if (copy_from_user(&enable, ubuf, sizeof(enable))) {
|
2021-05-07 03:37:25 +08:00
|
|
|
ret = -EFAULT;
|
2021-04-09 17:40:46 +08:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
binder_inner_proc_lock(proc);
|
|
|
|
proc->oneway_spam_detection_enabled = (bool)enable;
|
|
|
|
binder_inner_proc_unlock(proc);
|
|
|
|
break;
|
|
|
|
}
|
2022-04-30 07:56:41 +08:00
|
|
|
case BINDER_GET_EXTENDED_ERROR:
|
|
|
|
ret = binder_ioctl_get_extended_error(thread, ubuf);
|
|
|
|
if (ret < 0)
|
|
|
|
goto err;
|
|
|
|
break;
|
2011-11-30 19:18:14 +08:00
|
|
|
default:
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
ret = 0;
|
|
|
|
err:
|
|
|
|
if (thread)
|
2017-06-30 03:01:49 +08:00
|
|
|
thread->looper_need_return = false;
|
2011-11-30 19:18:14 +08:00
|
|
|
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
|
2021-03-16 09:16:29 +08:00
|
|
|
if (ret && ret != -EINTR)
|
2012-10-31 01:05:43 +08:00
|
|
|
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
|
2012-10-17 06:29:53 +08:00
|
|
|
err_unlocked:
|
|
|
|
trace_binder_ioctl_done(ret);
|
2011-11-30 19:18:14 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void binder_vma_open(struct vm_area_struct *vma)
|
|
|
|
{
|
|
|
|
struct binder_proc *proc = vma->vm_private_data;
|
2014-05-01 00:30:23 +08:00
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
|
2012-10-31 01:05:43 +08:00
|
|
|
"%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
|
2011-11-30 19:18:14 +08:00
|
|
|
proc->pid, vma->vm_start, vma->vm_end,
|
|
|
|
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
|
|
|
|
(unsigned long)pgprot_val(vma->vm_page_prot));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void binder_vma_close(struct vm_area_struct *vma)
|
|
|
|
{
|
|
|
|
struct binder_proc *proc = vma->vm_private_data;
|
2014-05-01 00:30:23 +08:00
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
|
2012-10-31 01:05:43 +08:00
|
|
|
"%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
|
2011-11-30 19:18:14 +08:00
|
|
|
proc->pid, vma->vm_start, vma->vm_end,
|
|
|
|
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
|
|
|
|
(unsigned long)pgprot_val(vma->vm_page_prot));
|
2017-06-30 03:01:40 +08:00
|
|
|
binder_alloc_vma_close(&proc->alloc);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
2018-04-24 00:24:00 +08:00
|
|
|
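/*
 * The driver inserts mapping pages itself, so a fault on the binder
 * mapping is always an error; report SIGBUS.
 */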
static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
|
2014-06-02 20:47:59 +08:00
|
|
|
{
|
|
|
|
return VM_FAULT_SIGBUS;
|
|
|
|
}
|
|
|
|
|
2015-09-10 06:39:26 +08:00
|
|
|
static const struct vm_operations_struct binder_vm_ops = {
|
2011-11-30 19:18:14 +08:00
|
|
|
.open = binder_vma_open,
|
|
|
|
.close = binder_vma_close,
|
2014-06-02 20:47:59 +08:00
|
|
|
.fault = binder_vm_fault,
|
2011-11-30 19:18:14 +08:00
|
|
|
};
|
|
|
|
|
2017-06-30 03:01:40 +08:00
|
|
|
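/*
 * Establish the userspace mapping for the binder buffer area. VMAs
 * with forbidden flags are rejected; the actual buffer setup is done
 * by binder_alloc_mmap_handler().
 */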
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
|
|
|
|
{
|
|
|
|
struct binder_proc *proc = filp->private_data;
|
|
|
|
|
|
|
|
if (proc->tsk != current->group_leader)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
|
|
|
|
"%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
|
|
|
|
__func__, proc->pid, vma->vm_start, vma->vm_end,
|
|
|
|
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
|
|
|
|
(unsigned long)pgprot_val(vma->vm_page_prot));
|
|
|
|
|
|
|
|
if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
|
2020-09-29 09:52:16 +08:00
|
|
|
pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
|
|
|
|
proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
|
|
|
|
return -EPERM;
|
2017-06-30 03:01:40 +08:00
|
|
|
}
|
2023-01-27 03:37:49 +08:00
|
|
|
vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
|
2018-05-07 22:15:37 +08:00
|
|
|
|
2017-06-30 03:01:40 +08:00
|
|
|
vma->vm_ops = &binder_vm_ops;
|
|
|
|
vma->vm_private_data = proc;
|
|
|
|
|
2020-09-29 09:52:16 +08:00
|
|
|
return binder_alloc_mmap_handler(&proc->alloc, vma);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
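/*
 * Allocate and initialize a binder_proc for the opening task, attach
 * it to the binder device context and create the per-PID debugfs and
 * binderfs log entries.
 */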
static int binder_open(struct inode *nodp, struct file *filp)
|
|
|
|
{
|
2020-01-10 23:44:01 +08:00
|
|
|
struct binder_proc *proc, *itr;
|
2017-02-04 06:40:48 +08:00
|
|
|
struct binder_device *binder_dev;
|
2019-09-04 00:16:55 +08:00
|
|
|
struct binderfs_info *info;
|
|
|
|
struct dentry *binder_binderfs_dir_entry_proc = NULL;
|
2020-01-10 23:44:01 +08:00
|
|
|
bool existing_pid = false;
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2017-12-29 17:03:37 +08:00
|
|
|
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
|
2011-11-30 19:18:14 +08:00
|
|
|
current->group_leader->pid, current->pid);
|
|
|
|
|
|
|
|
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
|
|
|
|
if (proc == NULL)
|
|
|
|
return -ENOMEM;
|
2017-06-30 03:02:00 +08:00
|
|
|
spin_lock_init(&proc->inner_lock);
|
|
|
|
spin_lock_init(&proc->outer_lock);
|
2017-06-30 03:01:36 +08:00
|
|
|
get_task_struct(current->group_leader);
|
|
|
|
proc->tsk = current->group_leader;
|
2021-10-13 00:56:12 +08:00
|
|
|
proc->cred = get_cred(filp->f_cred);
|
2011-11-30 19:18:14 +08:00
|
|
|
INIT_LIST_HEAD(&proc->todo);
|
2021-03-16 09:16:28 +08:00
|
|
|
init_waitqueue_head(&proc->freeze_wait);
|
2011-11-30 19:18:14 +08:00
|
|
|
proc->default_priority = task_nice(current);
|
|
|
|
/* binderfs stashes devices in i_private */
|
2019-09-04 00:16:55 +08:00
|
|
|
if (is_binderfs_device(nodp)) {
|
2020-03-04 00:43:40 +08:00
|
|
|
binder_dev = nodp->i_private;
|
2019-09-04 00:16:55 +08:00
|
|
|
info = nodp->i_sb->s_fs_info;
|
|
|
|
binder_binderfs_dir_entry_proc = info->proc_log_dir;
|
|
|
|
} else {
|
|
|
|
binder_dev = container_of(filp->private_data,
|
|
|
|
struct binder_device, miscdev);
|
2019-09-04 00:16:55 +08:00
|
|
|
}
|
2020-03-04 00:43:40 +08:00
|
|
|
refcount_inc(&binder_dev->ref);
|
2017-02-04 06:40:48 +08:00
|
|
|
proc->context = &binder_dev->context;
|
2017-06-30 03:01:40 +08:00
|
|
|
binder_alloc_init(&proc->alloc);
|
2012-10-17 06:29:53 +08:00
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
binder_stats_created(BINDER_STAT_PROC);
|
|
|
|
proc->pid = current->group_leader->pid;
|
|
|
|
INIT_LIST_HEAD(&proc->delivered_death);
|
2017-08-31 16:04:18 +08:00
|
|
|
INIT_LIST_HEAD(&proc->waiting_threads);
|
2011-11-30 19:18:14 +08:00
|
|
|
filp->private_data = proc;
|
2012-10-17 06:29:53 +08:00
|
|
|
|
2017-06-30 03:01:43 +08:00
|
|
|
mutex_lock(&binder_procs_lock);
|
2020-01-10 23:44:01 +08:00
|
|
|
hlist_for_each_entry(itr, &binder_procs, proc_node) {
|
|
|
|
if (itr->pid == proc->pid) {
|
|
|
|
existing_pid = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2017-06-30 03:01:43 +08:00
|
|
|
hlist_add_head(&proc->proc_node, &binder_procs);
|
|
|
|
mutex_unlock(&binder_procs_lock);
|
|
|
|
|
2020-01-10 23:44:01 +08:00
|
|
|
if (binder_debugfs_dir_entry_proc && !existing_pid) {
|
2011-11-30 19:18:14 +08:00
|
|
|
char strbuf[11];
|
2014-05-01 00:30:23 +08:00
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
|
2017-02-04 06:40:47 +08:00
|
|
|
/*
|
2020-01-10 23:44:01 +08:00
|
|
|
* proc debug entries are shared between contexts.
|
|
|
|
* Only create for the first PID to avoid debugfs log spamming
|
|
|
|
* The printing code will anyway print all contexts for a given
|
|
|
|
* PID so this is not a problem.
|
2017-02-04 06:40:47 +08:00
|
|
|
*/
|
2017-12-22 22:07:02 +08:00
|
|
|
proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
|
2017-02-04 06:40:47 +08:00
|
|
|
binder_debugfs_dir_entry_proc,
|
|
|
|
(void *)(unsigned long)proc->pid,
|
2018-12-01 09:26:30 +08:00
|
|
|
&proc_fops);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
2020-01-10 23:44:01 +08:00
|
|
|
if (binder_binderfs_dir_entry_proc && !existing_pid) {
|
2019-09-04 00:16:55 +08:00
|
|
|
char strbuf[11];
|
|
|
|
struct dentry *binderfs_entry;
|
|
|
|
|
|
|
|
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
|
|
|
|
/*
|
|
|
|
* Similar to debugfs, the process specific log file is shared
|
2020-01-10 23:44:01 +08:00
|
|
|
* between contexts. Only create for the first PID.
|
|
|
|
* This is ok since same as debugfs, the log file will contain
|
|
|
|
* information on all contexts of a given PID.
|
2019-09-04 00:16:55 +08:00
|
|
|
*/
|
|
|
|
binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
|
|
|
|
strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
|
|
|
|
if (!IS_ERR(binderfs_entry)) {
|
|
|
|
proc->binderfs_entry = binderfs_entry;
|
|
|
|
} else {
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = PTR_ERR(binderfs_entry);
|
2020-01-10 23:44:01 +08:00
|
|
|
pr_warn("Unable to create file %s in binderfs (error %d)\n",
|
|
|
|
strbuf, error);
|
2019-09-04 00:16:55 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int binder_flush(struct file *filp, fl_owner_t id)
|
|
|
|
{
|
|
|
|
struct binder_proc *proc = filp->private_data;
|
|
|
|
|
|
|
|
binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
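/*
 * Flush support: mark every thread as needing to return and wake the
 * ones blocked waiting for work so pending reads can complete.
 */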
static void binder_deferred_flush(struct binder_proc *proc)
|
|
|
|
{
|
|
|
|
struct rb_node *n;
|
|
|
|
int wake_count = 0;
|
2014-05-01 00:30:23 +08:00
|
|
|
|
2017-06-30 03:02:05 +08:00
|
|
|
binder_inner_proc_lock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
|
|
|
|
struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
|
2014-05-01 00:30:23 +08:00
|
|
|
|
2017-06-30 03:01:49 +08:00
|
|
|
thread->looper_need_return = true;
|
2011-11-30 19:18:14 +08:00
|
|
|
if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
|
|
|
|
wake_up_interruptible(&thread->wait);
|
|
|
|
wake_count++;
|
|
|
|
}
|
|
|
|
}
|
2017-06-30 03:02:05 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
|
|
|
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
|
|
|
|
"binder_flush: %d woke %d threads\n", proc->pid,
|
|
|
|
wake_count);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int binder_release(struct inode *nodp, struct file *filp)
|
|
|
|
{
|
|
|
|
struct binder_proc *proc = filp->private_data;
|
2014-05-01 00:30:23 +08:00
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
debugfs_remove(proc->debugfs_entry);
|
2019-09-04 00:16:55 +08:00
|
|
|
|
|
|
|
if (proc->binderfs_entry) {
|
|
|
|
binderfs_remove_file(proc->binderfs_entry);
|
|
|
|
proc->binderfs_entry = NULL;
|
|
|
|
}
|
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-03-12 18:41:59 +08:00
|
|
|
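/*
 * Release a node whose owning process is going away: move it to the
 * dead-nodes list and queue dead-binder work for every ref that
 * requested a death notification. Returns the updated ref count used
 * for the debug summary.
 */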
static int binder_node_release(struct binder_node *node, int refs)
|
|
|
|
{
|
|
|
|
struct binder_ref *ref;
|
|
|
|
int death = 0;
|
2017-06-30 03:02:01 +08:00
|
|
|
struct binder_proc *proc = node->proc;
|
2013-03-12 18:41:59 +08:00
|
|
|
|
2017-06-30 03:02:02 +08:00
|
|
|
binder_release_work(proc, &node->async_todo);
|
2017-06-30 03:02:01 +08:00
|
|
|
|
2017-06-30 03:02:03 +08:00
|
|
|
binder_node_lock(node);
|
2017-06-30 03:02:01 +08:00
|
|
|
binder_inner_proc_lock(proc);
|
2017-06-30 03:02:02 +08:00
|
|
|
binder_dequeue_work_ilocked(&node->work);
|
2017-06-30 03:01:59 +08:00
|
|
|
/*
|
|
|
|
* The caller must have taken a temporary ref on the node,
|
|
|
|
*/
|
|
|
|
BUG_ON(!node->tmp_refs);
|
|
|
|
if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
|
2017-06-30 03:02:01 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
2017-06-30 03:02:03 +08:00
|
|
|
binder_node_unlock(node);
|
2017-06-30 03:02:01 +08:00
|
|
|
binder_free_node(node);
|
2013-03-12 18:41:59 +08:00
|
|
|
|
|
|
|
return refs;
|
|
|
|
}
|
|
|
|
|
|
|
|
node->proc = NULL;
|
|
|
|
node->local_strong_refs = 0;
|
|
|
|
node->local_weak_refs = 0;
|
2017-06-30 03:02:01 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
2017-06-30 03:01:43 +08:00
|
|
|
|
|
|
|
spin_lock(&binder_dead_nodes_lock);
|
2013-03-12 18:41:59 +08:00
|
|
|
hlist_add_head(&node->dead_node, &binder_dead_nodes);
|
2017-06-30 03:01:43 +08:00
|
|
|
spin_unlock(&binder_dead_nodes_lock);
|
2013-03-12 18:41:59 +08:00
|
|
|
|
|
|
|
hlist_for_each_entry(ref, &node->refs, node_entry) {
|
|
|
|
refs++;
|
2017-06-30 03:02:10 +08:00
|
|
|
/*
|
|
|
|
* Need the node lock to synchronize
|
|
|
|
* with new notification requests and the
|
|
|
|
* inner lock to synchronize with queued
|
|
|
|
* death notifications.
|
|
|
|
*/
|
|
|
|
binder_inner_proc_lock(ref->proc);
|
|
|
|
if (!ref->death) {
|
|
|
|
binder_inner_proc_unlock(ref->proc);
|
2014-02-18 05:58:29 +08:00
|
|
|
continue;
|
2017-06-30 03:02:10 +08:00
|
|
|
}
|
2013-03-12 18:41:59 +08:00
|
|
|
|
|
|
|
death++;
|
|
|
|
|
2017-06-30 03:02:10 +08:00
|
|
|
BUG_ON(!list_empty(&ref->death->work.entry));
|
|
|
|
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
|
|
|
|
binder_enqueue_work_ilocked(&ref->death->work,
|
|
|
|
&ref->proc->todo);
|
2017-08-31 16:04:19 +08:00
|
|
|
binder_wakeup_proc_ilocked(ref->proc);
|
2017-06-30 03:02:02 +08:00
|
|
|
binder_inner_proc_unlock(ref->proc);
|
2013-03-12 18:41:59 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
binder_debug(BINDER_DEBUG_DEAD_BINDER,
|
|
|
|
"node %d now dead, refs %d, death %d\n",
|
|
|
|
node->debug_id, refs, death);
|
2017-06-30 03:02:03 +08:00
|
|
|
binder_node_unlock(node);
|
2017-06-30 03:01:59 +08:00
|
|
|
binder_put_node(node);
|
2013-03-12 18:41:59 +08:00
|
|
|
|
|
|
|
return refs;
|
|
|
|
}
|
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
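/*
 * Final teardown of a binder_proc: detach it from the global list and
 * from the context manager, release all threads, nodes, refs and
 * queued work, then drop the temporary proc reference.
 */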
static void binder_deferred_release(struct binder_proc *proc)
|
|
|
|
{
|
2017-02-04 06:40:46 +08:00
|
|
|
struct binder_context *context = proc->context;
|
2011-11-30 19:18:14 +08:00
|
|
|
struct rb_node *n;
|
2017-06-30 03:01:40 +08:00
|
|
|
int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2017-06-30 03:01:43 +08:00
|
|
|
mutex_lock(&binder_procs_lock);
|
2011-11-30 19:18:14 +08:00
|
|
|
hlist_del(&proc->proc_node);
|
2017-06-30 03:01:43 +08:00
|
|
|
mutex_unlock(&binder_procs_lock);
|
2013-03-12 18:42:00 +08:00
|
|
|
|
2017-06-30 03:01:43 +08:00
|
|
|
mutex_lock(&context->context_mgr_node_lock);
|
2017-02-04 06:40:46 +08:00
|
|
|
if (context->binder_context_mgr_node &&
|
|
|
|
context->binder_context_mgr_node->proc == proc) {
|
2011-11-30 19:18:14 +08:00
|
|
|
binder_debug(BINDER_DEBUG_DEAD_BINDER,
|
2013-03-12 18:42:02 +08:00
|
|
|
"%s: %d context_mgr_node gone\n",
|
|
|
|
__func__, proc->pid);
|
2017-02-04 06:40:46 +08:00
|
|
|
context->binder_context_mgr_node = NULL;
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
2017-06-30 03:01:43 +08:00
|
|
|
mutex_unlock(&context->context_mgr_node_lock);
|
2017-06-30 03:02:05 +08:00
|
|
|
binder_inner_proc_lock(proc);
|
2017-06-30 03:01:57 +08:00
|
|
|
/*
|
|
|
|
* Make sure proc stays alive after we
|
|
|
|
* remove all the threads
|
|
|
|
*/
|
|
|
|
proc->tmp_ref++;
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2017-06-30 03:01:57 +08:00
|
|
|
proc->is_dead = true;
|
2021-03-16 09:16:28 +08:00
|
|
|
proc->is_frozen = false;
|
2021-03-16 09:16:30 +08:00
|
|
|
proc->sync_recv = false;
|
|
|
|
proc->async_recv = false;
|
2011-11-30 19:18:14 +08:00
|
|
|
threads = 0;
|
|
|
|
active_transactions = 0;
|
|
|
|
while ((n = rb_first(&proc->threads))) {
|
2013-03-12 18:42:00 +08:00
|
|
|
struct binder_thread *thread;
|
|
|
|
|
|
|
|
thread = rb_entry(n, struct binder_thread, rb_node);
|
2017-06-30 03:02:05 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
threads++;
|
2017-06-30 03:01:57 +08:00
|
|
|
active_transactions += binder_thread_release(proc, thread);
|
2017-06-30 03:02:05 +08:00
|
|
|
binder_inner_proc_lock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
2013-03-12 18:42:00 +08:00
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
nodes = 0;
|
|
|
|
incoming_refs = 0;
|
|
|
|
while ((n = rb_first(&proc->nodes))) {
|
2013-03-12 18:42:00 +08:00
|
|
|
struct binder_node *node;
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2013-03-12 18:42:00 +08:00
|
|
|
node = rb_entry(n, struct binder_node, rb_node);
|
2011-11-30 19:18:14 +08:00
|
|
|
nodes++;
|
2017-06-30 03:01:59 +08:00
|
|
|
/*
|
|
|
|
* take a temporary ref on the node before
|
|
|
|
* calling binder_node_release() which will either
|
|
|
|
* kfree() the node or call binder_put_node()
|
|
|
|
*/
|
2017-06-30 03:02:04 +08:00
|
|
|
binder_inc_node_tmpref_ilocked(node);
|
2011-11-30 19:18:14 +08:00
|
|
|
rb_erase(&node->rb_node, &proc->nodes);
|
2017-06-30 03:02:04 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
2013-03-12 18:41:59 +08:00
|
|
|
incoming_refs = binder_node_release(node, incoming_refs);
|
2017-06-30 03:02:04 +08:00
|
|
|
binder_inner_proc_lock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
2017-06-30 03:02:04 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
2013-03-12 18:42:00 +08:00
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
outgoing_refs = 0;
|
2017-06-30 03:02:08 +08:00
|
|
|
binder_proc_lock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
while ((n = rb_first(&proc->refs_by_desc))) {
|
2013-03-12 18:42:00 +08:00
|
|
|
struct binder_ref *ref;
|
|
|
|
|
|
|
|
ref = rb_entry(n, struct binder_ref, rb_node_desc);
|
2011-11-30 19:18:14 +08:00
|
|
|
outgoing_refs++;
|
2017-06-30 03:02:08 +08:00
|
|
|
binder_cleanup_ref_olocked(ref);
|
|
|
|
binder_proc_unlock(proc);
|
2017-06-30 03:01:58 +08:00
|
|
|
binder_free_ref(ref);
|
2017-06-30 03:02:08 +08:00
|
|
|
binder_proc_lock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
2017-06-30 03:02:08 +08:00
|
|
|
binder_proc_unlock(proc);
|
2013-03-12 18:42:00 +08:00
|
|
|
|
2017-06-30 03:02:02 +08:00
|
|
|
binder_release_work(proc, &proc->todo);
|
|
|
|
binder_release_work(proc, &proc->delivered_death);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
|
|
|
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
|
2017-06-30 03:01:40 +08:00
|
|
|
"%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
|
2013-03-12 18:42:02 +08:00
|
|
|
__func__, proc->pid, threads, nodes, incoming_refs,
|
2017-06-30 03:01:40 +08:00
|
|
|
outgoing_refs, active_transactions);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2017-06-30 03:01:57 +08:00
|
|
|
binder_proc_dec_tmpref(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
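/*
 * Workqueue handler that drains binder_deferred_list, performing the
 * deferred flush and release work outside the original caller's
 * context.
 */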
static void binder_deferred_func(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct binder_proc *proc;
|
|
|
|
|
|
|
|
int defer;
|
2014-05-01 00:30:23 +08:00
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
do {
|
|
|
|
mutex_lock(&binder_deferred_lock);
|
|
|
|
if (!hlist_empty(&binder_deferred_list)) {
|
|
|
|
proc = hlist_entry(binder_deferred_list.first,
|
|
|
|
struct binder_proc, deferred_work_node);
|
|
|
|
hlist_del_init(&proc->deferred_work_node);
|
|
|
|
defer = proc->deferred_work;
|
|
|
|
proc->deferred_work = 0;
|
|
|
|
} else {
|
|
|
|
proc = NULL;
|
|
|
|
defer = 0;
|
|
|
|
}
|
|
|
|
mutex_unlock(&binder_deferred_lock);
|
|
|
|
|
|
|
|
if (defer & BINDER_DEFERRED_FLUSH)
|
|
|
|
binder_deferred_flush(proc);
|
|
|
|
|
|
|
|
if (defer & BINDER_DEFERRED_RELEASE)
|
|
|
|
binder_deferred_release(proc); /* frees proc */
|
|
|
|
} while (proc);
|
|
|
|
}
|
|
|
|
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
|
|
|
|
|
|
|
|
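/* Queue deferred work for @proc and schedule the shared work item. */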
static void
|
|
|
|
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
|
|
|
|
{
|
|
|
|
mutex_lock(&binder_deferred_lock);
|
|
|
|
proc->deferred_work |= defer;
|
|
|
|
if (hlist_unhashed(&proc->deferred_work_node)) {
|
|
|
|
hlist_add_head(&proc->deferred_work_node,
|
|
|
|
&binder_deferred_list);
|
2016-08-14 00:46:24 +08:00
|
|
|
schedule_work(&binder_deferred_work);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
mutex_unlock(&binder_deferred_lock);
|
|
|
|
}
|
|
|
|
|
2017-06-30 03:02:09 +08:00
|
|
|
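/*
 * Debugfs helper: print a single transaction. The buffer is only
 * dereferenced when the caller holds the inner lock of the proc that
 * owns it.
 */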
static void print_binder_transaction_ilocked(struct seq_file *m,
|
|
|
|
struct binder_proc *proc,
|
|
|
|
const char *prefix,
|
|
|
|
struct binder_transaction *t)
|
2009-04-29 11:57:50 +08:00
|
|
|
{
|
2017-06-30 03:02:09 +08:00
|
|
|
struct binder_proc *to_proc;
|
|
|
|
struct binder_buffer *buffer = t->buffer;
|
|
|
|
|
2017-06-30 03:01:57 +08:00
|
|
|
spin_lock(&t->lock);
|
2017-06-30 03:02:09 +08:00
|
|
|
to_proc = t->to_proc;
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_printf(m,
|
2018-02-08 05:57:37 +08:00
|
|
|
"%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
|
2009-04-29 11:57:50 +08:00
|
|
|
prefix, t->debug_id, t,
|
|
|
|
t->from ? t->from->proc->pid : 0,
|
|
|
|
t->from ? t->from->pid : 0,
|
2017-06-30 03:02:09 +08:00
|
|
|
to_proc ? to_proc->pid : 0,
|
2009-04-29 11:57:50 +08:00
|
|
|
t->to_thread ? t->to_thread->pid : 0,
|
|
|
|
t->code, t->flags, t->priority, t->need_reply);
|
2017-06-30 03:01:57 +08:00
|
|
|
spin_unlock(&t->lock);
|
|
|
|
|
2017-06-30 03:02:09 +08:00
|
|
|
if (proc != to_proc) {
|
|
|
|
/*
|
|
|
|
* Can only safely deref buffer if we are holding the
|
|
|
|
* correct proc inner lock for this node
|
|
|
|
*/
|
|
|
|
seq_puts(m, "\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (buffer == NULL) {
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_puts(m, " buffer free\n");
|
|
|
|
return;
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
2017-06-30 03:02:09 +08:00
|
|
|
if (buffer->target_node)
|
|
|
|
seq_printf(m, " node %d", buffer->target_node->debug_id);
|
2018-02-08 05:57:37 +08:00
|
|
|
seq_printf(m, " size %zd:%zd data %pK\n",
|
2017-06-30 03:02:09 +08:00
|
|
|
buffer->data_size, buffer->offsets_size,
|
2019-02-09 02:35:20 +08:00
|
|
|
buffer->user_data);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
2017-06-30 03:02:09 +08:00
|
|
|
static void print_binder_work_ilocked(struct seq_file *m,
|
|
|
|
struct binder_proc *proc,
|
|
|
|
const char *prefix,
|
|
|
|
const char *transaction_prefix,
|
|
|
|
struct binder_work *w)
|
2011-11-30 19:18:14 +08:00
|
|
|
{
|
|
|
|
struct binder_node *node;
|
|
|
|
struct binder_transaction *t;
|
|
|
|
|
|
|
|
switch (w->type) {
|
|
|
|
case BINDER_WORK_TRANSACTION:
|
|
|
|
t = container_of(w, struct binder_transaction, work);
|
2017-06-30 03:02:09 +08:00
|
|
|
print_binder_transaction_ilocked(
|
|
|
|
m, proc, transaction_prefix, t);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
2017-06-30 03:01:55 +08:00
|
|
|
case BINDER_WORK_RETURN_ERROR: {
|
|
|
|
struct binder_error *e = container_of(
|
|
|
|
w, struct binder_error, work);
|
|
|
|
|
|
|
|
seq_printf(m, "%stransaction error: %u\n",
|
|
|
|
prefix, e->cmd);
|
|
|
|
} break;
|
2011-11-30 19:18:14 +08:00
|
|
|
case BINDER_WORK_TRANSACTION_COMPLETE:
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_printf(m, "%stransaction complete\n", prefix);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
case BINDER_WORK_NODE:
|
|
|
|
node = container_of(w, struct binder_node, work);
|
2014-02-22 06:40:26 +08:00
|
|
|
seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
|
|
|
|
prefix, node->debug_id,
|
|
|
|
(u64)node->ptr, (u64)node->cookie);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
case BINDER_WORK_DEAD_BINDER:
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_printf(m, "%shas dead binder\n", prefix);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_printf(m, "%shas cleared dead binder\n", prefix);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_printf(m, "%shas cleared death notification\n", prefix);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
default:
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-06-30 03:02:02 +08:00
|
|
|
static void print_binder_thread_ilocked(struct seq_file *m,
|
|
|
|
struct binder_thread *thread,
|
|
|
|
int print_always)
|
2011-11-30 19:18:14 +08:00
|
|
|
{
|
|
|
|
struct binder_transaction *t;
|
|
|
|
struct binder_work *w;
|
2009-04-29 11:57:50 +08:00
|
|
|
size_t start_pos = m->count;
|
|
|
|
size_t header_pos;
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2017-06-30 03:01:57 +08:00
|
|
|
seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
|
2017-06-30 03:01:49 +08:00
|
|
|
thread->pid, thread->looper,
|
2017-06-30 03:01:57 +08:00
|
|
|
thread->looper_need_return,
|
|
|
|
atomic_read(&thread->tmp_ref));
|
2009-04-29 11:57:50 +08:00
|
|
|
header_pos = m->count;
|
2011-11-30 19:18:14 +08:00
|
|
|
t = thread->transaction_stack;
|
|
|
|
while (t) {
|
|
|
|
if (t->from == thread) {
|
2017-06-30 03:02:09 +08:00
|
|
|
print_binder_transaction_ilocked(m, thread->proc,
|
|
|
|
" outgoing transaction", t);
|
2011-11-30 19:18:14 +08:00
|
|
|
t = t->from_parent;
|
|
|
|
} else if (t->to_thread == thread) {
|
2017-06-30 03:02:09 +08:00
|
|
|
print_binder_transaction_ilocked(m, thread->proc,
|
2009-04-29 11:57:50 +08:00
|
|
|
" incoming transaction", t);
|
2011-11-30 19:18:14 +08:00
|
|
|
t = t->to_parent;
|
|
|
|
} else {
|
2017-06-30 03:02:09 +08:00
|
|
|
print_binder_transaction_ilocked(m, thread->proc,
|
|
|
|
" bad transaction", t);
|
2011-11-30 19:18:14 +08:00
|
|
|
t = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
list_for_each_entry(w, &thread->todo, entry) {
|
2017-06-30 03:02:09 +08:00
|
|
|
print_binder_work_ilocked(m, thread->proc, " ",
|
2017-06-30 03:02:02 +08:00
|
|
|
" pending transaction", w);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
2009-04-29 11:57:50 +08:00
|
|
|
if (!print_always && m->count == header_pos)
|
|
|
|
m->count = start_pos;
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
2017-06-30 03:02:04 +08:00
|
|
|
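/*
 * Print one node, the processes holding refs to it and any pending
 * async work.
 */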
static void print_binder_node_nilocked(struct seq_file *m,
|
|
|
|
struct binder_node *node)
|
2011-11-30 19:18:14 +08:00
|
|
|
{
|
|
|
|
struct binder_ref *ref;
|
|
|
|
struct binder_work *w;
|
|
|
|
int count;
|
|
|
|
|
|
|
|
count = 0;
|
|
|
|
hlist_for_each_entry(ref, &node->refs, node_entry)
|
2011-11-30 19:18:14 +08:00
|
|
|
count++;
|
|
|
|
|
2017-06-30 03:01:59 +08:00
|
|
|
seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
|
2014-02-22 06:40:26 +08:00
|
|
|
node->debug_id, (u64)node->ptr, (u64)node->cookie,
|
2009-04-29 11:57:50 +08:00
|
|
|
node->has_strong_ref, node->has_weak_ref,
|
|
|
|
node->local_strong_refs, node->local_weak_refs,
|
2017-06-30 03:01:59 +08:00
|
|
|
node->internal_strong_refs, count, node->tmp_refs);
|
2011-11-30 19:18:14 +08:00
|
|
|
if (count) {
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_puts(m, " proc");
|
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for each entry iterators were conceived
list_for_each_entry(pos, head, member)
The hlist ones were greedy and wanted an extra parameter:
hlist_for_each_entry(tpos, pos, head, member)
Why did they need an extra pos parameter? I'm not quite sure. Not only
they don't really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.
Besides the semantic patch, there was some manual work required:
- Fix up the actual hlist iterators in linux/list.h
- Fix up the declaration of other iterators based on the hlist ones.
- A very small amount of places were using the 'node' parameter, this
was modified to use 'obj->member' instead.
- Coccinelle didn't handle the hlist_for_each_entry_safe iterator
properly, so those had to be fixed up manually.
The semantic patch which is mostly the work of Peter Senna Tschudin is here:
@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@
-T b;
<+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
...+>
[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foudnation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
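The calling convention this patch introduces is what lets the binder code below walk node->refs with only a struct binder_ref cursor. A minimal userspace sketch of that convention follows; the simplified hlist types, the struct item example, and the reduced hlist_for_each_entry macro are stand-ins for illustration only, not the definitions from <linux/list.h> or anything in binder.c.

#include <stddef.h>
#include <stdio.h>

/* Simplified, userspace-only stand-ins for the kernel hlist types; the real
 * definitions live in <linux/list.h>.  They exist here only so the iterator
 * below can be compiled and run as a demonstration.
 */
struct hlist_node { struct hlist_node *next; };
struct hlist_head { struct hlist_node *first; };

struct item {
	int value;
	struct hlist_node node;
};

/* Post-2013 calling convention: only the containing-type cursor is passed.
 * The old form was hlist_for_each_entry(tpos, pos, head, member), with an
 * extra struct hlist_node *pos that callers had to declare but never used.
 * This reduced macro is hard-wired to struct item to keep the sketch short.
 */
#define hlist_for_each_entry(pos, head, member)                            \
	for ((pos) = (head)->first                                         \
		? (struct item *)((char *)(head)->first -                  \
				  offsetof(struct item, member))           \
		: NULL;                                                    \
	     (pos);                                                        \
	     (pos) = (pos)->member.next                                    \
		? (struct item *)((char *)(pos)->member.next -             \
				  offsetof(struct item, member))           \
		: NULL)

int main(void)
{
	struct item c = { 3, { NULL } };
	struct item b = { 2, { &c.node } };
	struct item a = { 1, { &b.node } };
	struct hlist_head head = { &a.node };
	struct item *pos;

	/* Walk the chain a -> b -> c with the two-argument-lighter iterator. */
	hlist_for_each_entry(pos, &head, node)
		printf("value %d\n", pos->value);

	return 0;
}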
2013-02-28 09:06:00 +08:00
|
|
|
hlist_for_each_entry(ref, &node->refs, node_entry)
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_printf(m, " %d", ref->proc->pid);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_puts(m, "\n");
|
2017-06-30 03:02:02 +08:00
|
|
|
if (node->proc) {
|
|
|
|
list_for_each_entry(w, &node->async_todo, entry)
|
2017-06-30 03:02:09 +08:00
|
|
|
print_binder_work_ilocked(m, node->proc, " ",
|
2017-06-30 03:02:02 +08:00
|
|
|
" pending async transaction", w);
|
|
|
|
}
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
2017-06-30 03:02:08 +08:00
|
|
|
static void print_binder_ref_olocked(struct seq_file *m,
|
|
|
|
struct binder_ref *ref)
|
2011-11-30 19:18:14 +08:00
|
|
|
{
|
2017-06-30 03:02:03 +08:00
|
|
|
binder_node_lock(ref->node);
|
2017-06-30 03:01:58 +08:00
|
|
|
seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
|
|
|
|
ref->data.debug_id, ref->data.desc,
|
|
|
|
ref->node->proc ? "" : "dead ",
|
|
|
|
ref->node->debug_id, ref->data.strong,
|
|
|
|
ref->data.weak, ref->death);
|
2017-06-30 03:02:03 +08:00
|
|
|
binder_node_unlock(ref->node);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
static void print_binder_proc(struct seq_file *m,
|
|
|
|
struct binder_proc *proc, int print_all)
|
2011-11-30 19:18:14 +08:00
|
|
|
{
|
|
|
|
struct binder_work *w;
|
|
|
|
struct rb_node *n;
|
2009-04-29 11:57:50 +08:00
|
|
|
size_t start_pos = m->count;
|
|
|
|
size_t header_pos;
|
2017-06-30 03:02:04 +08:00
|
|
|
struct binder_node *last_node = NULL;
|
2009-04-29 11:57:50 +08:00
|
|
|
|
|
|
|
seq_printf(m, "proc %d\n", proc->pid);
|
2017-02-04 06:40:47 +08:00
|
|
|
seq_printf(m, "context %s\n", proc->context->name);
|
2009-04-29 11:57:50 +08:00
|
|
|
header_pos = m->count;
|
|
|
|
|
2017-06-30 03:02:02 +08:00
|
|
|
binder_inner_proc_lock(proc);
|
2009-04-29 11:57:50 +08:00
|
|
|
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
|
2017-06-30 03:02:02 +08:00
|
|
|
print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
|
2009-04-29 11:57:50 +08:00
|
|
|
rb_node), print_all);
|
2017-06-30 03:02:04 +08:00
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
|
2011-11-30 19:18:14 +08:00
|
|
|
struct binder_node *node = rb_entry(n, struct binder_node,
|
|
|
|
rb_node);
|
2018-12-06 07:19:26 +08:00
|
|
|
if (!print_all && !node->has_async_transaction)
|
|
|
|
continue;
|
|
|
|
|
2017-06-30 03:02:04 +08:00
|
|
|
/*
|
|
|
|
* take a temporary reference on the node so it
|
|
|
|
* survives and isn't removed from the tree
|
|
|
|
* while we print it.
|
|
|
|
*/
|
|
|
|
binder_inc_node_tmpref_ilocked(node);
|
|
|
|
/* Need to drop inner lock to take node lock */
|
|
|
|
binder_inner_proc_unlock(proc);
|
|
|
|
if (last_node)
|
|
|
|
binder_put_node(last_node);
|
|
|
|
binder_node_inner_lock(node);
|
|
|
|
print_binder_node_nilocked(m, node);
|
|
|
|
binder_node_inner_unlock(node);
|
|
|
|
last_node = node;
|
|
|
|
binder_inner_proc_lock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
2017-06-30 03:02:04 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
|
|
|
if (last_node)
|
|
|
|
binder_put_node(last_node);
|
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
if (print_all) {
|
2017-06-30 03:02:08 +08:00
|
|
|
binder_proc_lock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
for (n = rb_first(&proc->refs_by_desc);
|
2009-04-29 11:57:50 +08:00
|
|
|
n != NULL;
|
2011-11-30 19:18:14 +08:00
|
|
|
n = rb_next(n))
|
2017-06-30 03:02:08 +08:00
|
|
|
print_binder_ref_olocked(m, rb_entry(n,
|
|
|
|
struct binder_ref,
|
|
|
|
rb_node_desc));
|
|
|
|
binder_proc_unlock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
2017-06-30 03:01:40 +08:00
|
|
|
binder_alloc_print_allocated(m, &proc->alloc);
|
2017-06-30 03:02:02 +08:00
|
|
|
binder_inner_proc_lock(proc);
|
2009-04-29 11:57:50 +08:00
|
|
|
list_for_each_entry(w, &proc->todo, entry)
|
2017-06-30 03:02:09 +08:00
|
|
|
print_binder_work_ilocked(m, proc, " ",
|
|
|
|
" pending transaction", w);
|
2011-11-30 19:18:14 +08:00
|
|
|
list_for_each_entry(w, &proc->delivered_death, entry) {
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_puts(m, " has delivered dead binder\n");
|
2011-11-30 19:18:14 +08:00
|
|
|
break;
|
|
|
|
}
|
2017-06-30 03:02:02 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
2009-04-29 11:57:50 +08:00
|
|
|
if (!print_all && m->count == header_pos)
|
|
|
|
m->count = start_pos;
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
2012-12-22 07:00:45 +08:00
|
|
|
static const char * const binder_return_strings[] = {
|
2011-11-30 19:18:14 +08:00
|
|
|
"BR_ERROR",
|
|
|
|
"BR_OK",
|
|
|
|
"BR_TRANSACTION",
|
|
|
|
"BR_REPLY",
|
|
|
|
"BR_ACQUIRE_RESULT",
|
|
|
|
"BR_DEAD_REPLY",
|
|
|
|
"BR_TRANSACTION_COMPLETE",
|
|
|
|
"BR_INCREFS",
|
|
|
|
"BR_ACQUIRE",
|
|
|
|
"BR_RELEASE",
|
|
|
|
"BR_DECREFS",
|
|
|
|
"BR_ATTEMPT_ACQUIRE",
|
|
|
|
"BR_NOOP",
|
|
|
|
"BR_SPAWN_LOOPER",
|
|
|
|
"BR_FINISHED",
|
|
|
|
"BR_DEAD_BINDER",
|
|
|
|
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
|
2021-04-09 17:40:45 +08:00
|
|
|
"BR_FAILED_REPLY",
|
|
|
|
"BR_FROZEN_REPLY",
|
2021-04-09 17:40:46 +08:00
|
|
|
"BR_ONEWAY_SPAM_SUSPECT",
|
2022-11-24 04:16:54 +08:00
|
|
|
"BR_TRANSACTION_PENDING_FROZEN"
|
2011-11-30 19:18:14 +08:00
|
|
|
};
|
|
|
|
|
2012-12-22 07:00:45 +08:00
|
|
|
static const char * const binder_command_strings[] = {
|
2011-11-30 19:18:14 +08:00
|
|
|
"BC_TRANSACTION",
|
|
|
|
"BC_REPLY",
|
|
|
|
"BC_ACQUIRE_RESULT",
|
|
|
|
"BC_FREE_BUFFER",
|
|
|
|
"BC_INCREFS",
|
|
|
|
"BC_ACQUIRE",
|
|
|
|
"BC_RELEASE",
|
|
|
|
"BC_DECREFS",
|
|
|
|
"BC_INCREFS_DONE",
|
|
|
|
"BC_ACQUIRE_DONE",
|
|
|
|
"BC_ATTEMPT_ACQUIRE",
|
|
|
|
"BC_REGISTER_LOOPER",
|
|
|
|
"BC_ENTER_LOOPER",
|
|
|
|
"BC_EXIT_LOOPER",
|
|
|
|
"BC_REQUEST_DEATH_NOTIFICATION",
|
|
|
|
"BC_CLEAR_DEATH_NOTIFICATION",
|
2017-02-04 06:40:51 +08:00
|
|
|
"BC_DEAD_BINDER_DONE",
|
|
|
|
"BC_TRANSACTION_SG",
|
|
|
|
"BC_REPLY_SG",
|
2011-11-30 19:18:14 +08:00
|
|
|
};
|
|
|
|
|
2012-12-22 07:00:45 +08:00
|
|
|
static const char * const binder_objstat_strings[] = {
|
2011-11-30 19:18:14 +08:00
|
|
|
"proc",
|
|
|
|
"thread",
|
|
|
|
"node",
|
|
|
|
"ref",
|
|
|
|
"death",
|
|
|
|
"transaction",
|
|
|
|
"transaction_complete"
|
|
|
|
};
|
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
static void print_binder_stats(struct seq_file *m, const char *prefix,
|
|
|
|
struct binder_stats *stats)
|
2011-11-30 19:18:14 +08:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
|
2009-04-29 11:57:50 +08:00
|
|
|
ARRAY_SIZE(binder_command_strings));
|
2011-11-30 19:18:14 +08:00
|
|
|
for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
|
2017-06-30 03:01:44 +08:00
|
|
|
int temp = atomic_read(&stats->bc[i]);
|
|
|
|
|
|
|
|
if (temp)
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_printf(m, "%s%s: %d\n", prefix,
|
2017-06-30 03:01:44 +08:00
|
|
|
binder_command_strings[i], temp);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
|
2009-04-29 11:57:50 +08:00
|
|
|
ARRAY_SIZE(binder_return_strings));
|
2011-11-30 19:18:14 +08:00
|
|
|
for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
|
2017-06-30 03:01:44 +08:00
|
|
|
int temp = atomic_read(&stats->br[i]);
|
|
|
|
|
|
|
|
if (temp)
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_printf(m, "%s%s: %d\n", prefix,
|
2017-06-30 03:01:44 +08:00
|
|
|
binder_return_strings[i], temp);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
|
2009-04-29 11:57:50 +08:00
|
|
|
ARRAY_SIZE(binder_objstat_strings));
|
2011-11-30 19:18:14 +08:00
|
|
|
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
|
2009-04-29 11:57:50 +08:00
|
|
|
ARRAY_SIZE(stats->obj_deleted));
|
2011-11-30 19:18:14 +08:00
|
|
|
for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
|
2017-06-30 03:01:44 +08:00
|
|
|
int created = atomic_read(&stats->obj_created[i]);
|
|
|
|
int deleted = atomic_read(&stats->obj_deleted[i]);
|
|
|
|
|
|
|
|
if (created || deleted)
|
|
|
|
seq_printf(m, "%s%s: active %d total %d\n",
|
|
|
|
prefix,
|
2009-04-29 11:57:50 +08:00
|
|
|
binder_objstat_strings[i],
|
2017-06-30 03:01:44 +08:00
|
|
|
created - deleted,
|
|
|
|
created);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
static void print_binder_proc_stats(struct seq_file *m,
|
|
|
|
struct binder_proc *proc)
|
2011-11-30 19:18:14 +08:00
|
|
|
{
|
|
|
|
struct binder_work *w;
|
2017-08-31 16:04:18 +08:00
|
|
|
struct binder_thread *thread;
|
2011-11-30 19:18:14 +08:00
|
|
|
struct rb_node *n;
|
2017-08-31 16:04:18 +08:00
|
|
|
int count, strong, weak, ready_threads;
|
2017-06-30 03:02:05 +08:00
|
|
|
size_t free_async_space =
|
|
|
|
binder_alloc_get_free_async_space(&proc->alloc);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_printf(m, "proc %d\n", proc->pid);
|
2017-02-04 06:40:47 +08:00
|
|
|
seq_printf(m, "context %s\n", proc->context->name);
|
2011-11-30 19:18:14 +08:00
|
|
|
count = 0;
|
2017-08-31 16:04:18 +08:00
|
|
|
ready_threads = 0;
|
2017-06-30 03:02:05 +08:00
|
|
|
binder_inner_proc_lock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
|
|
|
|
count++;
|
2017-08-31 16:04:18 +08:00
|
|
|
|
|
|
|
list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
|
|
|
|
ready_threads++;
|
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_printf(m, " threads: %d\n", count);
|
|
|
|
seq_printf(m, " requested threads: %d+%d/%d\n"
|
2011-11-30 19:18:14 +08:00
|
|
|
" ready threads %d\n"
|
|
|
|
" free async space %zd\n", proc->requested_threads,
|
|
|
|
proc->requested_threads_started, proc->max_threads,
|
2017-08-31 16:04:18 +08:00
|
|
|
ready_threads,
|
2017-06-30 03:02:05 +08:00
|
|
|
free_async_space);
|
2011-11-30 19:18:14 +08:00
|
|
|
count = 0;
|
|
|
|
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
|
|
|
|
count++;
|
2017-06-30 03:02:04 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_printf(m, " nodes: %d\n", count);
|
2011-11-30 19:18:14 +08:00
|
|
|
count = 0;
|
|
|
|
strong = 0;
|
|
|
|
weak = 0;
|
2017-06-30 03:02:08 +08:00
|
|
|
binder_proc_lock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
|
|
|
|
struct binder_ref *ref = rb_entry(n, struct binder_ref,
|
|
|
|
rb_node_desc);
|
|
|
|
count++;
|
2017-06-30 03:01:58 +08:00
|
|
|
strong += ref->data.strong;
|
|
|
|
weak += ref->data.weak;
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
2017-06-30 03:02:08 +08:00
|
|
|
binder_proc_unlock(proc);
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2017-06-30 03:01:40 +08:00
|
|
|
count = binder_alloc_get_allocated_count(&proc->alloc);
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_printf(m, " buffers: %d\n", count);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2017-09-01 02:56:36 +08:00
|
|
|
binder_alloc_print_pages(m, &proc->alloc);
|
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
count = 0;
|
2017-06-30 03:02:02 +08:00
|
|
|
binder_inner_proc_lock(proc);
|
2011-11-30 19:18:14 +08:00
|
|
|
list_for_each_entry(w, &proc->todo, entry) {
|
2017-06-30 03:02:02 +08:00
|
|
|
if (w->type == BINDER_WORK_TRANSACTION)
|
2011-11-30 19:18:14 +08:00
|
|
|
count++;
|
|
|
|
}
|
2017-06-30 03:02:02 +08:00
|
|
|
binder_inner_proc_unlock(proc);
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_printf(m, " pending transactions: %d\n", count);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
print_binder_stats(m, " ", &proc->stats);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
2022-07-02 02:20:41 +08:00
|
|
|
static int state_show(struct seq_file *m, void *unused)
|
2011-11-30 19:18:14 +08:00
|
|
|
{
|
|
|
|
struct binder_proc *proc;
|
|
|
|
struct binder_node *node;
|
2017-06-30 03:02:03 +08:00
|
|
|
struct binder_node *last_node = NULL;
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_puts(m, "binder state:\n");
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2017-06-30 03:01:43 +08:00
|
|
|
spin_lock(&binder_dead_nodes_lock);
|
2011-11-30 19:18:14 +08:00
|
|
|
if (!hlist_empty(&binder_dead_nodes))
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_puts(m, "dead nodes:\n");
|
2017-06-30 03:02:03 +08:00
|
|
|
hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
|
|
|
|
/*
|
|
|
|
* take a temporary reference on the node so it
|
|
|
|
* survives and isn't removed from the list
|
|
|
|
* while we print it.
|
|
|
|
*/
|
|
|
|
node->tmp_refs++;
|
|
|
|
spin_unlock(&binder_dead_nodes_lock);
|
|
|
|
if (last_node)
|
|
|
|
binder_put_node(last_node);
|
|
|
|
binder_node_lock(node);
|
2017-06-30 03:02:04 +08:00
|
|
|
print_binder_node_nilocked(m, node);
|
2017-06-30 03:02:03 +08:00
|
|
|
binder_node_unlock(node);
|
|
|
|
last_node = node;
|
|
|
|
spin_lock(&binder_dead_nodes_lock);
|
|
|
|
}
|
2017-06-30 03:01:43 +08:00
|
|
|
spin_unlock(&binder_dead_nodes_lock);
|
2017-06-30 03:02:03 +08:00
|
|
|
if (last_node)
|
|
|
|
binder_put_node(last_node);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2017-06-30 03:01:43 +08:00
|
|
|
mutex_lock(&binder_procs_lock);
|
2013-02-28 09:06:00 +08:00
|
|
|
hlist_for_each_entry(proc, &binder_procs, proc_node)
|
2009-04-29 11:57:50 +08:00
|
|
|
print_binder_proc(m, proc, 1);
|
2017-06-30 03:01:43 +08:00
|
|
|
mutex_unlock(&binder_procs_lock);
|
2017-06-30 03:02:11 +08:00
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
return 0;
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
2022-07-02 02:20:41 +08:00
|
|
|
static int stats_show(struct seq_file *m, void *unused)
|
2011-11-30 19:18:14 +08:00
|
|
|
{
|
|
|
|
struct binder_proc *proc;
|
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_puts(m, "binder stats:\n");
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
print_binder_stats(m, "", &binder_stats);
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2017-06-30 03:01:43 +08:00
|
|
|
mutex_lock(&binder_procs_lock);
|
2013-02-28 09:06:00 +08:00
|
|
|
hlist_for_each_entry(proc, &binder_procs, proc_node)
|
2009-04-29 11:57:50 +08:00
|
|
|
print_binder_proc_stats(m, proc);
|
2017-06-30 03:01:43 +08:00
|
|
|
mutex_unlock(&binder_procs_lock);
|
2017-06-30 03:02:11 +08:00
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
return 0;
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
2022-07-02 02:20:41 +08:00
|
|
|
static int transactions_show(struct seq_file *m, void *unused)
|
2011-11-30 19:18:14 +08:00
|
|
|
{
|
|
|
|
struct binder_proc *proc;
|
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_puts(m, "binder transactions:\n");
|
2017-06-30 03:01:43 +08:00
|
|
|
mutex_lock(&binder_procs_lock);
|
2013-02-28 09:06:00 +08:00
|
|
|
hlist_for_each_entry(proc, &binder_procs, proc_node)
|
2009-04-29 11:57:50 +08:00
|
|
|
print_binder_proc(m, proc, 0);
|
2017-06-30 03:01:43 +08:00
|
|
|
mutex_unlock(&binder_procs_lock);
|
2017-06-30 03:02:11 +08:00
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
return 0;
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
2018-12-01 09:26:30 +08:00
|
|
|
static int proc_show(struct seq_file *m, void *unused)
|
2011-11-30 19:18:14 +08:00
|
|
|
{
|
2016-02-10 13:05:33 +08:00
|
|
|
struct binder_proc *itr;
|
2017-02-04 06:40:47 +08:00
|
|
|
int pid = (unsigned long)m->private;
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2017-06-30 03:01:43 +08:00
|
|
|
mutex_lock(&binder_procs_lock);
|
2016-02-10 13:05:33 +08:00
|
|
|
hlist_for_each_entry(itr, &binder_procs, proc_node) {
|
2017-02-04 06:40:47 +08:00
|
|
|
if (itr->pid == pid) {
|
|
|
|
seq_puts(m, "binder proc state:\n");
|
|
|
|
print_binder_proc(m, itr, 1);
|
2016-02-10 13:05:33 +08:00
|
|
|
}
|
|
|
|
}
|
2017-06-30 03:01:43 +08:00
|
|
|
mutex_unlock(&binder_procs_lock);
|
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
return 0;
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
static void print_binder_transaction_log_entry(struct seq_file *m,
|
2011-11-30 19:18:14 +08:00
|
|
|
struct binder_transaction_log_entry *e)
|
|
|
|
{
|
2017-06-30 03:01:53 +08:00
|
|
|
int debug_id = READ_ONCE(e->debug_id_done);
|
|
|
|
/*
|
|
|
|
* read barrier to guarantee debug_id_done read before
|
|
|
|
* we print the log values
|
|
|
|
*/
|
|
|
|
smp_rmb();
|
2009-04-29 11:57:50 +08:00
|
|
|
seq_printf(m,
|
2017-06-30 03:01:53 +08:00
|
|
|
"%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
|
2009-04-29 11:57:50 +08:00
|
|
|
e->debug_id, (e->call_type == 2) ? "reply" :
|
|
|
|
((e->call_type == 1) ? "async" : "call "), e->from_proc,
|
2017-02-04 06:40:47 +08:00
|
|
|
e->from_thread, e->to_proc, e->to_thread, e->context_name,
|
2017-06-30 03:01:46 +08:00
|
|
|
e->to_node, e->target_handle, e->data_size, e->offsets_size,
|
|
|
|
e->return_error, e->return_error_param,
|
|
|
|
e->return_error_line);
|
2017-06-30 03:01:53 +08:00
|
|
|
/*
|
|
|
|
* read-barrier to guarantee read of debug_id_done after
|
|
|
|
* done printing the fields of the entry
|
|
|
|
*/
|
|
|
|
smp_rmb();
|
|
|
|
seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
|
|
|
|
"\n" : " (incomplete)\n");
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
2022-07-02 02:20:41 +08:00
|
|
|
static int transaction_log_show(struct seq_file *m, void *unused)
|
2011-11-30 19:18:14 +08:00
|
|
|
{
|
2009-04-29 11:57:50 +08:00
|
|
|
struct binder_transaction_log *log = m->private;
|
2017-06-30 03:01:53 +08:00
|
|
|
unsigned int log_cur = atomic_read(&log->cur);
|
|
|
|
unsigned int count;
|
|
|
|
unsigned int cur;
|
2011-11-30 19:18:14 +08:00
|
|
|
int i;
|
|
|
|
|
2017-06-30 03:01:53 +08:00
|
|
|
count = log_cur + 1;
|
|
|
|
cur = count < ARRAY_SIZE(log->entry) && !log->full ?
|
|
|
|
0 : count % ARRAY_SIZE(log->entry);
|
|
|
|
if (count > ARRAY_SIZE(log->entry) || log->full)
|
|
|
|
count = ARRAY_SIZE(log->entry);
|
|
|
|
for (i = 0; i < count; i++) {
|
|
|
|
unsigned int index = cur++ % ARRAY_SIZE(log->entry);
|
|
|
|
|
|
|
|
print_binder_transaction_log_entry(m, &log->entry[index]);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
2009-04-29 11:57:50 +08:00
|
|
|
return 0;
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
|
|
|
|
binder: implement binderfs
As discussed at Linux Plumbers Conference 2018 in Vancouver [1] this is the
implementation of binderfs.
/* Abstract */
binderfs is a backwards-compatible filesystem for Android's binder ipc
mechanism. Each ipc namespace will mount a new binderfs instance. Mounting
binderfs multiple times at different locations in the same ipc namespace
will not cause a new super block to be allocated and hence it will be the
same filesystem instance.
Each new binderfs mount will have its own set of binder devices only
visible in the ipc namespace it has been mounted in. All devices in a new
binderfs mount will follow the scheme binder%d and numbering will always
start at 0.
/* Backwards compatibility */
Devices requested in the Kconfig via CONFIG_ANDROID_BINDER_DEVICES for the
initial ipc namespace will work as before. They will be registered via
misc_register() and appear in the devtmpfs mount. Specifically, the
standard devices binder, hwbinder, and vndbinder will all appear in their
standard locations in /dev. Mounting or unmounting the binderfs mount in
the initial ipc namespace will have no effect on these devices, i.e. they
will neither show up in the binderfs mount nor will they disappear when the
binderfs mount is gone.
/* binder-control */
Each new binderfs instance comes with a binder-control device. No other
devices will be present at first. The binder-control device can be used to
dynamically allocate binder devices. All requests operate on the binderfs
mount the binder-control device resides in.
Assuming a new instance of binderfs has been mounted at /dev/binderfs
via mount -t binderfs binderfs /dev/binderfs. Then a request to create a
new binder device can be made as illustrated in [2].
Binderfs devices can simply be removed via unlink().
/* Implementation details */
- dynamic major number allocation:
When binderfs is registered as a new filesystem it will dynamically
allocate a new major number. The allocated major number will be returned
in struct binderfs_device when a new binder device is allocated.
- global minor number tracking:
Minor are tracked in a global idr struct that is capped at
BINDERFS_MAX_MINOR. The minor number tracker is protected by a global
mutex. This is the only point of contention between binderfs mounts.
- struct binderfs_info:
Each binderfs super block has its own struct binderfs_info that tracks
specific details about a binderfs instance:
- ipc namespace
- dentry of the binder-control device
- root uid and root gid of the user namespace the binderfs instance
was mounted in
- mountable by user namespace root:
binderfs can be mounted by user namespace root in a non-initial user
namespace. The devices will be owned by user namespace root.
- binderfs binder devices without misc infrastructure:
New binder devices associated with a binderfs mount do not use the
full misc_register() infrastructure.
The misc_register() infrastructure can only create new devices in the
host's devtmpfs mount. binderfs does however only make devices appear
under its own mountpoint and thus allocates new character device nodes
from the inode of the root dentry of the super block. This will have
the side-effect that binderfs specific device nodes do not appear in
sysfs. This behavior is similar to devpts allocated pts devices and
has no effect on the functionality of the ipc mechanism itself.
[1]: https://goo.gl/JL2tfX
[2]: program to allocate a new binderfs binder device:
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/android/binder_ctl.h>
int main(int argc, char *argv[])
{
int fd, ret, saved_errno;
size_t len;
struct binderfs_device device = { 0 };
if (argc < 2)
exit(EXIT_FAILURE);
len = strlen(argv[1]);
if (len > BINDERFS_MAX_NAME)
exit(EXIT_FAILURE);
memcpy(device.name, argv[1], len);
fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
if (fd < 0) {
printf("%s - Failed to open binder-control device\n",
strerror(errno));
exit(EXIT_FAILURE);
}
ret = ioctl(fd, BINDER_CTL_ADD, &device);
saved_errno = errno;
close(fd);
errno = saved_errno;
if (ret < 0) {
printf("%s - Failed to allocate new binder device\n",
strerror(errno));
exit(EXIT_FAILURE);
}
printf("Allocated new binder device with major %d, minor %d, and "
"name %s\n", device.major, device.minor,
device.name);
exit(EXIT_SUCCESS);
}
Cc: Martijn Coenen <maco@android.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
Acked-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
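As the message above notes, a device created through binder-control is torn down with a plain unlink() on its node. A minimal sketch under the same assumptions as example [2] (binderfs mounted at /dev/binderfs, a device named "my-binder" previously allocated via BINDER_CTL_ADD; both names are placeholders):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
	/* Assumption: binderfs is mounted at /dev/binderfs and a device named
	 * "my-binder" was previously allocated via BINDER_CTL_ADD, as in the
	 * allocation example above.  A different path can be given on the
	 * command line.
	 */
	const char *path = argc > 1 ? argv[1] : "/dev/binderfs/my-binder";

	if (unlink(path) < 0) {
		printf("%s - Failed to remove binder device %s\n",
		       strerror(errno), path);
		return 1;
	}

	printf("Removed binder device %s\n", path);
	return 0;
}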
2018-12-14 20:11:14 +08:00
|
|
|
const struct file_operations binder_fops = {
|
2011-11-30 19:18:14 +08:00
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.poll = binder_poll,
|
|
|
|
.unlocked_ioctl = binder_ioctl,
|
2018-09-12 03:59:08 +08:00
|
|
|
.compat_ioctl = compat_ptr_ioctl,
|
2011-11-30 19:18:14 +08:00
|
|
|
.mmap = binder_mmap,
|
|
|
|
.open = binder_open,
|
|
|
|
.flush = binder_flush,
|
|
|
|
.release = binder_release,
|
|
|
|
};
|
|
|
|
|
2022-07-02 02:20:41 +08:00
|
|
|
DEFINE_SHOW_ATTRIBUTE(state);
|
|
|
|
DEFINE_SHOW_ATTRIBUTE(stats);
|
|
|
|
DEFINE_SHOW_ATTRIBUTE(transactions);
|
|
|
|
DEFINE_SHOW_ATTRIBUTE(transaction_log);
|
|
|
|
|
|
|
|
const struct binder_debugfs_entry binder_debugfs_entries[] = {
|
|
|
|
{
|
|
|
|
.name = "state",
|
|
|
|
.mode = 0444,
|
|
|
|
.fops = &state_fops,
|
|
|
|
.data = NULL,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.name = "stats",
|
|
|
|
.mode = 0444,
|
|
|
|
.fops = &stats_fops,
|
|
|
|
.data = NULL,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.name = "transactions",
|
|
|
|
.mode = 0444,
|
|
|
|
.fops = &transactions_fops,
|
|
|
|
.data = NULL,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.name = "transaction_log",
|
|
|
|
.mode = 0444,
|
|
|
|
.fops = &transaction_log_fops,
|
|
|
|
.data = &binder_transaction_log,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.name = "failed_transaction_log",
|
|
|
|
.mode = 0444,
|
|
|
|
.fops = &transaction_log_fops,
|
|
|
|
.data = &binder_transaction_log_failed,
|
|
|
|
},
|
|
|
|
{} /* terminator */
|
|
|
|
};
|
|
|
|
|
2017-02-04 06:40:48 +08:00
|
|
|
static int __init init_binder_device(const char *name)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct binder_device *binder_device;
|
|
|
|
|
|
|
|
binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
|
|
|
|
if (!binder_device)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
binder_device->miscdev.fops = &binder_fops;
|
|
|
|
binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
|
|
|
|
binder_device->miscdev.name = name;
|
|
|
|
|
2020-03-04 00:43:40 +08:00
|
|
|
refcount_set(&binder_device->ref, 1);
|
2017-02-04 06:40:48 +08:00
|
|
|
binder_device->context.binder_context_mgr_uid = INVALID_UID;
|
|
|
|
binder_device->context.name = name;
|
2017-06-30 03:01:43 +08:00
|
|
|
mutex_init(&binder_device->context.context_mgr_node_lock);
|
2017-02-04 06:40:48 +08:00
|
|
|
|
|
|
|
ret = misc_register(&binder_device->miscdev);
|
|
|
|
if (ret < 0) {
|
|
|
|
kfree(binder_device);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
hlist_add_head(&binder_device->hlist, &binder_devices);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
static int __init binder_init(void)
|
|
|
|
{
|
|
|
|
int ret;
|
2019-01-31 08:25:02 +08:00
|
|
|
char *device_name, *device_tmp;
|
2017-02-04 06:40:48 +08:00
|
|
|
struct binder_device *device;
|
|
|
|
struct hlist_node *tmp;
|
2019-01-31 08:25:02 +08:00
|
|
|
char *device_names = NULL;
|
2011-11-30 19:18:14 +08:00
|
|
|
|
2017-11-29 21:29:47 +08:00
|
|
|
ret = binder_alloc_shrinker_init();
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2017-08-23 23:46:42 +08:00
|
|
|
|
2017-06-30 03:01:53 +08:00
|
|
|
atomic_set(&binder_transaction_log.cur, ~0U);
|
|
|
|
atomic_set(&binder_transaction_log_failed.cur, ~0U);
|
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
|
2022-07-02 02:20:41 +08:00
|
|
|
if (binder_debugfs_dir_entry_root) {
|
|
|
|
const struct binder_debugfs_entry *db_entry;
|
|
|
|
|
|
|
|
binder_for_each_debugfs_entry(db_entry)
|
|
|
|
debugfs_create_file(db_entry->name,
|
|
|
|
db_entry->mode,
|
|
|
|
binder_debugfs_dir_entry_root,
|
|
|
|
db_entry->data,
|
|
|
|
db_entry->fops);
|
|
|
|
|
2009-04-29 11:57:50 +08:00
|
|
|
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
|
|
|
|
binder_debugfs_dir_entry_root);
|
2011-11-30 19:18:14 +08:00
|
|
|
}
|
2017-02-04 06:40:48 +08:00
|
|
|
|
2019-09-04 19:07:03 +08:00
|
|
|
if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
|
|
|
|
strcmp(binder_devices_param, "") != 0) {
|
2019-01-26 18:23:20 +08:00
|
|
|
/*
|
|
|
|
* Copy the module_parameter string, because we don't want to
|
|
|
|
* tokenize it in-place.
|
|
|
|
*/
|
|
|
|
device_names = kstrdup(binder_devices_param, GFP_KERNEL);
|
|
|
|
if (!device_names) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto err_alloc_device_names_failed;
|
|
|
|
}
|
2017-02-04 06:40:48 +08:00
|
|
|
|
2019-01-26 18:23:20 +08:00
|
|
|
device_tmp = device_names;
|
|
|
|
while ((device_name = strsep(&device_tmp, ","))) {
|
|
|
|
ret = init_binder_device(device_name);
|
|
|
|
if (ret)
|
|
|
|
goto err_init_binder_device_failed;
|
|
|
|
}
|
2017-02-04 06:40:48 +08:00
|
|
|
}
|
|
|
|
|
2019-01-31 08:25:02 +08:00
|
|
|
ret = init_binderfs();
|
|
|
|
if (ret)
|
|
|
|
goto err_init_binder_device_failed;
|
|
|
|
|
2017-02-04 06:40:48 +08:00
|
|
|
return ret;
|
|
|
|
|
|
|
|
err_init_binder_device_failed:
|
|
|
|
hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
|
|
|
|
misc_deregister(&device->miscdev);
|
|
|
|
hlist_del(&device->hlist);
|
|
|
|
kfree(device);
|
|
|
|
}
|
2017-08-21 22:13:28 +08:00
|
|
|
|
|
|
|
kfree(device_names);
|
|
|
|
|
2017-02-04 06:40:48 +08:00
|
|
|
err_alloc_device_names_failed:
|
|
|
|
debugfs_remove_recursive(binder_debugfs_dir_entry_root);
|
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
device_initcall(binder_init);
|
|
|
|
|
2012-10-17 06:29:53 +08:00
|
|
|
#define CREATE_TRACE_POINTS
|
|
|
|
#include "binder_trace.h"
|
|
|
|
|
2011-11-30 19:18:14 +08:00
|
|
|
MODULE_LICENSE("GPL v2");
|