binder: add spinlocks to protect todo lists

The todo lists in the proc, thread, and node structures
are accessed by other procs/threads to place work
items on the queue.

The todo lists are protected by the new proc->inner_lock.
No locks should ever be nested under these locks. As the
name suggests, an outer lock will be introduced in
a later patch.

Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Todd Kjos 2017-06-29 12:02:02 -07:00 committed by Greg Kroah-Hartman
parent ed29721e22
commit 72196393a5
1 changed file with 269 additions and 86 deletions
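
For context: the worklist helpers added in this patch rely on proc->inner_lock and its lock/unlock wrappers, introduced earlier in this patch series. A minimal sketch of that pattern (simplified here; the in-tree wrappers are macros that also pass __LINE__ for spinlock debug logging):

#include <linux/spinlock.h>

/*
 * Sketch of the inner-lock wrappers this patch builds on (simplified).
 * inner_lock is the innermost lock in the ordering: no other lock may
 * be acquired while it is held.
 */
struct binder_proc {
	/* ... other fields ... */
	spinlock_t inner_lock;	/* protects the todo lists */
};

static void binder_inner_proc_lock(struct binder_proc *proc)
{
	spin_lock(&proc->inner_lock);
}

static void binder_inner_proc_unlock(struct binder_proc *proc)
{
	spin_unlock(&proc->inner_lock);
}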

@@ -279,8 +279,16 @@ struct binder_device {
struct binder_context context;
};
/**
* struct binder_work - work enqueued on a worklist
* @entry: node enqueued on list
* @type: type of work to be performed
*
* There are separate work lists for proc, thread, and node (async).
*/
struct binder_work {
struct list_head entry;
enum {
BINDER_WORK_TRANSACTION = 1,
BINDER_WORK_TRANSACTION_COMPLETE,
@@ -303,6 +311,7 @@ struct binder_error {
* (invariant after initialized)
* @lock: lock for node fields
* @work: worklist element for node work
* (protected by @proc->inner_lock)
* @rb_node: element for proc->nodes tree
* @dead_node: element for binder_dead_nodes list
* (protected by binder_dead_nodes_lock)
@@ -347,6 +356,7 @@ struct binder_error {
* @min_priority: minimum scheduling priority
* (invariant after initialized)
* @async_todo: list of async work items
* (protected by @proc->inner_lock)
*
* Bookkeeping structure for binder nodes.
*/
@@ -388,6 +398,11 @@ struct binder_node {
};
struct binder_ref_death {
/**
* @work: worklist element for death notifications
* (protected by inner_lock of the proc that
* this ref belongs to)
*/
struct binder_work work;
binder_uintptr_t cookie;
};
@@ -467,11 +482,13 @@ enum binder_deferred_state {
* @is_dead: process is dead and awaiting free
* when outstanding transactions are cleaned up
* @todo: list of work for this process
* (protected by @inner_lock)
* @wait: wait queue head to wait for proc work
* (invariant after initialized)
* @stats: per-process binder statistics
* (atomics, no lock needed)
* @delivered_death: list of delivered death notifications
* (protected by @inner_lock)
* @max_threads: cap on number of binder threads
* @requested_threads: number of binder threads requested but not
* yet started. In current implementation, can
@@ -542,6 +559,7 @@ enum {
* (no lock needed)
* @transaction_stack: stack of in-progress transactions for this thread
* @todo: list of work to do for this thread
* (protected by @proc->inner_lock)
* @return_error: transaction errors reported by this thread
* (only accessed by this thread)
* @reply_error: transaction errors reported by target thread
@@ -689,6 +707,111 @@ _binder_node_unlock(struct binder_node *node, int line)
spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
return list_empty(list);
}
/**
* binder_worklist_empty() - Check if no items on the work list
* @proc: binder_proc associated with list
* @list: list to check
*
* Return: true if there are no items on list, else false
*/
static bool binder_worklist_empty(struct binder_proc *proc,
struct list_head *list)
{
bool ret;
binder_inner_proc_lock(proc);
ret = binder_worklist_empty_ilocked(list);
binder_inner_proc_unlock(proc);
return ret;
}
static void
binder_enqueue_work_ilocked(struct binder_work *work,
struct list_head *target_list)
{
BUG_ON(target_list == NULL);
BUG_ON(work->entry.next && !list_empty(&work->entry));
list_add_tail(&work->entry, target_list);
}
/**
* binder_enqueue_work() - Add an item to the work list
* @proc: binder_proc associated with list
* @work: struct binder_work to add to list
* @target_list: list to add work to
*
* Adds the work to the specified list. Asserts that work
* is not already on a list.
*/
static void
binder_enqueue_work(struct binder_proc *proc,
struct binder_work *work,
struct list_head *target_list)
{
binder_inner_proc_lock(proc);
binder_enqueue_work_ilocked(work, target_list);
binder_inner_proc_unlock(proc);
}
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
list_del_init(&work->entry);
}
/**
* binder_dequeue_work() - Removes an item from the work list
* @proc: binder_proc associated with list
* @work: struct binder_work to remove from list
*
* Removes the specified work item from whatever list it is on.
* Can safely be called if work is not on any list.
*/
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
binder_inner_proc_lock(proc);
binder_dequeue_work_ilocked(work);
binder_inner_proc_unlock(proc);
}
static struct binder_work *binder_dequeue_work_head_ilocked(
struct list_head *list)
{
struct binder_work *w;
w = list_first_entry_or_null(list, struct binder_work, entry);
if (w)
list_del_init(&w->entry);
return w;
}
/**
* binder_dequeue_work_head() - Dequeues the item at head of list
* @proc: binder_proc associated with list
* @list: list to dequeue head
*
* Removes the head of the list if there are items on the list
*
* Return: pointer to dequeued binder_work, NULL if list was empty
*/
static struct binder_work *binder_dequeue_work_head(
struct binder_proc *proc,
struct list_head *list)
{
struct binder_work *w;
binder_inner_proc_lock(proc);
w = binder_dequeue_work_head_ilocked(list);
binder_inner_proc_unlock(proc);
return w;
}
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
@@ -870,8 +993,8 @@ static int binder_inc_node_ilocked(struct binder_node *node, int strong,
} else
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
list_del_init(&node->work.entry);
list_add_tail(&node->work.entry, target_list);
binder_dequeue_work_ilocked(&node->work);
binder_enqueue_work_ilocked(&node->work, target_list);
}
} else {
if (!internal)
@@ -882,7 +1005,7 @@
node->debug_id);
return -EINVAL;
}
list_add_tail(&node->work.entry, target_list);
binder_enqueue_work_ilocked(&node->work, target_list);
}
}
return 0;
@@ -926,19 +1049,20 @@ static bool binder_dec_node_ilocked(struct binder_node *node,
if (proc && (node->has_strong_ref || node->has_weak_ref)) {
if (list_empty(&node->work.entry)) {
list_add_tail(&node->work.entry, &node->proc->todo);
binder_enqueue_work_ilocked(&node->work, &proc->todo);
wake_up_interruptible(&node->proc->wait);
}
} else {
if (hlist_empty(&node->refs) && !node->local_strong_refs &&
!node->local_weak_refs && !node->tmp_refs) {
list_del_init(&node->work.entry);
if (proc) {
rb_erase(&node->rb_node, &node->proc->nodes);
binder_dequeue_work_ilocked(&node->work);
rb_erase(&node->rb_node, &proc->nodes);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"refless node %d deleted\n",
node->debug_id);
} else {
BUG_ON(!list_empty(&node->work.entry));
spin_lock(&binder_dead_nodes_lock);
/*
* tmp_refs could have changed so
@@ -1188,7 +1312,7 @@ static void binder_cleanup_ref(struct binder_ref *ref)
"%d delete ref %d desc %d has death notification\n",
ref->proc->pid, ref->data.debug_id,
ref->data.desc);
list_del(&ref->death->work.entry);
binder_dequeue_work(ref->proc, &ref->death->work);
binder_stats_deleted(BINDER_STAT_DEATH);
}
binder_stats_deleted(BINDER_STAT_REF);
@@ -1539,8 +1663,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,
binder_pop_transaction(target_thread, t);
if (target_thread->reply_error.cmd == BR_OK) {
target_thread->reply_error.cmd = error_code;
list_add_tail(
&target_thread->reply_error.work.entry,
binder_enqueue_work(
target_thread->proc,
&target_thread->reply_error.work,
&target_thread->todo);
wake_up_interruptible(&target_thread->wait);
} else {
@@ -2578,7 +2703,7 @@ static void binder_transaction(struct binder_proc *proc,
}
}
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
binder_enqueue_work(proc, tcomplete, &thread->todo);
if (reply) {
if (target_thread->is_dead)
@@ -2609,7 +2734,7 @@ static void binder_transaction(struct binder_proc *proc,
goto err_dead_proc_or_thread;
}
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
binder_enqueue_work(target_proc, &t->work, target_list);
if (target_wait) {
if (reply || !(tr->flags & TF_ONE_WAY))
wake_up_interruptible_sync(target_wait);
@@ -2685,13 +2810,15 @@ err_no_context_mgr_node:
BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
list_add_tail(&thread->return_error.work.entry,
&thread->todo);
binder_enqueue_work(thread->proc,
&thread->return_error.work,
&thread->todo);
binder_send_failed_reply(in_reply_to, return_error);
} else {
thread->return_error.cmd = return_error;
list_add_tail(&thread->return_error.work.entry,
&thread->todo);
binder_enqueue_work(thread->proc,
&thread->return_error.work,
&thread->todo);
}
}
@@ -2884,11 +3011,21 @@ static int binder_thread_write(struct binder_proc *proc,
buffer->transaction = NULL;
}
if (buffer->async_transaction && buffer->target_node) {
BUG_ON(!buffer->target_node->has_async_transaction);
if (list_empty(&buffer->target_node->async_todo))
buffer->target_node->has_async_transaction = 0;
struct binder_node *buf_node;
struct binder_work *w;
buf_node = buffer->target_node;
BUG_ON(!buf_node->has_async_transaction);
BUG_ON(buf_node->proc != proc);
binder_inner_proc_lock(proc);
w = binder_dequeue_work_head_ilocked(
&buf_node->async_todo);
if (!w)
buf_node->has_async_transaction = 0;
else
list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
binder_enqueue_work_ilocked(
w, &thread->todo);
binder_inner_proc_unlock(proc);
}
trace_binder_transaction_buffer_release(buffer);
binder_transaction_buffer_release(proc, buffer, NULL);
@@ -3000,9 +3137,10 @@ static int binder_thread_write(struct binder_proc *proc,
WARN_ON(thread->return_error.cmd !=
BR_OK);
thread->return_error.cmd = BR_ERROR;
list_add_tail(
&thread->return_error.work.entry,
&thread->todo);
binder_enqueue_work(
thread->proc,
&thread->return_error.work,
&thread->todo);
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
proc->pid, thread->pid);
@@ -3014,11 +3152,20 @@ static int binder_thread_write(struct binder_proc *proc,
ref->death = death;
if (ref->node->proc == NULL) {
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
list_add_tail(&ref->death->work.entry, &thread->todo);
} else {
list_add_tail(&ref->death->work.entry, &proc->todo);
wake_up_interruptible(&proc->wait);
if (thread->looper &
(BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
binder_enqueue_work(
proc,
&ref->death->work,
&thread->todo);
else {
binder_enqueue_work(
proc,
&ref->death->work,
&proc->todo);
wake_up_interruptible(
&proc->wait);
}
}
} else {
@@ -3036,18 +3183,27 @@ static int binder_thread_write(struct binder_proc *proc,
break;
}
ref->death = NULL;
binder_inner_proc_lock(proc);
if (list_empty(&death->work.entry)) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
list_add_tail(&death->work.entry, &thread->todo);
} else {
list_add_tail(&death->work.entry, &proc->todo);
wake_up_interruptible(&proc->wait);
if (thread->looper &
(BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
binder_enqueue_work_ilocked(
&death->work,
&thread->todo);
else {
binder_enqueue_work_ilocked(
&death->work,
&proc->todo);
wake_up_interruptible(
&proc->wait);
}
} else {
BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
}
binder_inner_proc_unlock(proc);
}
} break;
case BC_DEAD_BINDER_DONE: {
@@ -3059,8 +3215,13 @@ static int binder_thread_write(struct binder_proc *proc,
return -EFAULT;
ptr += sizeof(cookie);
list_for_each_entry(w, &proc->delivered_death, entry) {
struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->delivered_death,
entry) {
struct binder_ref_death *tmp_death =
container_of(w,
struct binder_ref_death,
work);
if (tmp_death->cookie == cookie) {
death = tmp_death;
@@ -3074,19 +3235,25 @@ static int binder_thread_write(struct binder_proc *proc,
if (death == NULL) {
binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
proc->pid, thread->pid, (u64)cookie);
binder_inner_proc_unlock(proc);
break;
}
list_del_init(&death->work.entry);
binder_dequeue_work_ilocked(&death->work);
if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
list_add_tail(&death->work.entry, &thread->todo);
} else {
list_add_tail(&death->work.entry, &proc->todo);
if (thread->looper &
(BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
binder_enqueue_work_ilocked(
&death->work, &thread->todo);
else {
binder_enqueue_work_ilocked(
&death->work,
&proc->todo);
wake_up_interruptible(&proc->wait);
}
}
binder_inner_proc_unlock(proc);
} break;
default:
@@ -3113,12 +3280,14 @@ static void binder_stat_br(struct binder_proc *proc,
static int binder_has_proc_work(struct binder_proc *proc,
struct binder_thread *thread)
{
return !list_empty(&proc->todo) || thread->looper_need_return;
return !binder_worklist_empty(proc, &proc->todo) ||
thread->looper_need_return;
}
static int binder_has_thread_work(struct binder_thread *thread)
{
return !list_empty(&thread->todo) || thread->looper_need_return;
return !binder_worklist_empty(thread->proc, &thread->todo) ||
thread->looper_need_return;
}
static int binder_put_node_cmd(struct binder_proc *proc,
@@ -3172,7 +3341,7 @@ static int binder_thread_read(struct binder_proc *proc,
retry:
wait_for_proc_work = thread->transaction_stack == NULL &&
list_empty(&thread->todo);
binder_worklist_empty(proc, &thread->todo);
thread->looper |= BINDER_LOOPER_STATE_WAITING;
if (wait_for_proc_work)
@@ -3182,7 +3351,7 @@ retry:
trace_binder_wait_for_work(wait_for_proc_work,
!!thread->transaction_stack,
!list_empty(&thread->todo));
!binder_worklist_empty(proc, &thread->todo));
if (wait_for_proc_work) {
if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))) {
@@ -3217,18 +3386,20 @@ retry:
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_work *w = NULL;
struct list_head *list = NULL;
struct binder_transaction *t = NULL;
struct binder_thread *t_from;
binder_inner_proc_lock(proc);
if (!list_empty(&thread->todo)) {
w = list_first_entry(&thread->todo, struct binder_work,
entry);
} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
w = list_first_entry(&proc->todo, struct binder_work,
entry);
} else {
if (!binder_worklist_empty_ilocked(&thread->todo))
list = &thread->todo;
else if (!binder_worklist_empty_ilocked(&proc->todo) &&
wait_for_proc_work)
list = &proc->todo;
else {
binder_inner_proc_unlock(proc);
/* no data added */
if (ptr - buffer == 4 && !thread->looper_need_return)
goto retry;
@@ -3239,7 +3410,7 @@ retry:
binder_inner_proc_unlock(proc);
break;
}
list_del_init(&w->entry);
w = binder_dequeue_work_head_ilocked(list);
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
@@ -3388,8 +3559,8 @@ retry:
binder_stats_deleted(BINDER_STAT_DEATH);
} else {
binder_inner_proc_lock(proc);
list_add_tail(&w->entry,
&proc->delivered_death);
binder_enqueue_work_ilocked(
w, &proc->delivered_death);
binder_inner_proc_unlock(proc);
}
if (cmd == BR_DEAD_BINDER)
@@ -3499,13 +3670,16 @@ done:
return 0;
}
static void binder_release_work(struct list_head *list)
static void binder_release_work(struct binder_proc *proc,
struct list_head *list)
{
struct binder_work *w;
while (!list_empty(list)) {
w = list_first_entry(list, struct binder_work, entry);
list_del_init(&w->entry);
while (1) {
w = binder_dequeue_work_head(proc, list);
if (!w)
return;
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
struct binder_transaction *t;
@@ -3669,7 +3843,7 @@ static int binder_thread_release(struct binder_proc *proc,
if (send_reply)
binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
binder_release_work(&thread->todo);
binder_release_work(proc, &thread->todo);
binder_thread_dec_tmpref(thread);
return active_transactions;
}
@@ -3686,7 +3860,7 @@ static unsigned int binder_poll(struct file *filp,
thread = binder_get_thread(proc);
wait_for_proc_work = thread->transaction_stack == NULL &&
list_empty(&thread->todo);
binder_worklist_empty(proc, &thread->todo);
binder_unlock(__func__);
@@ -3749,7 +3923,7 @@ static int binder_ioctl_write_read(struct file *filp,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
if (!list_empty(&proc->todo))
if (!binder_worklist_empty(proc, &proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
@@ -4069,10 +4243,10 @@ static int binder_node_release(struct binder_node *node, int refs)
int death = 0;
struct binder_proc *proc = node->proc;
binder_release_work(&node->async_todo);
binder_release_work(proc, &node->async_todo);
binder_inner_proc_lock(proc);
list_del_init(&node->work.entry);
binder_dequeue_work_ilocked(&node->work);
/*
* The caller must have taken a temporary ref on the node.
*/
@@ -4101,13 +4275,15 @@ static int binder_node_release(struct binder_node *node, int refs)
death++;
binder_inner_proc_lock(ref->proc);
if (list_empty(&ref->death->work.entry)) {
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
list_add_tail(&ref->death->work.entry,
&ref->proc->todo);
binder_enqueue_work_ilocked(&ref->death->work,
&ref->proc->todo);
wake_up_interruptible(&ref->proc->wait);
} else
BUG();
binder_inner_proc_unlock(ref->proc);
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
@@ -4183,8 +4359,8 @@ static void binder_deferred_release(struct binder_proc *proc)
binder_free_ref(ref);
}
binder_release_work(&proc->todo);
binder_release_work(&proc->delivered_death);
binder_release_work(proc, &proc->todo);
binder_release_work(proc, &proc->delivered_death);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
@@ -4275,9 +4451,9 @@ static void print_binder_transaction(struct seq_file *m, const char *prefix,
t->buffer->data);
}
static void print_binder_work(struct seq_file *m, const char *prefix,
const char *transaction_prefix,
struct binder_work *w)
static void print_binder_work_ilocked(struct seq_file *m, const char *prefix,
const char *transaction_prefix,
struct binder_work *w)
{
struct binder_node *node;
struct binder_transaction *t;
@@ -4318,15 +4494,16 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
}
}
static void print_binder_thread(struct seq_file *m,
struct binder_thread *thread,
int print_always)
static void print_binder_thread_ilocked(struct seq_file *m,
struct binder_thread *thread,
int print_always)
{
struct binder_transaction *t;
struct binder_work *w;
size_t start_pos = m->count;
size_t header_pos;
WARN_ON(!spin_is_locked(&thread->proc->inner_lock));
seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
thread->pid, thread->looper,
thread->looper_need_return,
@@ -4348,7 +4525,8 @@ static void print_binder_thread(struct seq_file *m,
}
}
list_for_each_entry(w, &thread->todo, entry) {
print_binder_work(m, " ", " pending transaction", w);
print_binder_work_ilocked(m, " ",
" pending transaction", w);
}
if (!print_always && m->count == header_pos)
m->count = start_pos;
@@ -4375,9 +4553,13 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
seq_printf(m, " %d", ref->proc->pid);
}
seq_puts(m, "\n");
list_for_each_entry(w, &node->async_todo, entry)
print_binder_work(m, " ",
" pending async transaction", w);
if (node->proc) {
binder_inner_proc_lock(node->proc);
list_for_each_entry(w, &node->async_todo, entry)
print_binder_work_ilocked(m, " ",
" pending async transaction", w);
binder_inner_proc_unlock(node->proc);
}
}
static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
@@ -4401,9 +4583,11 @@ static void print_binder_proc(struct seq_file *m,
seq_printf(m, "context %s\n", proc->context->name);
header_pos = m->count;
binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
print_binder_thread(m, rb_entry(n, struct binder_thread,
print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
rb_node), print_all);
binder_inner_proc_unlock(proc);
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
@@ -4418,12 +4602,14 @@ static void print_binder_proc(struct seq_file *m,
rb_node_desc));
}
binder_alloc_print_allocated(m, &proc->alloc);
binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry)
print_binder_work(m, " ", " pending transaction", w);
print_binder_work_ilocked(m, " ", " pending transaction", w);
list_for_each_entry(w, &proc->delivered_death, entry) {
seq_puts(m, " has delivered dead binder\n");
break;
}
binder_inner_proc_unlock(proc);
if (!print_all && m->count == header_pos)
m->count = start_pos;
}
@@ -4562,15 +4748,12 @@ static void print_binder_proc_stats(struct seq_file *m,
seq_printf(m, " buffers: %d\n", count);
count = 0;
binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry) {
switch (w->type) {
case BINDER_WORK_TRANSACTION:
if (w->type == BINDER_WORK_TRANSACTION)
count++;
break;
default:
break;
}
}
binder_inner_proc_unlock(proc);
seq_printf(m, " pending transactions: %d\n", count);
print_binder_stats(m, " ", &proc->stats);
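
A closing note on the naming convention used throughout the patch: functions carrying the _ilocked suffix require proc->inner_lock to already be held by the caller, while the unsuffixed wrappers (binder_enqueue_work(), binder_dequeue_work(), binder_worklist_empty()) acquire and release the lock themselves. A hypothetical caller, not part of this patch, that wants to batch two worklist operations under a single lock hold would use the _ilocked variants directly:

/*
 * Hypothetical example (not from this patch): move a work item to a
 * different list with a single inner_lock acquisition.
 */
static void example_move_work(struct binder_proc *proc,
			      struct binder_work *work,
			      struct list_head *dest)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);		/* off its current list */
	binder_enqueue_work_ilocked(work, dest);	/* onto the new list */
	binder_inner_proc_unlock(proc);
}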