// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
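
/*
 * Example (illustrative only, not part of the driver): a minimal sketch of
 * how the lock order above and the *_ilocked suffix convention fit
 * together. example() and example_ilocked() are hypothetical helpers;
 * the real lock wrappers are defined further down in this file.
 *
 *	static void example_ilocked(struct binder_proc *proc)
 *	{
 *		assert_spin_locked(&proc->inner_lock);
 *		// operate on proc->todo, proc->threads, ...
 *	}
 *
 *	static void example(struct binder_proc *proc)
 *	{
 *		binder_inner_proc_lock(proc);
 *		example_ilocked(proc);
 *		binder_inner_proc_unlock(proc);
 *	}
 */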

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>

#include <kunit/visibility.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_netlink.h"
#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static DEFINE_SPINLOCK(binder_devices_lock);

static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)
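
/*
 * Illustrative use of the macro above (hypothetical values, not taken
 * from this file):
 *
 *	binder_set_extended_error(&thread->ee, t_debug_id,
 *				  BR_FAILED_REPLY, -ENOMEM);
 */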

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before the
	 * other fields are zeroed by memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
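
/*
 * Sketch of the assumed read side (illustrative only): a consumer of
 * this ring buffer is expected to pair the smp_wmb() above with a read
 * barrier, e.g.:
 *
 *	int done = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	// ... read/print the remaining fields of *e ...
 *	// the entry is complete only if done != 0 and it still equals
 *	// READ_ONCE(e->debug_id_done) afterwards.
 */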
252
253
enum binder_deferred_state {
254
BINDER_DEFERRED_FLUSH = 0x01,
255
BINDER_DEFERRED_RELEASE = 0x02,
256
};
257
258
enum {
259
BINDER_LOOPER_STATE_REGISTERED = 0x01,
260
BINDER_LOOPER_STATE_ENTERED = 0x02,
261
BINDER_LOOPER_STATE_EXITED = 0x04,
262
BINDER_LOOPER_STATE_INVALID = 0x08,
263
BINDER_LOOPER_STATE_WAITING = 0x10,
264
BINDER_LOOPER_STATE_POLL = 0x20,
265
};
266
267
/**
268
* binder_proc_lock() - Acquire outer lock for given binder_proc
269
* @proc: struct binder_proc to acquire
270
*
271
* Acquires proc->outer_lock. Used to protect binder_ref
272
* structures associated with the given proc.
273
*/
274
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
275
static void
276
_binder_proc_lock(struct binder_proc *proc, int line)
277
__acquires(&proc->outer_lock)
278
{
279
binder_debug(BINDER_DEBUG_SPINLOCKS,
280
"%s: line=%d\n", __func__, line);
281
spin_lock(&proc->outer_lock);
282
}
283
284
/**
285
* binder_proc_unlock() - Release outer lock for given binder_proc
286
* @proc: struct binder_proc to release
287
*
288
* Release lock acquired via binder_proc_lock()
289
*/
290
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
291
static void
292
_binder_proc_unlock(struct binder_proc *proc, int line)
293
__releases(&proc->outer_lock)
294
{
295
binder_debug(BINDER_DEBUG_SPINLOCKS,
296
"%s: line=%d\n", __func__, line);
297
spin_unlock(&proc->outer_lock);
298
}
299
300
/**
301
* binder_inner_proc_lock() - Acquire inner lock for given binder_proc
302
* @proc: struct binder_proc to acquire
303
*
304
* Acquires proc->inner_lock. Used to protect todo lists
305
*/
306
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
307
static void
308
_binder_inner_proc_lock(struct binder_proc *proc, int line)
309
__acquires(&proc->inner_lock)
310
{
311
binder_debug(BINDER_DEBUG_SPINLOCKS,
312
"%s: line=%d\n", __func__, line);
313
spin_lock(&proc->inner_lock);
314
}
315
316
/**
317
* binder_inner_proc_unlock() - Release inner lock for given binder_proc
318
* @proc: struct binder_proc to release
319
*
320
* Release lock acquired via binder_inner_proc_lock()
321
*/
322
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
323
static void
324
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
325
__releases(&proc->inner_lock)
326
{
327
binder_debug(BINDER_DEBUG_SPINLOCKS,
328
"%s: line=%d\n", __func__, line);
329
spin_unlock(&proc->inner_lock);
330
}
331
332
/**
333
* binder_node_lock() - Acquire spinlock for given binder_node
334
* @node: struct binder_node to acquire
335
*
336
* Acquires node->lock. Used to protect binder_node fields
337
*/
338
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
339
static void
340
_binder_node_lock(struct binder_node *node, int line)
341
__acquires(&node->lock)
342
{
343
binder_debug(BINDER_DEBUG_SPINLOCKS,
344
"%s: line=%d\n", __func__, line);
345
spin_lock(&node->lock);
346
}
347
348
/**
349
* binder_node_unlock() - Release spinlock for given binder_node
350
* @node: struct binder_node to release
351
*
352
* Release lock acquired via binder_node_lock()
353
*/
354
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
355
static void
356
_binder_node_unlock(struct binder_node *node, int line)
357
__releases(&node->lock)
358
{
359
binder_debug(BINDER_DEBUG_SPINLOCKS,
360
"%s: line=%d\n", __func__, line);
361
spin_unlock(&node->lock);
362
}
363
364
/**
365
* binder_node_inner_lock() - Acquire node and inner locks
366
* @node: struct binder_node to acquire
367
*
368
* Acquires node->lock. If node->proc is non-NULL, also acquires
* proc->inner_lock. Used to protect binder_node fields
370
*/
371
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
372
static void
373
_binder_node_inner_lock(struct binder_node *node, int line)
374
__acquires(&node->lock) __acquires(&node->proc->inner_lock)
375
{
376
binder_debug(BINDER_DEBUG_SPINLOCKS,
377
"%s: line=%d\n", __func__, line);
378
spin_lock(&node->lock);
379
if (node->proc)
380
binder_inner_proc_lock(node->proc);
381
else
382
/* annotation for sparse */
383
__acquire(&node->proc->inner_lock);
384
}
385
386
/**
387
* binder_node_inner_unlock() - Release node and inner locks
388
* @node: struct binder_node to release
389
*
390
* Release locks acquired via binder_node_inner_lock()
391
*/
392
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
393
static void
394
_binder_node_inner_unlock(struct binder_node *node, int line)
395
__releases(&node->lock) __releases(&node->proc->inner_lock)
396
{
397
struct binder_proc *proc = node->proc;
398
399
binder_debug(BINDER_DEBUG_SPINLOCKS,
400
"%s: line=%d\n", __func__, line);
401
if (proc)
402
binder_inner_proc_unlock(proc);
403
else
404
/* annotation for sparse */
405
__release(&node->proc->inner_lock);
406
spin_unlock(&node->lock);
407
}
408
409
static bool binder_worklist_empty_ilocked(struct list_head *list)
410
{
411
return list_empty(list);
412
}
413
414
/**
415
* binder_worklist_empty() - Check if no items on the work list
416
* @proc: binder_proc associated with list
417
* @list: list to check
418
*
419
* Return: true if there are no items on list, else false
420
*/
421
static bool binder_worklist_empty(struct binder_proc *proc,
422
struct list_head *list)
423
{
424
bool ret;
425
426
binder_inner_proc_lock(proc);
427
ret = binder_worklist_empty_ilocked(list);
428
binder_inner_proc_unlock(proc);
429
return ret;
430
}
431
432
/**
433
* binder_enqueue_work_ilocked() - Add an item to the work list
434
* @work: struct binder_work to add to list
435
* @target_list: list to add work to
436
*
437
* Adds the work to the specified list. Asserts that work
438
* is not already on a list.
439
*
440
* Requires the proc->inner_lock to be held.
441
*/
442
static void
443
binder_enqueue_work_ilocked(struct binder_work *work,
444
struct list_head *target_list)
445
{
446
BUG_ON(target_list == NULL);
447
BUG_ON(work->entry.next && !list_empty(&work->entry));
448
list_add_tail(&work->entry, target_list);
449
}
450
451
/**
452
* binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
453
* @thread: thread to queue work to
454
* @work: struct binder_work to add to list
455
*
456
* Adds the work to the todo list of the thread. Doesn't set the process_todo
457
* flag, which means that (if it wasn't already set) the thread will go to
458
* sleep without handling this work when it calls read.
459
*
460
* Requires the proc->inner_lock to be held.
461
*/
462
static void
463
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
464
struct binder_work *work)
465
{
466
WARN_ON(!list_empty(&thread->waiting_thread_node));
467
binder_enqueue_work_ilocked(work, &thread->todo);
468
}
469
470
/**
471
* binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
472
* @thread: thread to queue work to
473
* @work: struct binder_work to add to list
474
*
475
* Adds the work to the todo list of the thread, and enables processing
476
* of the todo queue.
477
*
478
* Requires the proc->inner_lock to be held.
479
*/
480
static void
481
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
482
struct binder_work *work)
483
{
484
WARN_ON(!list_empty(&thread->waiting_thread_node));
485
binder_enqueue_work_ilocked(work, &thread->todo);
486
487
/* (e)poll-based threads require an explicit wakeup signal when
 * queuing their own work; they rely on these events to consume
 * messages without blocking on I/O. Without the wakeup, such a
 * thread risks waiting indefinitely without handling the work.
 */
492
if (thread->looper & BINDER_LOOPER_STATE_POLL &&
493
thread->pid == current->pid && !thread->process_todo)
494
wake_up_interruptible_sync(&thread->wait);
495
496
thread->process_todo = true;
497
}
498
499
/**
500
* binder_enqueue_thread_work() - Add an item to the thread work list
501
* @thread: thread to queue work to
502
* @work: struct binder_work to add to list
503
*
504
* Adds the work to the todo list of the thread, and enables processing
505
* of the todo queue.
506
*/
507
static void
508
binder_enqueue_thread_work(struct binder_thread *thread,
509
struct binder_work *work)
510
{
511
binder_inner_proc_lock(thread->proc);
512
binder_enqueue_thread_work_ilocked(thread, work);
513
binder_inner_proc_unlock(thread->proc);
514
}
515
516
static void
517
binder_dequeue_work_ilocked(struct binder_work *work)
518
{
519
list_del_init(&work->entry);
520
}
521
522
/**
523
* binder_dequeue_work() - Removes an item from the work list
524
* @proc: binder_proc associated with list
525
* @work: struct binder_work to remove from list
526
*
527
* Removes the specified work item from whatever list it is on.
528
* Can safely be called if work is not on any list.
529
*/
530
static void
531
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
532
{
533
binder_inner_proc_lock(proc);
534
binder_dequeue_work_ilocked(work);
535
binder_inner_proc_unlock(proc);
536
}
537
538
static struct binder_work *binder_dequeue_work_head_ilocked(
539
struct list_head *list)
540
{
541
struct binder_work *w;
542
543
w = list_first_entry_or_null(list, struct binder_work, entry);
544
if (w)
545
list_del_init(&w->entry);
546
return w;
547
}
548
549
static void
550
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
551
static void binder_free_thread(struct binder_thread *thread);
552
static void binder_free_proc(struct binder_proc *proc);
553
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
554
555
static bool binder_has_work_ilocked(struct binder_thread *thread,
556
bool do_proc_work)
557
{
558
return thread->process_todo ||
559
thread->looper_need_return ||
560
(do_proc_work &&
561
!binder_worklist_empty_ilocked(&thread->proc->todo));
562
}
563
564
static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
565
{
566
bool has_work;
567
568
binder_inner_proc_lock(thread->proc);
569
has_work = binder_has_work_ilocked(thread, do_proc_work);
570
binder_inner_proc_unlock(thread->proc);
571
572
return has_work;
573
}
574
575
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
576
{
577
return !thread->transaction_stack &&
578
binder_worklist_empty_ilocked(&thread->todo);
579
}
580
581
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
582
bool sync)
583
{
584
struct rb_node *n;
585
struct binder_thread *thread;
586
587
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
588
thread = rb_entry(n, struct binder_thread, rb_node);
589
if (thread->looper & BINDER_LOOPER_STATE_POLL &&
590
binder_available_for_proc_work_ilocked(thread)) {
591
if (sync)
592
wake_up_interruptible_sync(&thread->wait);
593
else
594
wake_up_interruptible(&thread->wait);
595
}
596
}
597
}
598
599
/**
600
* binder_select_thread_ilocked() - selects a thread for doing proc work.
601
* @proc: process to select a thread from
602
*
603
* Note that calling this function moves the thread off the waiting_threads
604
* list, so it can only be woken up by the caller of this function, or a
605
* signal. Therefore, callers *should* always wake up the thread this function
606
* returns.
607
*
608
* Return: If there's a thread currently waiting for process work,
609
* returns that thread. Otherwise returns NULL.
610
*/
611
static struct binder_thread *
612
binder_select_thread_ilocked(struct binder_proc *proc)
613
{
614
struct binder_thread *thread;
615
616
assert_spin_locked(&proc->inner_lock);
617
thread = list_first_entry_or_null(&proc->waiting_threads,
618
struct binder_thread,
619
waiting_thread_node);
620
621
if (thread)
622
list_del_init(&thread->waiting_thread_node);
623
624
return thread;
625
}
626
627
/**
628
* binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
629
* @proc: process to wake up a thread in
630
* @thread: specific thread to wake-up (may be NULL)
631
* @sync: whether to do a synchronous wake-up
632
*
633
* This function wakes up a thread in the @proc process.
634
* The caller may provide a specific thread to wake-up in
635
* the @thread parameter. If @thread is NULL, this function
636
* will wake up threads that have called poll().
637
*
638
* Note that for this function to work as expected, callers
639
* should first call binder_select_thread() to find a thread
640
* to handle the work (if they don't have a thread already),
641
* and pass the result into the @thread parameter.
642
*/
643
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
644
struct binder_thread *thread,
645
bool sync)
646
{
647
assert_spin_locked(&proc->inner_lock);
648
649
if (thread) {
650
if (sync)
651
wake_up_interruptible_sync(&thread->wait);
652
else
653
wake_up_interruptible(&thread->wait);
654
return;
655
}
656
657
/* Didn't find a thread waiting for proc work; this can happen
658
* in two scenarios:
659
* 1. All threads are busy handling transactions
660
* In that case, one of those threads should call back into
661
* the kernel driver soon and pick up this work.
662
* 2. Threads are using the (e)poll interface, in which case
663
* they may be blocked on the waitqueue without having been
664
* added to waiting_threads. For this case, we just iterate
665
* over all threads not handling transaction work, and
666
* wake them all up. We wake all because we don't know whether
667
* a thread that called into (e)poll is handling non-binder
668
* work currently.
669
*/
670
binder_wakeup_poll_threads_ilocked(proc, sync);
671
}
672
673
static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
674
{
675
struct binder_thread *thread = binder_select_thread_ilocked(proc);
676
677
binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
678
}
679
680
static void binder_set_nice(long nice)
681
{
682
long min_nice;
683
684
if (can_nice(current, nice)) {
685
set_user_nice(current, nice);
686
return;
687
}
688
min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
689
binder_debug(BINDER_DEBUG_PRIORITY_CAP,
690
"%d: nice value %ld not allowed use %ld instead\n",
691
current->pid, nice, min_nice);
692
set_user_nice(current, min_nice);
693
if (min_nice <= MAX_NICE)
694
return;
695
binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
696
}
697
698
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
699
binder_uintptr_t ptr)
700
{
701
struct rb_node *n = proc->nodes.rb_node;
702
struct binder_node *node;
703
704
assert_spin_locked(&proc->inner_lock);
705
706
while (n) {
707
node = rb_entry(n, struct binder_node, rb_node);
708
709
if (ptr < node->ptr)
710
n = n->rb_left;
711
else if (ptr > node->ptr)
712
n = n->rb_right;
713
else {
714
/*
715
* take an implicit weak reference
716
* to ensure node stays alive until
717
* call to binder_put_node()
718
*/
719
binder_inc_node_tmpref_ilocked(node);
720
return node;
721
}
722
}
723
return NULL;
724
}
725
726
static struct binder_node *binder_get_node(struct binder_proc *proc,
727
binder_uintptr_t ptr)
728
{
729
struct binder_node *node;
730
731
binder_inner_proc_lock(proc);
732
node = binder_get_node_ilocked(proc, ptr);
733
binder_inner_proc_unlock(proc);
734
return node;
735
}
736
737
static struct binder_node *binder_init_node_ilocked(
738
struct binder_proc *proc,
739
struct binder_node *new_node,
740
struct flat_binder_object *fp)
741
{
742
struct rb_node **p = &proc->nodes.rb_node;
743
struct rb_node *parent = NULL;
744
struct binder_node *node;
745
binder_uintptr_t ptr = fp ? fp->binder : 0;
746
binder_uintptr_t cookie = fp ? fp->cookie : 0;
747
__u32 flags = fp ? fp->flags : 0;
748
749
assert_spin_locked(&proc->inner_lock);
750
751
while (*p) {
752
753
parent = *p;
754
node = rb_entry(parent, struct binder_node, rb_node);
755
756
if (ptr < node->ptr)
757
p = &(*p)->rb_left;
758
else if (ptr > node->ptr)
759
p = &(*p)->rb_right;
760
else {
761
/*
762
* A matching node is already in
763
* the rb tree. Abandon the init
764
* and return it.
765
*/
766
binder_inc_node_tmpref_ilocked(node);
767
return node;
768
}
769
}
770
node = new_node;
771
binder_stats_created(BINDER_STAT_NODE);
772
node->tmp_refs++;
773
rb_link_node(&node->rb_node, parent, p);
774
rb_insert_color(&node->rb_node, &proc->nodes);
775
node->debug_id = atomic_inc_return(&binder_last_id);
776
node->proc = proc;
777
node->ptr = ptr;
778
node->cookie = cookie;
779
node->work.type = BINDER_WORK_NODE;
780
node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
781
node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
782
node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
783
spin_lock_init(&node->lock);
784
INIT_LIST_HEAD(&node->work.entry);
785
INIT_LIST_HEAD(&node->async_todo);
786
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
787
"%d:%d node %d u%016llx c%016llx created\n",
788
proc->pid, current->pid, node->debug_id,
789
(u64)node->ptr, (u64)node->cookie);
790
791
return node;
792
}
793
794
static struct binder_node *binder_new_node(struct binder_proc *proc,
795
struct flat_binder_object *fp)
796
{
797
struct binder_node *node;
798
struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
799
800
if (!new_node)
801
return NULL;
802
binder_inner_proc_lock(proc);
803
node = binder_init_node_ilocked(proc, new_node, fp);
804
binder_inner_proc_unlock(proc);
805
if (node != new_node)
806
/*
807
* The node was already added by another thread
808
*/
809
kfree(new_node);
810
811
return node;
812
}
813
814
static void binder_free_node(struct binder_node *node)
815
{
816
kfree(node);
817
binder_stats_deleted(BINDER_STAT_NODE);
818
}
819
820
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
821
int internal,
822
struct list_head *target_list)
823
{
824
struct binder_proc *proc = node->proc;
825
826
assert_spin_locked(&node->lock);
827
if (proc)
828
assert_spin_locked(&proc->inner_lock);
829
if (strong) {
830
if (internal) {
831
if (target_list == NULL &&
832
node->internal_strong_refs == 0 &&
833
!(node->proc &&
834
node == node->proc->context->binder_context_mgr_node &&
835
node->has_strong_ref)) {
836
pr_err("invalid inc strong node for %d\n",
837
node->debug_id);
838
return -EINVAL;
839
}
840
node->internal_strong_refs++;
841
} else
842
node->local_strong_refs++;
843
if (!node->has_strong_ref && target_list) {
844
struct binder_thread *thread = container_of(target_list,
845
struct binder_thread, todo);
846
binder_dequeue_work_ilocked(&node->work);
847
BUG_ON(&thread->todo != target_list);
848
binder_enqueue_deferred_thread_work_ilocked(thread,
849
&node->work);
850
}
851
} else {
852
if (!internal)
853
node->local_weak_refs++;
854
if (!node->has_weak_ref && list_empty(&node->work.entry)) {
855
if (target_list == NULL) {
856
pr_err("invalid inc weak node for %d\n",
857
node->debug_id);
858
return -EINVAL;
859
}
860
/*
861
* See comment above
862
*/
863
binder_enqueue_work_ilocked(&node->work, target_list);
864
}
865
}
866
return 0;
867
}
868
869
static int binder_inc_node(struct binder_node *node, int strong, int internal,
870
struct list_head *target_list)
871
{
872
int ret;
873
874
binder_node_inner_lock(node);
875
ret = binder_inc_node_nilocked(node, strong, internal, target_list);
876
binder_node_inner_unlock(node);
877
878
return ret;
879
}
880
881
static bool binder_dec_node_nilocked(struct binder_node *node,
882
int strong, int internal)
883
{
884
struct binder_proc *proc = node->proc;
885
886
assert_spin_locked(&node->lock);
887
if (proc)
888
assert_spin_locked(&proc->inner_lock);
889
if (strong) {
890
if (internal)
891
node->internal_strong_refs--;
892
else
893
node->local_strong_refs--;
894
if (node->local_strong_refs || node->internal_strong_refs)
895
return false;
896
} else {
897
if (!internal)
898
node->local_weak_refs--;
899
if (node->local_weak_refs || node->tmp_refs ||
900
!hlist_empty(&node->refs))
901
return false;
902
}
903
904
if (proc && (node->has_strong_ref || node->has_weak_ref)) {
905
if (list_empty(&node->work.entry)) {
906
binder_enqueue_work_ilocked(&node->work, &proc->todo);
907
binder_wakeup_proc_ilocked(proc);
908
}
909
} else {
910
if (hlist_empty(&node->refs) && !node->local_strong_refs &&
911
!node->local_weak_refs && !node->tmp_refs) {
912
if (proc) {
913
binder_dequeue_work_ilocked(&node->work);
914
rb_erase(&node->rb_node, &proc->nodes);
915
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
916
"refless node %d deleted\n",
917
node->debug_id);
918
} else {
919
BUG_ON(!list_empty(&node->work.entry));
920
spin_lock(&binder_dead_nodes_lock);
921
/*
922
* tmp_refs could have changed so
923
* check it again
924
*/
925
if (node->tmp_refs) {
926
spin_unlock(&binder_dead_nodes_lock);
927
return false;
928
}
929
hlist_del(&node->dead_node);
930
spin_unlock(&binder_dead_nodes_lock);
931
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
932
"dead node %d deleted\n",
933
node->debug_id);
934
}
935
return true;
936
}
937
}
938
return false;
939
}
940
941
static void binder_dec_node(struct binder_node *node, int strong, int internal)
942
{
943
bool free_node;
944
945
binder_node_inner_lock(node);
946
free_node = binder_dec_node_nilocked(node, strong, internal);
947
binder_node_inner_unlock(node);
948
if (free_node)
949
binder_free_node(node);
950
}
951
952
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
953
{
954
/*
955
* No call to binder_inc_node() is needed since we
956
* don't need to inform userspace of any changes to
957
* tmp_refs
958
*/
959
node->tmp_refs++;
960
}
961
962
/**
963
* binder_inc_node_tmpref() - take a temporary reference on node
964
* @node: node to reference
965
*
966
* Take reference on node to prevent the node from being freed
967
* while referenced only by a local variable. The inner lock is
968
* needed to serialize with the node work on the queue (which
969
* isn't needed after the node is dead). If the node is dead
970
* (node->proc is NULL), use binder_dead_nodes_lock to protect
971
* node->tmp_refs against dead-node-only cases where the node
972
* lock cannot be acquired (eg traversing the dead node list to
973
* print nodes)
974
*/
975
static void binder_inc_node_tmpref(struct binder_node *node)
976
{
977
binder_node_lock(node);
978
if (node->proc)
979
binder_inner_proc_lock(node->proc);
980
else
981
spin_lock(&binder_dead_nodes_lock);
982
binder_inc_node_tmpref_ilocked(node);
983
if (node->proc)
984
binder_inner_proc_unlock(node->proc);
985
else
986
spin_unlock(&binder_dead_nodes_lock);
987
binder_node_unlock(node);
988
}
989
990
/**
991
* binder_dec_node_tmpref() - remove a temporary reference on node
992
* @node: node to reference
993
*
994
* Release temporary reference on node taken via binder_inc_node_tmpref()
995
*/
996
static void binder_dec_node_tmpref(struct binder_node *node)
997
{
998
bool free_node;
999
1000
binder_node_inner_lock(node);
1001
if (!node->proc)
1002
spin_lock(&binder_dead_nodes_lock);
1003
else
1004
__acquire(&binder_dead_nodes_lock);
1005
node->tmp_refs--;
1006
BUG_ON(node->tmp_refs < 0);
1007
if (!node->proc)
1008
spin_unlock(&binder_dead_nodes_lock);
1009
else
1010
__release(&binder_dead_nodes_lock);
1011
/*
1012
* Call binder_dec_node() to check if all refcounts are 0
1013
* and cleanup is needed. Calling with strong=0 and internal=1
1014
* causes no actual reference to be released in binder_dec_node().
1015
* If that changes, a change is needed here too.
1016
*/
1017
free_node = binder_dec_node_nilocked(node, 0, 1);
1018
binder_node_inner_unlock(node);
1019
if (free_node)
1020
binder_free_node(node);
1021
}
1022
1023
static void binder_put_node(struct binder_node *node)
1024
{
1025
binder_dec_node_tmpref(node);
1026
}
1027
1028
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1029
u32 desc, bool need_strong_ref)
1030
{
1031
struct rb_node *n = proc->refs_by_desc.rb_node;
1032
struct binder_ref *ref;
1033
1034
while (n) {
1035
ref = rb_entry(n, struct binder_ref, rb_node_desc);
1036
1037
if (desc < ref->data.desc) {
1038
n = n->rb_left;
1039
} else if (desc > ref->data.desc) {
1040
n = n->rb_right;
1041
} else if (need_strong_ref && !ref->data.strong) {
1042
binder_user_error("tried to use weak ref as strong ref\n");
1043
return NULL;
1044
} else {
1045
return ref;
1046
}
1047
}
1048
return NULL;
1049
}
1050
1051
/* Find the smallest unused descriptor the "slow way" */
1052
static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
1053
{
1054
struct binder_ref *ref;
1055
struct rb_node *n;
1056
u32 desc;
1057
1058
desc = offset;
1059
for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
1060
ref = rb_entry(n, struct binder_ref, rb_node_desc);
1061
if (ref->data.desc > desc)
1062
break;
1063
desc = ref->data.desc + 1;
1064
}
1065
1066
return desc;
1067
}
1068
1069
/*
1070
* Find an available reference descriptor ID. The proc->outer_lock might
1071
* be released in the process, in which case -EAGAIN is returned and the
1072
* @desc should be considered invalid.
1073
*/
1074
static int get_ref_desc_olocked(struct binder_proc *proc,
1075
struct binder_node *node,
1076
u32 *desc)
1077
{
1078
struct dbitmap *dmap = &proc->dmap;
1079
unsigned int nbits, offset;
1080
unsigned long *new, bit;
1081
1082
/* 0 is reserved for the context manager */
1083
offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;
1084
1085
if (!dbitmap_enabled(dmap)) {
1086
*desc = slow_desc_lookup_olocked(proc, offset);
1087
return 0;
1088
}
1089
1090
if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
1091
*desc = bit;
1092
return 0;
1093
}
1094
1095
/*
1096
* The dbitmap is full and needs to grow. The proc->outer_lock
1097
* is briefly released to allocate the new bitmap safely.
1098
*/
1099
nbits = dbitmap_grow_nbits(dmap);
1100
binder_proc_unlock(proc);
1101
new = bitmap_zalloc(nbits, GFP_KERNEL);
1102
binder_proc_lock(proc);
1103
dbitmap_grow(dmap, new, nbits);
1104
1105
return -EAGAIN;
1106
}
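
/*
 * Expected caller pattern (see binder_get_ref_for_node_olocked() below):
 * a return of -EAGAIN simply triggers a retry, because the lock was
 * dropped and re-acquired while growing the bitmap.
 *
 *	retry:
 *		...
 *		if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
 *			goto retry;
 */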
1107
1108
/**
1109
* binder_get_ref_for_node_olocked() - get the ref associated with given node
1110
* @proc: binder_proc that owns the ref
1111
* @node: binder_node of target
1112
* @new_ref: newly allocated binder_ref to be initialized or %NULL
1113
*
1114
* Look up the ref for the given node and return it if it exists
1115
*
1116
* If it doesn't exist and the caller provides a newly allocated
1117
* ref, initialize the fields of the newly allocated ref and insert
1118
* into the given proc rb_trees and node refs list.
1119
*
1120
* Return: the ref for node. It is possible that another thread
1121
* allocated/initialized the ref first in which case the
1122
* returned ref would be different than the passed-in
1123
* new_ref. new_ref must be kfree'd by the caller in
1124
* this case.
1125
*/
1126
static struct binder_ref *binder_get_ref_for_node_olocked(
1127
struct binder_proc *proc,
1128
struct binder_node *node,
1129
struct binder_ref *new_ref)
1130
{
1131
struct binder_ref *ref;
1132
struct rb_node *parent;
1133
struct rb_node **p;
1134
u32 desc;
1135
1136
retry:
1137
p = &proc->refs_by_node.rb_node;
1138
parent = NULL;
1139
while (*p) {
1140
parent = *p;
1141
ref = rb_entry(parent, struct binder_ref, rb_node_node);
1142
1143
if (node < ref->node)
1144
p = &(*p)->rb_left;
1145
else if (node > ref->node)
1146
p = &(*p)->rb_right;
1147
else
1148
return ref;
1149
}
1150
if (!new_ref)
1151
return NULL;
1152
1153
/* might release the proc->outer_lock */
1154
if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
1155
goto retry;
1156
1157
binder_stats_created(BINDER_STAT_REF);
1158
new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1159
new_ref->proc = proc;
1160
new_ref->node = node;
1161
rb_link_node(&new_ref->rb_node_node, parent, p);
1162
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1163
1164
new_ref->data.desc = desc;
1165
p = &proc->refs_by_desc.rb_node;
1166
while (*p) {
1167
parent = *p;
1168
ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1169
1170
if (new_ref->data.desc < ref->data.desc)
1171
p = &(*p)->rb_left;
1172
else if (new_ref->data.desc > ref->data.desc)
1173
p = &(*p)->rb_right;
1174
else
1175
BUG();
1176
}
1177
rb_link_node(&new_ref->rb_node_desc, parent, p);
1178
rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1179
1180
binder_node_lock(node);
1181
hlist_add_head(&new_ref->node_entry, &node->refs);
1182
1183
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1184
"%d new ref %d desc %d for node %d\n",
1185
proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1186
node->debug_id);
1187
binder_node_unlock(node);
1188
return new_ref;
1189
}
1190
1191
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1192
{
1193
struct dbitmap *dmap = &ref->proc->dmap;
1194
bool delete_node = false;
1195
1196
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1197
"%d delete ref %d desc %d for node %d\n",
1198
ref->proc->pid, ref->data.debug_id, ref->data.desc,
1199
ref->node->debug_id);
1200
1201
if (dbitmap_enabled(dmap))
1202
dbitmap_clear_bit(dmap, ref->data.desc);
1203
rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1204
rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1205
1206
binder_node_inner_lock(ref->node);
1207
if (ref->data.strong)
1208
binder_dec_node_nilocked(ref->node, 1, 1);
1209
1210
hlist_del(&ref->node_entry);
1211
delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1212
binder_node_inner_unlock(ref->node);
1213
/*
1214
* Clear ref->node unless we want the caller to free the node
1215
*/
1216
if (!delete_node) {
1217
/*
1218
* The caller uses ref->node to determine
1219
* whether the node needs to be freed. Clear
1220
* it since the node is still alive.
1221
*/
1222
ref->node = NULL;
1223
}
1224
1225
if (ref->death) {
1226
binder_debug(BINDER_DEBUG_DEAD_BINDER,
1227
"%d delete ref %d desc %d has death notification\n",
1228
ref->proc->pid, ref->data.debug_id,
1229
ref->data.desc);
1230
binder_dequeue_work(ref->proc, &ref->death->work);
1231
binder_stats_deleted(BINDER_STAT_DEATH);
1232
}
1233
1234
if (ref->freeze) {
1235
binder_dequeue_work(ref->proc, &ref->freeze->work);
1236
binder_stats_deleted(BINDER_STAT_FREEZE);
1237
}
1238
1239
binder_stats_deleted(BINDER_STAT_REF);
1240
}
1241
1242
/**
1243
* binder_inc_ref_olocked() - increment the ref for given handle
1244
* @ref: ref to be incremented
1245
* @strong: if true, strong increment, else weak
1246
* @target_list: list to queue node work on
1247
*
1248
* Increment the ref. @ref->proc->outer_lock must be held on entry
1249
*
1250
* Return: 0, if successful, else errno
1251
*/
1252
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1253
struct list_head *target_list)
1254
{
1255
int ret;
1256
1257
if (strong) {
1258
if (ref->data.strong == 0) {
1259
ret = binder_inc_node(ref->node, 1, 1, target_list);
1260
if (ret)
1261
return ret;
1262
}
1263
ref->data.strong++;
1264
} else {
1265
if (ref->data.weak == 0) {
1266
ret = binder_inc_node(ref->node, 0, 1, target_list);
1267
if (ret)
1268
return ret;
1269
}
1270
ref->data.weak++;
1271
}
1272
return 0;
1273
}
1274
1275
/**
1276
* binder_dec_ref_olocked() - dec the ref for given handle
1277
* @ref: ref to be decremented
1278
* @strong: if true, strong decrement, else weak
1279
*
1280
* Decrement the ref.
1281
*
1282
* Return: %true if ref is cleaned up and ready to be freed.
1283
*/
1284
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1285
{
1286
if (strong) {
1287
if (ref->data.strong == 0) {
1288
binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1289
ref->proc->pid, ref->data.debug_id,
1290
ref->data.desc, ref->data.strong,
1291
ref->data.weak);
1292
return false;
1293
}
1294
ref->data.strong--;
1295
if (ref->data.strong == 0)
1296
binder_dec_node(ref->node, strong, 1);
1297
} else {
1298
if (ref->data.weak == 0) {
1299
binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1300
ref->proc->pid, ref->data.debug_id,
1301
ref->data.desc, ref->data.strong,
1302
ref->data.weak);
1303
return false;
1304
}
1305
ref->data.weak--;
1306
}
1307
if (ref->data.strong == 0 && ref->data.weak == 0) {
1308
binder_cleanup_ref_olocked(ref);
1309
return true;
1310
}
1311
return false;
1312
}
1313
1314
/**
1315
* binder_get_node_from_ref() - get the node from the given proc/desc
1316
* @proc: proc containing the ref
1317
* @desc: the handle associated with the ref
1318
* @need_strong_ref: if true, only return node if ref is strong
1319
* @rdata: the id/refcount data for the ref
1320
*
1321
* Given a proc and ref handle, return the associated binder_node
1322
*
1323
* Return: a binder_node or NULL if not found or not strong when strong required
1324
*/
1325
static struct binder_node *binder_get_node_from_ref(
1326
struct binder_proc *proc,
1327
u32 desc, bool need_strong_ref,
1328
struct binder_ref_data *rdata)
1329
{
1330
struct binder_node *node;
1331
struct binder_ref *ref;
1332
1333
binder_proc_lock(proc);
1334
ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1335
if (!ref)
1336
goto err_no_ref;
1337
node = ref->node;
1338
/*
1339
* Take an implicit reference on the node to ensure
1340
* it stays alive until the call to binder_put_node()
1341
*/
1342
binder_inc_node_tmpref(node);
1343
if (rdata)
1344
*rdata = ref->data;
1345
binder_proc_unlock(proc);
1346
1347
return node;
1348
1349
err_no_ref:
1350
binder_proc_unlock(proc);
1351
return NULL;
1352
}
1353
1354
/**
1355
* binder_free_ref() - free the binder_ref
1356
* @ref: ref to free
1357
*
1358
* Free the binder_ref. Free the binder_node indicated by ref->node
1359
* (if non-NULL) and the binder_ref_death indicated by ref->death.
1360
*/
1361
static void binder_free_ref(struct binder_ref *ref)
1362
{
1363
if (ref->node)
1364
binder_free_node(ref->node);
1365
kfree(ref->death);
1366
kfree(ref->freeze);
1367
kfree(ref);
1368
}
1369
1370
/* shrink descriptor bitmap if needed */
1371
static void try_shrink_dmap(struct binder_proc *proc)
1372
{
1373
unsigned long *new;
1374
int nbits;
1375
1376
binder_proc_lock(proc);
1377
nbits = dbitmap_shrink_nbits(&proc->dmap);
1378
binder_proc_unlock(proc);
1379
1380
if (!nbits)
1381
return;
1382
1383
new = bitmap_zalloc(nbits, GFP_KERNEL);
1384
binder_proc_lock(proc);
1385
dbitmap_shrink(&proc->dmap, new, nbits);
1386
binder_proc_unlock(proc);
1387
}
1388
1389
/**
1390
* binder_update_ref_for_handle() - inc/dec the ref for given handle
1391
* @proc: proc containing the ref
1392
* @desc: the handle associated with the ref
1393
* @increment: true=inc reference, false=dec reference
1394
* @strong: true=strong reference, false=weak reference
1395
* @rdata: the id/refcount data for the ref
1396
*
1397
* Given a proc and ref handle, increment or decrement the ref
1398
* according to "increment" arg.
1399
*
1400
* Return: 0 if successful, else errno
1401
*/
1402
static int binder_update_ref_for_handle(struct binder_proc *proc,
1403
uint32_t desc, bool increment, bool strong,
1404
struct binder_ref_data *rdata)
1405
{
1406
int ret = 0;
1407
struct binder_ref *ref;
1408
bool delete_ref = false;
1409
1410
binder_proc_lock(proc);
1411
ref = binder_get_ref_olocked(proc, desc, strong);
1412
if (!ref) {
1413
ret = -EINVAL;
1414
goto err_no_ref;
1415
}
1416
if (increment)
1417
ret = binder_inc_ref_olocked(ref, strong, NULL);
1418
else
1419
delete_ref = binder_dec_ref_olocked(ref, strong);
1420
1421
if (rdata)
1422
*rdata = ref->data;
1423
binder_proc_unlock(proc);
1424
1425
if (delete_ref) {
1426
binder_free_ref(ref);
1427
try_shrink_dmap(proc);
1428
}
1429
return ret;
1430
1431
err_no_ref:
1432
binder_proc_unlock(proc);
1433
return ret;
1434
}
1435
1436
/**
1437
* binder_dec_ref_for_handle() - dec the ref for given handle
1438
* @proc: proc containing the ref
1439
* @desc: the handle associated with the ref
1440
* @strong: true=strong reference, false=weak reference
1441
* @rdata: the id/refcount data for the ref
1442
*
1443
* Just calls binder_update_ref_for_handle() to decrement the ref.
1444
*
1445
* Return: 0 if successful, else errno
1446
*/
1447
static int binder_dec_ref_for_handle(struct binder_proc *proc,
1448
uint32_t desc, bool strong, struct binder_ref_data *rdata)
1449
{
1450
return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1451
}
1452
1453
1454
/**
1455
* binder_inc_ref_for_node() - increment the ref for given proc/node
1456
* @proc: proc containing the ref
1457
* @node: target node
1458
* @strong: true=strong reference, false=weak reference
1459
* @target_list: worklist to use if node is incremented
1460
* @rdata: the id/refcount data for the ref
1461
*
1462
* Given a proc and node, increment the ref. Create the ref if it
1463
* doesn't already exist
1464
*
1465
* Return: 0 if successful, else errno
1466
*/
1467
static int binder_inc_ref_for_node(struct binder_proc *proc,
1468
struct binder_node *node,
1469
bool strong,
1470
struct list_head *target_list,
1471
struct binder_ref_data *rdata)
1472
{
1473
struct binder_ref *ref;
1474
struct binder_ref *new_ref = NULL;
1475
int ret = 0;
1476
1477
binder_proc_lock(proc);
1478
ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1479
if (!ref) {
1480
binder_proc_unlock(proc);
1481
new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1482
if (!new_ref)
1483
return -ENOMEM;
1484
binder_proc_lock(proc);
1485
ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1486
}
1487
ret = binder_inc_ref_olocked(ref, strong, target_list);
1488
*rdata = ref->data;
1489
if (ret && ref == new_ref) {
1490
/*
1491
* Cleanup the failed reference here as the target
1492
* could now be dead and have already released its
1493
* references by now. Calling on the new reference
1494
* with strong=0 and a tmp_refs will not decrement
1495
* the node. The new_ref gets kfree'd below.
1496
*/
1497
binder_cleanup_ref_olocked(new_ref);
1498
ref = NULL;
1499
}
1500
1501
binder_proc_unlock(proc);
1502
if (new_ref && ref != new_ref)
1503
/*
1504
* Another thread created the ref first so
1505
* free the one we allocated
1506
*/
1507
kfree(new_ref);
1508
return ret;
1509
}
1510
1511
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1512
struct binder_transaction *t)
1513
{
1514
BUG_ON(!target_thread);
1515
assert_spin_locked(&target_thread->proc->inner_lock);
1516
BUG_ON(target_thread->transaction_stack != t);
1517
BUG_ON(target_thread->transaction_stack->from != target_thread);
1518
target_thread->transaction_stack =
1519
target_thread->transaction_stack->from_parent;
1520
t->from = NULL;
1521
}
1522
1523
/**
1524
* binder_thread_dec_tmpref() - decrement thread->tmp_ref
1525
* @thread: thread to decrement
1526
*
1527
* A thread needs to be kept alive while being used to create or
1528
* handle a transaction. binder_get_txn_from() is used to safely
1529
* extract t->from from a binder_transaction and keep the thread
1530
* indicated by t->from from being freed. When done with that
1531
* binder_thread, this function is called to decrement the
1532
* tmp_ref and free if appropriate (thread has been released
1533
* and no transaction being processed by the driver)
1534
*/
1535
static void binder_thread_dec_tmpref(struct binder_thread *thread)
1536
{
1537
/*
1538
* atomic is used to protect the counter value while
1539
* it cannot reach zero or thread->is_dead is false
1540
*/
1541
binder_inner_proc_lock(thread->proc);
1542
atomic_dec(&thread->tmp_ref);
1543
if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1544
binder_inner_proc_unlock(thread->proc);
1545
binder_free_thread(thread);
1546
return;
1547
}
1548
binder_inner_proc_unlock(thread->proc);
1549
}
1550
1551
/**
1552
* binder_proc_dec_tmpref() - decrement proc->tmp_ref
1553
* @proc: proc to decrement
1554
*
1555
* A binder_proc needs to be kept alive while being used to create or
1556
* handle a transaction. proc->tmp_ref is incremented when
1557
* creating a new transaction or the binder_proc is currently in-use
1558
* by threads that are being released. When done with the binder_proc,
1559
* this function is called to decrement the counter and free the
1560
* proc if appropriate (proc has been released, all threads have
1561
* been released and not currently in-use to process a transaction).
1562
*/
1563
static void binder_proc_dec_tmpref(struct binder_proc *proc)
1564
{
1565
binder_inner_proc_lock(proc);
1566
proc->tmp_ref--;
1567
if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1568
!proc->tmp_ref) {
1569
binder_inner_proc_unlock(proc);
1570
binder_free_proc(proc);
1571
return;
1572
}
1573
binder_inner_proc_unlock(proc);
1574
}
1575
1576
/**
1577
* binder_get_txn_from() - safely extract the "from" thread in transaction
1578
* @t: binder transaction for t->from
1579
*
1580
* Atomically return the "from" thread and increment the tmp_ref
1581
* count for the thread to ensure it stays alive until
1582
* binder_thread_dec_tmpref() is called.
1583
*
1584
* Return: the value of t->from
1585
*/
1586
static struct binder_thread *binder_get_txn_from(
1587
struct binder_transaction *t)
1588
{
1589
struct binder_thread *from;
1590
1591
guard(spinlock)(&t->lock);
1592
from = t->from;
1593
if (from)
1594
atomic_inc(&from->tmp_ref);
1595
return from;
1596
}
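
/*
 * Illustrative only: guard(spinlock)(&t->lock) above is scope-based
 * locking (the cleanup/guard infrastructure); it behaves roughly like
 * the explicit form below, except that the unlock is emitted
 * automatically when the enclosing scope is left.
 *
 *	spin_lock(&t->lock);
 *	from = t->from;
 *	if (from)
 *		atomic_inc(&from->tmp_ref);
 *	spin_unlock(&t->lock);
 *	return from;
 */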
1597
1598
/**
1599
* binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1600
* @t: binder transaction for t->from
1601
*
1602
* Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1603
* to guarantee that the thread cannot be released while operating on it.
1604
* The caller must call binder_inner_proc_unlock() to release the inner lock
1605
* as well as call binder_dec_thread_txn() to release the reference.
1606
*
1607
* Return: the value of t->from
1608
*/
1609
static struct binder_thread *binder_get_txn_from_and_acq_inner(
1610
struct binder_transaction *t)
1611
__acquires(&t->from->proc->inner_lock)
1612
{
1613
struct binder_thread *from;
1614
1615
from = binder_get_txn_from(t);
1616
if (!from) {
1617
__acquire(&from->proc->inner_lock);
1618
return NULL;
1619
}
1620
binder_inner_proc_lock(from->proc);
1621
if (t->from) {
1622
BUG_ON(from != t->from);
1623
return from;
1624
}
1625
binder_inner_proc_unlock(from->proc);
1626
__acquire(&from->proc->inner_lock);
1627
binder_thread_dec_tmpref(from);
1628
return NULL;
1629
}
1630
1631
/**
1632
* binder_free_txn_fixups() - free unprocessed fd fixups
1633
* @t: binder transaction whose fd fixups are to be freed
1634
*
1635
* If the transaction is being torn down prior to being
1636
* processed by the target process, free all of the
1637
* fd fixups and fput the file structs. It is safe to
1638
* call this function after the fixups have been
1639
* processed -- in that case, the list will be empty.
1640
*/
1641
static void binder_free_txn_fixups(struct binder_transaction *t)
1642
{
1643
struct binder_txn_fd_fixup *fixup, *tmp;
1644
1645
list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1646
fput(fixup->file);
1647
if (fixup->target_fd >= 0)
1648
put_unused_fd(fixup->target_fd);
1649
list_del(&fixup->fixup_entry);
1650
kfree(fixup);
1651
}
1652
}
1653
1654
static void binder_txn_latency_free(struct binder_transaction *t)
1655
{
1656
int from_proc, from_thread, to_proc, to_thread;
1657
1658
spin_lock(&t->lock);
1659
from_proc = t->from ? t->from->proc->pid : 0;
1660
from_thread = t->from ? t->from->pid : 0;
1661
to_proc = t->to_proc ? t->to_proc->pid : 0;
1662
to_thread = t->to_thread ? t->to_thread->pid : 0;
1663
spin_unlock(&t->lock);
1664
1665
trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1666
}
1667
1668
static void binder_free_transaction(struct binder_transaction *t)
1669
{
1670
struct binder_proc *target_proc = t->to_proc;
1671
1672
if (target_proc) {
1673
binder_inner_proc_lock(target_proc);
1674
target_proc->outstanding_txns--;
1675
if (target_proc->outstanding_txns < 0)
1676
pr_warn("%s: Unexpected outstanding_txns %d\n",
1677
__func__, target_proc->outstanding_txns);
1678
if (!target_proc->outstanding_txns && target_proc->is_frozen)
1679
wake_up_interruptible_all(&target_proc->freeze_wait);
1680
if (t->buffer)
1681
t->buffer->transaction = NULL;
1682
binder_inner_proc_unlock(target_proc);
1683
}
1684
if (trace_binder_txn_latency_free_enabled())
1685
binder_txn_latency_free(t);
1686
/*
1687
* If the transaction has no target_proc, then
1688
* t->buffer->transaction has already been cleared.
1689
*/
1690
binder_free_txn_fixups(t);
1691
kfree(t);
1692
binder_stats_deleted(BINDER_STAT_TRANSACTION);
1693
}
1694
1695
static void binder_send_failed_reply(struct binder_transaction *t,
1696
uint32_t error_code)
1697
{
1698
struct binder_thread *target_thread;
1699
struct binder_transaction *next;
1700
1701
BUG_ON(t->flags & TF_ONE_WAY);
1702
while (1) {
1703
target_thread = binder_get_txn_from_and_acq_inner(t);
1704
if (target_thread) {
1705
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1706
"send failed reply for transaction %d to %d:%d\n",
1707
t->debug_id,
1708
target_thread->proc->pid,
1709
target_thread->pid);
1710
1711
binder_pop_transaction_ilocked(target_thread, t);
1712
if (target_thread->reply_error.cmd == BR_OK) {
1713
target_thread->reply_error.cmd = error_code;
1714
binder_enqueue_thread_work_ilocked(
1715
target_thread,
1716
&target_thread->reply_error.work);
1717
wake_up_interruptible(&target_thread->wait);
1718
} else {
1719
/*
1720
* Cannot get here for normal operation, but
1721
* we can if multiple synchronous transactions
1722
* are sent without blocking for responses.
1723
* Just ignore the 2nd error in this case.
1724
*/
1725
pr_warn("Unexpected reply error: %u\n",
1726
target_thread->reply_error.cmd);
1727
}
1728
binder_inner_proc_unlock(target_thread->proc);
1729
binder_thread_dec_tmpref(target_thread);
1730
binder_free_transaction(t);
1731
return;
1732
}
1733
__release(&target_thread->proc->inner_lock);
1734
next = t->from_parent;
1735
1736
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1737
"send failed reply for transaction %d, target dead\n",
1738
t->debug_id);
1739
1740
binder_free_transaction(t);
1741
if (next == NULL) {
1742
binder_debug(BINDER_DEBUG_DEAD_BINDER,
1743
"reply failed, no target thread at root\n");
1744
return;
1745
}
1746
t = next;
1747
binder_debug(BINDER_DEBUG_DEAD_BINDER,
1748
"reply failed, no target thread -- retry %d\n",
1749
t->debug_id);
1750
}
1751
}
1752
1753
/**
1754
* binder_cleanup_transaction() - cleans up undelivered transaction
1755
* @t: transaction that needs to be cleaned up
1756
* @reason: reason the transaction wasn't delivered
1757
* @error_code: error to return to caller (if synchronous call)
1758
*/
1759
static void binder_cleanup_transaction(struct binder_transaction *t,
1760
const char *reason,
1761
uint32_t error_code)
1762
{
1763
if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1764
binder_send_failed_reply(t, error_code);
1765
} else {
1766
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1767
"undelivered transaction %d, %s\n",
1768
t->debug_id, reason);
1769
binder_free_transaction(t);
1770
}
1771
}
1772
1773
/**
1774
* binder_get_object() - gets object and checks for valid metadata
1775
* @proc: binder_proc owning the buffer
1776
* @u: sender's user pointer to base of buffer
1777
* @buffer: binder_buffer that we're parsing.
1778
* @offset: offset in the @buffer at which to validate an object.
1779
* @object: struct binder_object to read into
1780
*
1781
* Copy the binder object at the given offset into @object. If @u is
1782
* provided then the copy is from the sender's buffer. If not, then
1783
* it is copied from the target's @buffer.
1784
*
1785
* Return: If there's a valid metadata object at @offset, the
1786
* size of that object. Otherwise, it returns zero. The object
1787
* is read into the struct binder_object pointed to by @object.
1788
*/
1789
static size_t binder_get_object(struct binder_proc *proc,
1790
const void __user *u,
1791
struct binder_buffer *buffer,
1792
unsigned long offset,
1793
struct binder_object *object)
1794
{
1795
size_t read_size;
1796
struct binder_object_header *hdr;
1797
size_t object_size = 0;
1798
1799
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1800
if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1801
!IS_ALIGNED(offset, sizeof(u32)))
1802
return 0;
1803
1804
if (u) {
1805
if (copy_from_user(object, u + offset, read_size))
1806
return 0;
1807
} else {
1808
if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1809
offset, read_size))
1810
return 0;
1811
}
1812
1813
/* Ok, now see if we read a complete object. */
1814
hdr = &object->hdr;
1815
switch (hdr->type) {
1816
case BINDER_TYPE_BINDER:
1817
case BINDER_TYPE_WEAK_BINDER:
1818
case BINDER_TYPE_HANDLE:
1819
case BINDER_TYPE_WEAK_HANDLE:
1820
object_size = sizeof(struct flat_binder_object);
1821
break;
1822
case BINDER_TYPE_FD:
1823
object_size = sizeof(struct binder_fd_object);
1824
break;
1825
case BINDER_TYPE_PTR:
1826
object_size = sizeof(struct binder_buffer_object);
1827
break;
1828
case BINDER_TYPE_FDA:
1829
object_size = sizeof(struct binder_fd_array_object);
1830
break;
1831
default:
1832
return 0;
1833
}
1834
if (offset <= buffer->data_size - object_size &&
1835
buffer->data_size >= object_size)
1836
return object_size;
1837
else
1838
return 0;
1839
}
1840
1841
/**
1842
* binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1843
* @proc: binder_proc owning the buffer
1844
* @b: binder_buffer containing the object
1845
* @object: struct binder_object to read into
1846
* @index: index in offset array at which the binder_buffer_object is
1847
* located
1848
* @start_offset: points to the start of the offset array
1849
* @object_offsetp: offset of @object read from @b
1850
* @num_valid: the number of valid offsets in the offset array
1851
*
1852
* Return: If @index is within the valid range of the offset array
1853
* described by @start_offset and @num_valid, and if there's a valid
1854
* binder_buffer_object at the offset found in index @index
1855
* of the offset array, that object is returned. Otherwise,
1856
* %NULL is returned.
1857
* Note that the offset found in index @index itself is not
1858
* verified; this function assumes that @num_valid elements
1859
* from @start_offset were previously verified to have valid offsets.
1860
* If @object_offsetp is non-NULL, then the offset within
1861
* @b is written to it.
1862
*/
1863
static struct binder_buffer_object *binder_validate_ptr(
1864
struct binder_proc *proc,
1865
struct binder_buffer *b,
1866
struct binder_object *object,
1867
binder_size_t index,
1868
binder_size_t start_offset,
1869
binder_size_t *object_offsetp,
1870
binder_size_t num_valid)
1871
{
1872
size_t object_size;
1873
binder_size_t object_offset;
1874
unsigned long buffer_offset;
1875
1876
if (index >= num_valid)
1877
return NULL;
1878
1879
buffer_offset = start_offset + sizeof(binder_size_t) * index;
1880
if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1881
b, buffer_offset,
1882
sizeof(object_offset)))
1883
return NULL;
1884
object_size = binder_get_object(proc, NULL, b, object_offset, object);
1885
if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1886
return NULL;
1887
if (object_offsetp)
1888
*object_offsetp = object_offset;
1889
1890
return &object->bbo;
1891
}
1892
1893
/**
1894
* binder_validate_fixup() - validates pointer/fd fixups happen in order.
1895
* @proc: binder_proc owning the buffer
1896
* @b: transaction buffer
1897
* @objects_start_offset: offset to start of objects buffer
1898
* @buffer_obj_offset: offset to binder_buffer_object in which to fix up
1899
* @fixup_offset: start offset in @b to fix up
1900
* @last_obj_offset: offset to last binder_buffer_object that we fixed
1901
* @last_min_offset: minimum fixup offset in object at @last_obj_offset
1902
*
1903
* Return: %true if a fixup in buffer @b at offset @fixup_offset is
1904
* allowed.
1905
*
1906
* For safety reasons, we only allow fixups inside a buffer to happen
1907
* at increasing offsets; additionally, we only allow fixup on the last
1908
* buffer object that was verified, or one of its parents.
1909
*
1910
* Example of what is allowed:
1911
*
1912
* A
1913
* B (parent = A, offset = 0)
1914
* C (parent = A, offset = 16)
1915
* D (parent = C, offset = 0)
1916
* E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1917
*
1918
* Examples of what is not allowed:
1919
*
1920
* Decreasing offsets within the same parent:
1921
* A
1922
* C (parent = A, offset = 16)
1923
* B (parent = A, offset = 0) // decreasing offset within A
1924
*
1925
* Referring to a parent that wasn't the last object or any of its parents:
1926
* A
1927
* B (parent = A, offset = 0)
1928
* C (parent = A, offset = 0)
1929
* C (parent = A, offset = 16)
1930
* D (parent = B, offset = 0) // B is not A or any of A's parents
1931
*/
1932
static bool binder_validate_fixup(struct binder_proc *proc,
1933
struct binder_buffer *b,
1934
binder_size_t objects_start_offset,
1935
binder_size_t buffer_obj_offset,
1936
binder_size_t fixup_offset,
1937
binder_size_t last_obj_offset,
1938
binder_size_t last_min_offset)
1939
{
1940
if (!last_obj_offset) {
1941
/* No previously-verified buffer object to fix up in */
1942
return false;
1943
}
1944
1945
while (last_obj_offset != buffer_obj_offset) {
1946
unsigned long buffer_offset;
1947
struct binder_object last_object;
1948
struct binder_buffer_object *last_bbo;
1949
size_t object_size = binder_get_object(proc, NULL, b,
1950
last_obj_offset,
1951
&last_object);
1952
if (object_size != sizeof(*last_bbo))
1953
return false;
1954
1955
last_bbo = &last_object.bbo;
1956
/*
1957
* Safe to retrieve the parent of last_obj, since it
1958
* was already previously verified by the driver.
1959
*/
1960
if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1961
return false;
1962
last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1963
buffer_offset = objects_start_offset +
1964
sizeof(binder_size_t) * last_bbo->parent;
1965
if (binder_alloc_copy_from_buffer(&proc->alloc,
1966
&last_obj_offset,
1967
b, buffer_offset,
1968
sizeof(last_obj_offset)))
1969
return false;
1970
}
1971
return (fixup_offset >= last_min_offset);
1972
}
1973
1974
/**
1975
* struct binder_task_work_cb - for deferred close
1976
*
1977
* @twork: callback_head for task work
1978
* @file: file to close
1979
*
1980
* Structure to pass task work to be handled after
1981
* returning from binder_ioctl() via task_work_add().
1982
*/
1983
struct binder_task_work_cb {
1984
struct callback_head twork;
1985
struct file *file;
1986
};
1987
1988
/**
1989
* binder_do_fd_close() - complete a deferred file close
1990
* @twork: callback head for task work
1991
*
1992
* It is not safe to call ksys_close() during the binder_ioctl()
1993
* function if there is a chance that binder's own file descriptor
1994
* might be closed. This is to meet the requirements for using
1995
* fdget() (see comments for __fget_light()). Therefore use
1996
* task_work_add() to schedule the close operation once we have
1997
* returned from binder_ioctl(). This function is a callback
1998
* for that mechanism and releases the reference held on the
1999
* file behind the given file descriptor.
2000
*/
2001
static void binder_do_fd_close(struct callback_head *twork)
2002
{
2003
struct binder_task_work_cb *twcb = container_of(twork,
2004
struct binder_task_work_cb, twork);
2005
2006
fput(twcb->file);
2007
kfree(twcb);
2008
}
2009
2010
/**
2011
* binder_deferred_fd_close() - schedule a close for the given file-descriptor
2012
* @fd: file-descriptor to close
2013
*
2014
* See comments in binder_do_fd_close(). This function is used to schedule
2015
* a file-descriptor to be closed after returning from binder_ioctl().
2016
*/
2017
static void binder_deferred_fd_close(int fd)
2018
{
2019
struct binder_task_work_cb *twcb;
2020
2021
twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2022
if (!twcb)
2023
return;
2024
init_task_work(&twcb->twork, binder_do_fd_close);
2025
twcb->file = file_close_fd(fd);
2026
if (twcb->file) {
2027
// pin it until binder_do_fd_close(); see comments there
2028
get_file(twcb->file);
2029
filp_close(twcb->file, current->files);
2030
task_work_add(current, &twcb->twork, TWA_RESUME);
2031
} else {
2032
kfree(twcb);
2033
}
2034
}
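/*
 * Roughly: file_close_fd() detaches @fd from the task's fd table right
 * away, get_file() keeps the struct file pinned, filp_close() performs
 * the synchronous flush and drops the fd table's reference, and the
 * pinning reference is dropped by fput() in binder_do_fd_close() once
 * the task has returned to user space (TWA_RESUME).
 */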
2035
2036
static void binder_transaction_buffer_release(struct binder_proc *proc,
2037
struct binder_thread *thread,
2038
struct binder_buffer *buffer,
2039
binder_size_t off_end_offset,
2040
bool is_failure)
2041
{
2042
int debug_id = buffer->debug_id;
2043
binder_size_t off_start_offset, buffer_offset;
2044
2045
binder_debug(BINDER_DEBUG_TRANSACTION,
2046
"%d buffer release %d, size %zd-%zd, failed at %llx\n",
2047
proc->pid, buffer->debug_id,
2048
buffer->data_size, buffer->offsets_size,
2049
(unsigned long long)off_end_offset);
2050
2051
if (buffer->target_node)
2052
binder_dec_node(buffer->target_node, 1, 0);
2053
2054
off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2055
2056
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2057
buffer_offset += sizeof(binder_size_t)) {
2058
struct binder_object_header *hdr;
2059
size_t object_size = 0;
2060
struct binder_object object;
2061
binder_size_t object_offset;
2062
2063
if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2064
buffer, buffer_offset,
2065
sizeof(object_offset)))
2066
object_size = binder_get_object(proc, NULL, buffer,
2067
object_offset, &object);
2068
if (object_size == 0) {
2069
pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2070
debug_id, (u64)object_offset, buffer->data_size);
2071
continue;
2072
}
2073
hdr = &object.hdr;
2074
switch (hdr->type) {
2075
case BINDER_TYPE_BINDER:
2076
case BINDER_TYPE_WEAK_BINDER: {
2077
struct flat_binder_object *fp;
2078
struct binder_node *node;
2079
2080
fp = to_flat_binder_object(hdr);
2081
node = binder_get_node(proc, fp->binder);
2082
if (node == NULL) {
2083
pr_err("transaction release %d bad node %016llx\n",
2084
debug_id, (u64)fp->binder);
2085
break;
2086
}
2087
binder_debug(BINDER_DEBUG_TRANSACTION,
2088
" node %d u%016llx\n",
2089
node->debug_id, (u64)node->ptr);
2090
binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2091
0);
2092
binder_put_node(node);
2093
} break;
2094
case BINDER_TYPE_HANDLE:
2095
case BINDER_TYPE_WEAK_HANDLE: {
2096
struct flat_binder_object *fp;
2097
struct binder_ref_data rdata;
2098
int ret;
2099
2100
fp = to_flat_binder_object(hdr);
2101
ret = binder_dec_ref_for_handle(proc, fp->handle,
2102
hdr->type == BINDER_TYPE_HANDLE, &rdata);
2103
2104
if (ret) {
2105
pr_err("transaction release %d bad handle %d, ret = %d\n",
2106
debug_id, fp->handle, ret);
2107
break;
2108
}
2109
binder_debug(BINDER_DEBUG_TRANSACTION,
2110
" ref %d desc %d\n",
2111
rdata.debug_id, rdata.desc);
2112
} break;
2113
2114
case BINDER_TYPE_FD: {
2115
/*
2116
* No need to close the file here since user-space
2117
* closes it for successfully delivered
2118
* transactions. For transactions that weren't
2119
* delivered, the new fd was never allocated so
2120
* there is no need to close and the fput on the
2121
* file is done when the transaction is torn
2122
* down.
2123
*/
2124
} break;
2125
case BINDER_TYPE_PTR:
2126
/*
2127
* Nothing to do here, this will get cleaned up when the
2128
* transaction buffer gets freed
2129
*/
2130
break;
2131
case BINDER_TYPE_FDA: {
2132
struct binder_fd_array_object *fda;
2133
struct binder_buffer_object *parent;
2134
struct binder_object ptr_object;
2135
binder_size_t fda_offset;
2136
size_t fd_index;
2137
binder_size_t fd_buf_size;
2138
binder_size_t num_valid;
2139
2140
if (is_failure) {
2141
/*
2142
* The fd fixups have not been applied so no
2143
* fds need to be closed.
2144
*/
2145
continue;
2146
}
2147
2148
num_valid = (buffer_offset - off_start_offset) /
2149
sizeof(binder_size_t);
2150
fda = to_binder_fd_array_object(hdr);
2151
parent = binder_validate_ptr(proc, buffer, &ptr_object,
2152
fda->parent,
2153
off_start_offset,
2154
NULL,
2155
num_valid);
2156
if (!parent) {
2157
pr_err("transaction release %d bad parent offset\n",
2158
debug_id);
2159
continue;
2160
}
2161
fd_buf_size = sizeof(u32) * fda->num_fds;
2162
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2163
pr_err("transaction release %d invalid number of fds (%lld)\n",
2164
debug_id, (u64)fda->num_fds);
2165
continue;
2166
}
2167
if (fd_buf_size > parent->length ||
2168
fda->parent_offset > parent->length - fd_buf_size) {
2169
/* No space for all file descriptors here. */
2170
pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2171
debug_id, (u64)fda->num_fds);
2172
continue;
2173
}
2174
/*
2175
* the source data for binder_buffer_object is visible
2176
* to user-space and the @buffer element is the user
2177
* pointer to the buffer_object containing the fd_array.
2178
* Convert the address to an offset relative to
2179
* the base of the transaction buffer.
2180
*/
2181
fda_offset = parent->buffer - buffer->user_data +
2182
fda->parent_offset;
2183
for (fd_index = 0; fd_index < fda->num_fds;
2184
fd_index++) {
2185
u32 fd;
2186
int err;
2187
binder_size_t offset = fda_offset +
2188
fd_index * sizeof(fd);
2189
2190
err = binder_alloc_copy_from_buffer(
2191
&proc->alloc, &fd, buffer,
2192
offset, sizeof(fd));
2193
WARN_ON(err);
2194
if (!err) {
2195
binder_deferred_fd_close(fd);
2196
/*
2197
* Need to make sure the thread goes
2198
* back to userspace to complete the
2199
* deferred close
2200
*/
2201
if (thread)
2202
thread->looper_need_return = true;
2203
}
2204
}
2205
} break;
2206
default:
2207
pr_err("transaction release %d bad object type %x\n",
2208
debug_id, hdr->type);
2209
break;
2210
}
2211
}
2212
}
2213
2214
/* Clean up all the objects in the buffer */
2215
static inline void binder_release_entire_buffer(struct binder_proc *proc,
2216
struct binder_thread *thread,
2217
struct binder_buffer *buffer,
2218
bool is_failure)
2219
{
2220
binder_size_t off_end_offset;
2221
2222
off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2223
off_end_offset += buffer->offsets_size;
2224
2225
binder_transaction_buffer_release(proc, thread, buffer,
2226
off_end_offset, is_failure);
2227
}
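/*
 * Here off_end_offset covers the entire offsets array, so every object
 * in the buffer is released. Error paths that only partially processed
 * a transaction instead call binder_transaction_buffer_release() with a
 * smaller end offset, undoing just the objects already translated.
 */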
2228
2229
static int binder_translate_binder(struct flat_binder_object *fp,
2230
struct binder_transaction *t,
2231
struct binder_thread *thread)
2232
{
2233
struct binder_node *node;
2234
struct binder_proc *proc = thread->proc;
2235
struct binder_proc *target_proc = t->to_proc;
2236
struct binder_ref_data rdata;
2237
int ret = 0;
2238
2239
node = binder_get_node(proc, fp->binder);
2240
if (!node) {
2241
node = binder_new_node(proc, fp);
2242
if (!node)
2243
return -ENOMEM;
2244
}
2245
if (fp->cookie != node->cookie) {
2246
binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2247
proc->pid, thread->pid, (u64)fp->binder,
2248
node->debug_id, (u64)fp->cookie,
2249
(u64)node->cookie);
2250
ret = -EINVAL;
2251
goto done;
2252
}
2253
if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2254
ret = -EPERM;
2255
goto done;
2256
}
2257
2258
ret = binder_inc_ref_for_node(target_proc, node,
2259
fp->hdr.type == BINDER_TYPE_BINDER,
2260
&thread->todo, &rdata);
2261
if (ret)
2262
goto done;
2263
2264
if (fp->hdr.type == BINDER_TYPE_BINDER)
2265
fp->hdr.type = BINDER_TYPE_HANDLE;
2266
else
2267
fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2268
fp->binder = 0;
2269
fp->handle = rdata.desc;
2270
fp->cookie = 0;
2271
2272
trace_binder_transaction_node_to_ref(t, node, &rdata);
2273
binder_debug(BINDER_DEBUG_TRANSACTION,
2274
" node %d u%016llx -> ref %d desc %d\n",
2275
node->debug_id, (u64)node->ptr,
2276
rdata.debug_id, rdata.desc);
2277
done:
2278
binder_put_node(node);
2279
return ret;
2280
}
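/*
 * Net effect: the sender's BINDER_TYPE_(WEAK_)BINDER object, addressed
 * by its local (binder, cookie) pair, leaves here as a
 * BINDER_TYPE_(WEAK_)HANDLE whose handle is a ref descriptor in the
 * target process, with a strong or weak reference taken on the node so
 * it stays alive for the target.
 */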
2281
2282
static int binder_translate_handle(struct flat_binder_object *fp,
2283
struct binder_transaction *t,
2284
struct binder_thread *thread)
2285
{
2286
struct binder_proc *proc = thread->proc;
2287
struct binder_proc *target_proc = t->to_proc;
2288
struct binder_node *node;
2289
struct binder_ref_data src_rdata;
2290
int ret = 0;
2291
2292
node = binder_get_node_from_ref(proc, fp->handle,
2293
fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2294
if (!node) {
2295
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2296
proc->pid, thread->pid, fp->handle);
2297
return -EINVAL;
2298
}
2299
if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2300
ret = -EPERM;
2301
goto done;
2302
}
2303
2304
binder_node_lock(node);
2305
if (node->proc == target_proc) {
2306
if (fp->hdr.type == BINDER_TYPE_HANDLE)
2307
fp->hdr.type = BINDER_TYPE_BINDER;
2308
else
2309
fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2310
fp->binder = node->ptr;
2311
fp->cookie = node->cookie;
2312
if (node->proc)
2313
binder_inner_proc_lock(node->proc);
2314
else
2315
__acquire(&node->proc->inner_lock);
2316
binder_inc_node_nilocked(node,
2317
fp->hdr.type == BINDER_TYPE_BINDER,
2318
0, NULL);
2319
if (node->proc)
2320
binder_inner_proc_unlock(node->proc);
2321
else
2322
__release(&node->proc->inner_lock);
2323
trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2324
binder_debug(BINDER_DEBUG_TRANSACTION,
2325
" ref %d desc %d -> node %d u%016llx\n",
2326
src_rdata.debug_id, src_rdata.desc, node->debug_id,
2327
(u64)node->ptr);
2328
binder_node_unlock(node);
2329
} else {
2330
struct binder_ref_data dest_rdata;
2331
2332
binder_node_unlock(node);
2333
ret = binder_inc_ref_for_node(target_proc, node,
2334
fp->hdr.type == BINDER_TYPE_HANDLE,
2335
NULL, &dest_rdata);
2336
if (ret)
2337
goto done;
2338
2339
fp->binder = 0;
2340
fp->handle = dest_rdata.desc;
2341
fp->cookie = 0;
2342
trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2343
&dest_rdata);
2344
binder_debug(BINDER_DEBUG_TRANSACTION,
2345
" ref %d desc %d -> ref %d desc %d (node %d)\n",
2346
src_rdata.debug_id, src_rdata.desc,
2347
dest_rdata.debug_id, dest_rdata.desc,
2348
node->debug_id);
2349
}
2350
done:
2351
binder_put_node(node);
2352
return ret;
2353
}
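/*
 * Two cases: if the handle refers to a node owned by the target process
 * itself, the object is converted back to a local
 * BINDER_TYPE_(WEAK_)BINDER carrying the node's original ptr/cookie;
 * otherwise a reference is taken (or reused) in the target and the
 * object becomes a handle valid in that process.
 */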
2354
2355
static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2356
struct binder_transaction *t,
2357
struct binder_thread *thread,
2358
struct binder_transaction *in_reply_to)
2359
{
2360
struct binder_proc *proc = thread->proc;
2361
struct binder_proc *target_proc = t->to_proc;
2362
struct binder_txn_fd_fixup *fixup;
2363
struct file *file;
2364
int ret = 0;
2365
bool target_allows_fd;
2366
2367
if (in_reply_to)
2368
target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2369
else
2370
target_allows_fd = t->buffer->target_node->accept_fds;
2371
if (!target_allows_fd) {
2372
binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2373
proc->pid, thread->pid,
2374
in_reply_to ? "reply" : "transaction",
2375
fd);
2376
ret = -EPERM;
2377
goto err_fd_not_accepted;
2378
}
2379
2380
file = fget(fd);
2381
if (!file) {
2382
binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2383
proc->pid, thread->pid, fd);
2384
ret = -EBADF;
2385
goto err_fget;
2386
}
2387
ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2388
if (ret < 0) {
2389
ret = -EPERM;
2390
goto err_security;
2391
}
2392
2393
/*
2394
* Add fixup record for this transaction. The allocation
2395
* of the fd in the target needs to be done from a
2396
* target thread.
2397
*/
2398
fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2399
if (!fixup) {
2400
ret = -ENOMEM;
2401
goto err_alloc;
2402
}
2403
fixup->file = file;
2404
fixup->offset = fd_offset;
2405
fixup->target_fd = -1;
2406
trace_binder_transaction_fd_send(t, fd, fixup->offset);
2407
list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2408
2409
return ret;
2410
2411
err_alloc:
2412
err_security:
2413
fput(file);
2414
err_fget:
2415
err_fd_not_accepted:
2416
return ret;
2417
}
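/*
 * Note that no fd is installed in the target here: the struct file is
 * pinned via fget() and a binder_txn_fd_fixup records the buffer offset
 * with target_fd left at -1. The actual fd allocation happens later,
 * from a thread of the target process, when the transaction's fd fixups
 * are applied.
 */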
2418
2419
/**
2420
* struct binder_ptr_fixup - data to be fixed-up in target buffer
2421
* @offset: offset in target buffer to fixup
2422
* @skip_size: bytes to skip in copy (fixup will be written later)
2423
* @fixup_data: data to write at fixup offset
2424
* @node: list node
2425
*
2426
* This is used for the pointer fixup list (pf) which is created and consumed
2427
* during binder_transaction() and is only accessed locally. No
2428
* locking is necessary.
2429
*
2430
* The list is ordered by @offset.
2431
*/
2432
struct binder_ptr_fixup {
2433
binder_size_t offset;
2434
size_t skip_size;
2435
binder_uintptr_t fixup_data;
2436
struct list_head node;
2437
};
2438
2439
/**
2440
* struct binder_sg_copy - scatter-gather data to be copied
2441
* @offset offset in target buffer
2442
* @sender_uaddr user address in source buffer
2443
* @length bytes to copy
2444
* @node list node
2445
*
2446
* This is used for the sg copy list (sgc) which is created and consumed
2447
* during binder_transaction() and is only accessed locally. No
2448
* locking is necessary.
2449
*
2450
* The list is ordered by @offset.
2451
*/
2452
struct binder_sg_copy {
2453
binder_size_t offset;
2454
const void __user *sender_uaddr;
2455
size_t length;
2456
struct list_head node;
2457
};
2458
2459
/**
2460
* binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2461
* @alloc: binder_alloc associated with @buffer
2462
* @buffer: binder buffer in target process
2463
* @sgc_head: list_head of scatter-gather copy list
2464
* @pf_head: list_head of pointer fixup list
2465
*
2466
* Processes all elements of @sgc_head, applying fixups from @pf_head
2467
* and copying the scatter-gather data from the source process' user
2468
* buffer to the target's buffer. It is expected that the list creation
2469
* and processing all occurs during binder_transaction() so these lists
2470
* are only accessed in local context.
2471
*
2472
* Return: 0=success, else -errno
2473
*/
2474
static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2475
struct binder_buffer *buffer,
2476
struct list_head *sgc_head,
2477
struct list_head *pf_head)
2478
{
2479
int ret = 0;
2480
struct binder_sg_copy *sgc, *tmpsgc;
2481
struct binder_ptr_fixup *tmppf;
2482
struct binder_ptr_fixup *pf =
2483
list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2484
node);
2485
2486
list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2487
size_t bytes_copied = 0;
2488
2489
while (bytes_copied < sgc->length) {
2490
size_t copy_size;
2491
size_t bytes_left = sgc->length - bytes_copied;
2492
size_t offset = sgc->offset + bytes_copied;
2493
2494
/*
2495
* We copy up to the fixup (pointed to by pf)
2496
*/
2497
copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2498
: bytes_left;
2499
if (!ret && copy_size)
2500
ret = binder_alloc_copy_user_to_buffer(
2501
alloc, buffer,
2502
offset,
2503
sgc->sender_uaddr + bytes_copied,
2504
copy_size);
2505
bytes_copied += copy_size;
2506
if (copy_size != bytes_left) {
2507
BUG_ON(!pf);
2508
/* we stopped at a fixup offset */
2509
if (pf->skip_size) {
2510
/*
2511
* we are just skipping. This is for
2512
* BINDER_TYPE_FDA where the translated
2513
* fds will be fixed up when we get
2514
* to target context.
2515
*/
2516
bytes_copied += pf->skip_size;
2517
} else {
2518
/* apply the fixup indicated by pf */
2519
if (!ret)
2520
ret = binder_alloc_copy_to_buffer(
2521
alloc, buffer,
2522
pf->offset,
2523
&pf->fixup_data,
2524
sizeof(pf->fixup_data));
2525
bytes_copied += sizeof(pf->fixup_data);
2526
}
2527
list_del(&pf->node);
2528
kfree(pf);
2529
pf = list_first_entry_or_null(pf_head,
2530
struct binder_ptr_fixup, node);
2531
}
2532
}
2533
list_del(&sgc->node);
2534
kfree(sgc);
2535
}
2536
list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2537
BUG_ON(pf->skip_size == 0);
2538
list_del(&pf->node);
2539
kfree(pf);
2540
}
2541
BUG_ON(!list_empty(sgc_head));
2542
2543
return ret > 0 ? -EINVAL : ret;
2544
}
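/*
 * Worked example, assuming an 8-byte binder_uintptr_t: an sg block
 * covering target offsets [0, 64) with one pointer fixup at offset 16
 * is processed as
 *
 *	copy sender bytes [0, 16)   -> target [0, 16)
 *	write pf->fixup_data (8 B)  -> target [16, 24)
 *	copy sender bytes [24, 64)  -> target [24, 64)
 *
 * so untranslated sender pointers never become visible in the target
 * buffer. A fixup with skip_size set (BINDER_TYPE_FDA) leaves its range
 * untouched; it is filled in later when the fd fixups are applied in
 * target context.
 */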
2545
2546
/**
2547
* binder_cleanup_deferred_txn_lists() - free specified lists
2548
* @sgc_head: list_head of scatter-gather copy list
2549
* @pf_head: list_head of pointer fixup list
2550
*
2551
* Called to clean up @sgc_head and @pf_head if there is an
2552
* error.
2553
*/
2554
static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2555
struct list_head *pf_head)
2556
{
2557
struct binder_sg_copy *sgc, *tmpsgc;
2558
struct binder_ptr_fixup *pf, *tmppf;
2559
2560
list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2561
list_del(&sgc->node);
2562
kfree(sgc);
2563
}
2564
list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2565
list_del(&pf->node);
2566
kfree(pf);
2567
}
2568
}
2569
2570
/**
2571
* binder_defer_copy() - queue a scatter-gather buffer for copy
2572
* @sgc_head: list_head of scatter-gather copy list
2573
* @offset: binder buffer offset in target process
2574
* @sender_uaddr: user address in source process
2575
* @length: bytes to copy
2576
*
2577
* Specify a scatter-gather block to be copied. The actual copy must
2578
* be deferred until all the needed fixups are identified and queued.
2579
* Then the copy and fixups are done together so un-translated values
2580
* from the source are never visible in the target buffer.
2581
*
2582
* We are guaranteed that repeated calls to this function will have
2583
* monotonically increasing @offset values so the list will naturally
2584
* be ordered.
2585
*
2586
* Return: 0=success, else -errno
2587
*/
2588
static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2589
const void __user *sender_uaddr, size_t length)
2590
{
2591
struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2592
2593
if (!bc)
2594
return -ENOMEM;
2595
2596
bc->offset = offset;
2597
bc->sender_uaddr = sender_uaddr;
2598
bc->length = length;
2599
INIT_LIST_HEAD(&bc->node);
2600
2601
/*
2602
* We are guaranteed that the deferred copies are in-order
2603
* so just add to the tail.
2604
*/
2605
list_add_tail(&bc->node, sgc_head);
2606
2607
return 0;
2608
}
2609
2610
/**
2611
* binder_add_fixup() - queue a fixup to be applied to sg copy
2612
* @pf_head: list_head of binder ptr fixup list
2613
* @offset: binder buffer offset in target process
2614
* @fixup: bytes to be copied for fixup
2615
* @skip_size: bytes to skip when copying (fixup will be applied later)
2616
*
2617
* Add the specified fixup to a list ordered by @offset. When copying
2618
* the scatter-gather buffers, the fixup will be copied instead of
2619
* data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2620
* will be applied later (in target process context), so we just skip
2621
* the bytes specified by @skip_size. If @skip_size is 0, we copy the
2622
* value in @fixup.
2623
*
2624
* This function is called *mostly* in @offset order, but there are
2625
* exceptions. Since out-of-order inserts are relatively uncommon,
2626
* we insert the new element by searching backward from the tail of
2627
* the list.
2628
*
2629
* Return: 0=success, else -errno
2630
*/
2631
static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2632
binder_uintptr_t fixup, size_t skip_size)
2633
{
2634
struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2635
struct binder_ptr_fixup *tmppf;
2636
2637
if (!pf)
2638
return -ENOMEM;
2639
2640
pf->offset = offset;
2641
pf->fixup_data = fixup;
2642
pf->skip_size = skip_size;
2643
INIT_LIST_HEAD(&pf->node);
2644
2645
/* Fixups are *mostly* added in-order, but there are some
2646
* exceptions. Look backwards through list for insertion point.
2647
*/
2648
list_for_each_entry_reverse(tmppf, pf_head, node) {
2649
if (tmppf->offset < pf->offset) {
2650
list_add(&pf->node, &tmppf->node);
2651
return 0;
2652
}
2653
}
2654
/*
2655
* if we get here, then the new offset is the lowest so
2656
* insert at the head
2657
*/
2658
list_add(&pf->node, pf_head);
2659
return 0;
2660
}
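/*
 * Because fixups almost always arrive in increasing @offset order, the
 * reverse walk usually stops at the tail after a single comparison; an
 * out-of-order entry walks back only as far as needed, keeping the list
 * sorted by offset as binder_do_deferred_txn_copies() requires.
 */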
2661
2662
static int binder_translate_fd_array(struct list_head *pf_head,
2663
struct binder_fd_array_object *fda,
2664
const void __user *sender_ubuffer,
2665
struct binder_buffer_object *parent,
2666
struct binder_buffer_object *sender_uparent,
2667
struct binder_transaction *t,
2668
struct binder_thread *thread,
2669
struct binder_transaction *in_reply_to)
2670
{
2671
binder_size_t fdi, fd_buf_size;
2672
binder_size_t fda_offset;
2673
const void __user *sender_ufda_base;
2674
struct binder_proc *proc = thread->proc;
2675
int ret;
2676
2677
if (fda->num_fds == 0)
2678
return 0;
2679
2680
fd_buf_size = sizeof(u32) * fda->num_fds;
2681
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2682
binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2683
proc->pid, thread->pid, (u64)fda->num_fds);
2684
return -EINVAL;
2685
}
2686
if (fd_buf_size > parent->length ||
2687
fda->parent_offset > parent->length - fd_buf_size) {
2688
/* No space for all file descriptors here. */
2689
binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2690
proc->pid, thread->pid, (u64)fda->num_fds);
2691
return -EINVAL;
2692
}
2693
/*
2694
* the source data for binder_buffer_object is visible
2695
* to user-space and the @buffer element is the user
2696
* pointer to the buffer_object containing the fd_array.
2697
* Convert the address to an offset relative to
2698
* the base of the transaction buffer.
2699
*/
2700
fda_offset = parent->buffer - t->buffer->user_data +
2701
fda->parent_offset;
2702
sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2703
fda->parent_offset;
2704
2705
if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2706
!IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2707
binder_user_error("%d:%d parent offset not aligned correctly.\n",
2708
proc->pid, thread->pid);
2709
return -EINVAL;
2710
}
2711
ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2712
if (ret)
2713
return ret;
2714
2715
for (fdi = 0; fdi < fda->num_fds; fdi++) {
2716
u32 fd;
2717
binder_size_t offset = fda_offset + fdi * sizeof(fd);
2718
binder_size_t sender_uoffset = fdi * sizeof(fd);
2719
2720
ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2721
if (!ret)
2722
ret = binder_translate_fd(fd, offset, t, thread,
2723
in_reply_to);
2724
if (ret)
2725
return ret > 0 ? -EINVAL : ret;
2726
}
2727
return 0;
2728
}
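/*
 * The binder_add_fixup() call registers a skip covering the whole fd
 * array in the target buffer, so the sender's raw fd values are never
 * copied across; each fd is then read from the sender's buffer and
 * handed to binder_translate_fd(), which queues a binder_txn_fd_fixup
 * so the translated fd can be installed from target context.
 */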
2729
2730
static int binder_fixup_parent(struct list_head *pf_head,
2731
struct binder_transaction *t,
2732
struct binder_thread *thread,
2733
struct binder_buffer_object *bp,
2734
binder_size_t off_start_offset,
2735
binder_size_t num_valid,
2736
binder_size_t last_fixup_obj_off,
2737
binder_size_t last_fixup_min_off)
2738
{
2739
struct binder_buffer_object *parent;
2740
struct binder_buffer *b = t->buffer;
2741
struct binder_proc *proc = thread->proc;
2742
struct binder_proc *target_proc = t->to_proc;
2743
struct binder_object object;
2744
binder_size_t buffer_offset;
2745
binder_size_t parent_offset;
2746
2747
if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2748
return 0;
2749
2750
parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2751
off_start_offset, &parent_offset,
2752
num_valid);
2753
if (!parent) {
2754
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2755
proc->pid, thread->pid);
2756
return -EINVAL;
2757
}
2758
2759
if (!binder_validate_fixup(target_proc, b, off_start_offset,
2760
parent_offset, bp->parent_offset,
2761
last_fixup_obj_off,
2762
last_fixup_min_off)) {
2763
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2764
proc->pid, thread->pid);
2765
return -EINVAL;
2766
}
2767
2768
if (parent->length < sizeof(binder_uintptr_t) ||
2769
bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2770
/* No space for a pointer here! */
2771
binder_user_error("%d:%d got transaction with invalid parent offset\n",
2772
proc->pid, thread->pid);
2773
return -EINVAL;
2774
}
2775
2776
buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2777
2778
return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2779
}
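/*
 * The queued fixup writes the bp->buffer value (which the caller is
 * assumed to have already rewritten to the object's address in the
 * target address space) into the parent buffer at parent->buffer +
 * bp->parent_offset, expressed above as an offset from the start of the
 * transaction buffer.
 */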
2780
2781
/**
2782
* binder_can_update_transaction() - Can a txn be superseded by an updated one?
2783
* @t1: the pending async txn in the frozen process
2784
* @t2: the new async txn to supersede the outdated pending one
2785
*
2786
* Return: true if t2 can supersede t1
2787
* false if t2 can not supersede t1
2788
*/
2789
static bool binder_can_update_transaction(struct binder_transaction *t1,
2790
struct binder_transaction *t2)
2791
{
2792
if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2793
(TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2794
return false;
2795
if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2796
t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2797
t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2798
t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2799
return true;
2800
return false;
2801
}
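/*
 * Both transactions must be one-way and marked TF_UPDATE_TXN, both must
 * still have a target proc, and they must agree on the destination
 * task, code, flags, the pid recorded in their buffers and the target
 * node's (ptr, cookie) pair; only then may the newer transaction
 * replace the stale one queued for a frozen process.
 */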
2802
2803
/**
2804
* binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2805
* @t: new async transaction
2806
* @target_list: list to find outdated transaction
2807
*
2808
* Return: the outdated transaction if found
2809
* NULL if no outdated transaction can be found
2810
*
2811
* Requires the proc->inner_lock to be held.
2812
*/
2813
static struct binder_transaction *
2814
binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2815
struct list_head *target_list)
2816
{
2817
struct binder_work *w;
2818
2819
list_for_each_entry(w, target_list, entry) {
2820
struct binder_transaction *t_queued;
2821
2822
if (w->type != BINDER_WORK_TRANSACTION)
2823
continue;
2824
t_queued = container_of(w, struct binder_transaction, work);
2825
if (binder_can_update_transaction(t_queued, t))
2826
return t_queued;
2827
}
2828
return NULL;
2829
}
2830
2831
/**
2832
* binder_proc_transaction() - sends a transaction to a process and wakes it up
2833
* @t: transaction to send
2834
* @proc: process to send the transaction to
2835
* @thread: thread in @proc to send the transaction to (may be NULL)
2836
*
2837
* This function queues a transaction to the specified process. It will try
2838
* to find a thread in the target process to handle the transaction and
2839
* wake it up. If no thread is found, the work is queued to the proc
2840
* waitqueue.
2841
*
2842
* If the @thread parameter is not NULL, the transaction is always queued
2843
* to the waitlist of that specific thread.
2844
*
2845
* Return: 0 if the transaction was successfully queued
2846
* BR_DEAD_REPLY if the target process or thread is dead
2847
* BR_FROZEN_REPLY if the target process or thread is frozen and
2848
* the sync transaction was rejected
2849
* BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2850
* and the async transaction was successfully queued
2851
*/
2852
static int binder_proc_transaction(struct binder_transaction *t,
2853
struct binder_proc *proc,
2854
struct binder_thread *thread)
2855
{
2856
struct binder_node *node = t->buffer->target_node;
2857
bool oneway = !!(t->flags & TF_ONE_WAY);
2858
bool pending_async = false;
2859
struct binder_transaction *t_outdated = NULL;
2860
bool frozen = false;
2861
2862
BUG_ON(!node);
2863
binder_node_lock(node);
2864
if (oneway) {
2865
BUG_ON(thread);
2866
if (node->has_async_transaction)
2867
pending_async = true;
2868
else
2869
node->has_async_transaction = true;
2870
}
2871
2872
binder_inner_proc_lock(proc);
2873
if (proc->is_frozen) {
2874
frozen = true;
2875
proc->sync_recv |= !oneway;
2876
proc->async_recv |= oneway;
2877
}
2878
2879
if ((frozen && !oneway) || proc->is_dead ||
2880
(thread && thread->is_dead)) {
2881
binder_inner_proc_unlock(proc);
2882
binder_node_unlock(node);
2883
return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2884
}
2885
2886
if (!thread && !pending_async)
2887
thread = binder_select_thread_ilocked(proc);
2888
2889
if (thread) {
2890
binder_enqueue_thread_work_ilocked(thread, &t->work);
2891
} else if (!pending_async) {
2892
binder_enqueue_work_ilocked(&t->work, &proc->todo);
2893
} else {
2894
if ((t->flags & TF_UPDATE_TXN) && frozen) {
2895
t_outdated = binder_find_outdated_transaction_ilocked(t,
2896
&node->async_todo);
2897
if (t_outdated) {
2898
binder_debug(BINDER_DEBUG_TRANSACTION,
2899
"txn %d supersedes %d\n",
2900
t->debug_id, t_outdated->debug_id);
2901
list_del_init(&t_outdated->work.entry);
2902
proc->outstanding_txns--;
2903
}
2904
}
2905
binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2906
}
2907
2908
if (!pending_async)
2909
binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2910
2911
proc->outstanding_txns++;
2912
binder_inner_proc_unlock(proc);
2913
binder_node_unlock(node);
2914
2915
/*
2916
* To reduce potential contention, free the outdated transaction and
2917
* buffer after releasing the locks.
2918
*/
2919
if (t_outdated) {
2920
struct binder_buffer *buffer = t_outdated->buffer;
2921
2922
t_outdated->buffer = NULL;
2923
buffer->transaction = NULL;
2924
trace_binder_transaction_update_buffer_release(buffer);
2925
binder_release_entire_buffer(proc, NULL, buffer, false);
2926
binder_alloc_free_buf(&proc->alloc, buffer);
2927
kfree(t_outdated);
2928
binder_stats_deleted(BINDER_STAT_TRANSACTION);
2929
}
2930
2931
if (oneway && frozen)
2932
return BR_TRANSACTION_PENDING_FROZEN;
2933
2934
return 0;
2935
}
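/*
 * Frozen-target handling in brief: a synchronous transaction to a
 * frozen process fails with BR_FROZEN_REPLY, while an async one is
 * still queued and the sender gets BR_TRANSACTION_PENDING_FROZEN. If
 * the new async transaction carries TF_UPDATE_TXN, a matching outdated
 * transaction already sitting in node->async_todo is unqueued and freed
 * after the locks are dropped.
 */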
2936
2937
/**
2938
* binder_get_node_refs_for_txn() - Get required refs on node for txn
2939
* @node: struct binder_node for which to get refs
2940
* @procp: returns @node->proc if valid
2941
* @error: set to BR_DEAD_REPLY if @node->proc is NULL
2942
*
2943
* User-space normally keeps the node alive when creating a transaction
2944
* since it has a reference to the target. The local strong ref keeps it
2945
* alive if the sending process dies before the target process processes
2946
* the transaction. If the source process is malicious or has a reference
2947
* counting bug, relying on the local strong ref can fail.
2948
*
2949
* Since user-space can cause the local strong ref to go away, we also take
2950
* a tmpref on the node to ensure it survives while we are constructing
2951
* the transaction. We also need a tmpref on the proc while we are
2952
* constructing the transaction, so we take that here as well.
2953
*
2954
* Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2955
* Also sets @procp if valid. If @node->proc is NULL, indicating that the
2956
* target proc has died, @error is set to BR_DEAD_REPLY.
2957
*/
2958
static struct binder_node *binder_get_node_refs_for_txn(
2959
struct binder_node *node,
2960
struct binder_proc **procp,
2961
uint32_t *error)
2962
{
2963
struct binder_node *target_node = NULL;
2964
2965
binder_node_inner_lock(node);
2966
if (node->proc) {
2967
target_node = node;
2968
binder_inc_node_nilocked(node, 1, 0, NULL);
2969
binder_inc_node_tmpref_ilocked(node);
2970
node->proc->tmp_ref++;
2971
*procp = node->proc;
2972
} else
2973
*error = BR_DEAD_REPLY;
2974
binder_node_inner_unlock(node);
2975
2976
return target_node;
2977
}
2978
2979
static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2980
uint32_t command, int32_t param)
2981
{
2982
struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2983
2984
if (!from) {
2985
/* annotation for sparse */
2986
__release(&from->proc->inner_lock);
2987
return;
2988
}
2989
2990
/* don't override existing errors */
2991
if (from->ee.command == BR_OK)
2992
binder_set_extended_error(&from->ee, id, command, param);
2993
binder_inner_proc_unlock(from->proc);
2994
binder_thread_dec_tmpref(from);
2995
}
2996
2997
/**
2998
* binder_netlink_report() - report a transaction failure via netlink
2999
* @proc: the binder proc sending the transaction
3000
* @t: the binder transaction that failed
3001
* @data_size: the user provided data size for the transaction
3002
* @error: enum binder_driver_return_protocol returned to sender
3003
*/
3004
static void binder_netlink_report(struct binder_proc *proc,
3005
struct binder_transaction *t,
3006
u32 data_size,
3007
u32 error)
3008
{
3009
const char *context = proc->context->name;
3010
struct sk_buff *skb;
3011
void *hdr;
3012
3013
if (!genl_has_listeners(&binder_nl_family, &init_net,
3014
BINDER_NLGRP_REPORT))
3015
return;
3016
3017
trace_binder_netlink_report(context, t, data_size, error);
3018
3019
skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
3020
if (!skb)
3021
return;
3022
3023
hdr = genlmsg_put(skb, 0, 0, &binder_nl_family, 0, BINDER_CMD_REPORT);
3024
if (!hdr)
3025
goto free_skb;
3026
3027
if (nla_put_u32(skb, BINDER_A_REPORT_ERROR, error) ||
3028
nla_put_string(skb, BINDER_A_REPORT_CONTEXT, context) ||
3029
nla_put_u32(skb, BINDER_A_REPORT_FROM_PID, t->from_pid) ||
3030
nla_put_u32(skb, BINDER_A_REPORT_FROM_TID, t->from_tid))
3031
goto cancel_skb;
3032
3033
if (t->to_proc &&
3034
nla_put_u32(skb, BINDER_A_REPORT_TO_PID, t->to_proc->pid))
3035
goto cancel_skb;
3036
3037
if (t->to_thread &&
3038
nla_put_u32(skb, BINDER_A_REPORT_TO_TID, t->to_thread->pid))
3039
goto cancel_skb;
3040
3041
if (t->is_reply && nla_put_flag(skb, BINDER_A_REPORT_IS_REPLY))
3042
goto cancel_skb;
3043
3044
if (nla_put_u32(skb, BINDER_A_REPORT_FLAGS, t->flags) ||
3045
nla_put_u32(skb, BINDER_A_REPORT_CODE, t->code) ||
3046
nla_put_u32(skb, BINDER_A_REPORT_DATA_SIZE, data_size))
3047
goto cancel_skb;
3048
3049
genlmsg_end(skb, hdr);
3050
genlmsg_multicast(&binder_nl_family, skb, 0, BINDER_NLGRP_REPORT,
3051
GFP_KERNEL);
3052
return;
3053
3054
cancel_skb:
3055
genlmsg_cancel(skb, hdr);
3056
free_skb:
3057
nlmsg_free(skb);
3058
}
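/*
 * The genl_has_listeners() check keeps the common case cheap: unless
 * some process has joined the BINDER_NLGRP_REPORT multicast group, no
 * skb is allocated and no attributes are filled in.
 */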
3059
3060
static void binder_transaction(struct binder_proc *proc,
3061
struct binder_thread *thread,
3062
struct binder_transaction_data *tr, int reply,
3063
binder_size_t extra_buffers_size)
3064
{
3065
int ret;
3066
struct binder_transaction *t;
3067
struct binder_work *w;
3068
struct binder_work *tcomplete;
3069
binder_size_t buffer_offset = 0;
3070
binder_size_t off_start_offset, off_end_offset;
3071
binder_size_t off_min;
3072
binder_size_t sg_buf_offset, sg_buf_end_offset;
3073
binder_size_t user_offset = 0;
3074
struct binder_proc *target_proc = NULL;
3075
struct binder_thread *target_thread = NULL;
3076
struct binder_node *target_node = NULL;
3077
struct binder_transaction *in_reply_to = NULL;
3078
struct binder_transaction_log_entry *e;
3079
uint32_t return_error = 0;
3080
uint32_t return_error_param = 0;
3081
uint32_t return_error_line = 0;
3082
binder_size_t last_fixup_obj_off = 0;
3083
binder_size_t last_fixup_min_off = 0;
3084
struct binder_context *context = proc->context;
3085
int t_debug_id = atomic_inc_return(&binder_last_id);
3086
ktime_t t_start_time = ktime_get();
3087
struct lsm_context lsmctx = { };
3088
struct list_head sgc_head;
3089
struct list_head pf_head;
3090
const void __user *user_buffer = (const void __user *)
3091
(uintptr_t)tr->data.ptr.buffer;
3092
INIT_LIST_HEAD(&sgc_head);
3093
INIT_LIST_HEAD(&pf_head);
3094
3095
e = binder_transaction_log_add(&binder_transaction_log);
3096
e->debug_id = t_debug_id;
3097
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3098
e->from_proc = proc->pid;
3099
e->from_thread = thread->pid;
3100
e->target_handle = tr->target.handle;
3101
e->data_size = tr->data_size;
3102
e->offsets_size = tr->offsets_size;
3103
strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3104
3105
binder_inner_proc_lock(proc);
3106
binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
3107
binder_inner_proc_unlock(proc);
3108
3109
t = kzalloc(sizeof(*t), GFP_KERNEL);
3110
if (!t) {
3111
binder_txn_error("%d:%d cannot allocate transaction\n",
3112
thread->pid, proc->pid);
3113
return_error = BR_FAILED_REPLY;
3114
return_error_param = -ENOMEM;
3115
return_error_line = __LINE__;
3116
goto err_alloc_t_failed;
3117
}
3118
INIT_LIST_HEAD(&t->fd_fixups);
3119
binder_stats_created(BINDER_STAT_TRANSACTION);
3120
spin_lock_init(&t->lock);
3121
t->debug_id = t_debug_id;
3122
t->start_time = t_start_time;
3123
t->from_pid = proc->pid;
3124
t->from_tid = thread->pid;
3125
t->sender_euid = task_euid(proc->tsk);
3126
t->code = tr->code;
3127
t->flags = tr->flags;
3128
t->priority = task_nice(current);
3129
t->work.type = BINDER_WORK_TRANSACTION;
3130
t->is_async = !reply && (tr->flags & TF_ONE_WAY);
3131
t->is_reply = reply;
3132
if (!reply && !(tr->flags & TF_ONE_WAY))
3133
t->from = thread;
3134
3135
if (reply) {
3136
binder_inner_proc_lock(proc);
3137
in_reply_to = thread->transaction_stack;
3138
if (in_reply_to == NULL) {
3139
binder_inner_proc_unlock(proc);
3140
binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3141
proc->pid, thread->pid);
3142
return_error = BR_FAILED_REPLY;
3143
return_error_param = -EPROTO;
3144
return_error_line = __LINE__;
3145
goto err_empty_call_stack;
3146
}
3147
if (in_reply_to->to_thread != thread) {
3148
spin_lock(&in_reply_to->lock);
3149
binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3150
proc->pid, thread->pid, in_reply_to->debug_id,
3151
in_reply_to->to_proc ?
3152
in_reply_to->to_proc->pid : 0,
3153
in_reply_to->to_thread ?
3154
in_reply_to->to_thread->pid : 0);
3155
spin_unlock(&in_reply_to->lock);
3156
binder_inner_proc_unlock(proc);
3157
return_error = BR_FAILED_REPLY;
3158
return_error_param = -EPROTO;
3159
return_error_line = __LINE__;
3160
in_reply_to = NULL;
3161
goto err_bad_call_stack;
3162
}
3163
thread->transaction_stack = in_reply_to->to_parent;
3164
binder_inner_proc_unlock(proc);
3165
binder_set_nice(in_reply_to->saved_priority);
3166
target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3167
if (target_thread == NULL) {
3168
/* annotation for sparse */
3169
__release(&target_thread->proc->inner_lock);
3170
binder_txn_error("%d:%d reply target not found\n",
3171
thread->pid, proc->pid);
3172
return_error = BR_DEAD_REPLY;
3173
return_error_line = __LINE__;
3174
goto err_dead_binder;
3175
}
3176
if (target_thread->transaction_stack != in_reply_to) {
3177
binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3178
proc->pid, thread->pid,
3179
target_thread->transaction_stack ?
3180
target_thread->transaction_stack->debug_id : 0,
3181
in_reply_to->debug_id);
3182
binder_inner_proc_unlock(target_thread->proc);
3183
return_error = BR_FAILED_REPLY;
3184
return_error_param = -EPROTO;
3185
return_error_line = __LINE__;
3186
in_reply_to = NULL;
3187
target_thread = NULL;
3188
goto err_dead_binder;
3189
}
3190
target_proc = target_thread->proc;
3191
target_proc->tmp_ref++;
3192
binder_inner_proc_unlock(target_thread->proc);
3193
} else {
3194
if (tr->target.handle) {
3195
struct binder_ref *ref;
3196
3197
/*
3198
* There must already be a strong ref
3199
* on this node. If so, do a strong
3200
* increment on the node to ensure it
3201
* stays alive until the transaction is
3202
* done.
3203
*/
3204
binder_proc_lock(proc);
3205
ref = binder_get_ref_olocked(proc, tr->target.handle,
3206
true);
3207
if (ref) {
3208
target_node = binder_get_node_refs_for_txn(
3209
ref->node, &target_proc,
3210
&return_error);
3211
} else {
3212
binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3213
proc->pid, thread->pid, tr->target.handle);
3214
return_error = BR_FAILED_REPLY;
3215
}
3216
binder_proc_unlock(proc);
3217
} else {
3218
mutex_lock(&context->context_mgr_node_lock);
3219
target_node = context->binder_context_mgr_node;
3220
if (target_node)
3221
target_node = binder_get_node_refs_for_txn(
3222
target_node, &target_proc,
3223
&return_error);
3224
else
3225
return_error = BR_DEAD_REPLY;
3226
mutex_unlock(&context->context_mgr_node_lock);
3227
if (target_node && target_proc->pid == proc->pid) {
3228
binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3229
proc->pid, thread->pid);
3230
return_error = BR_FAILED_REPLY;
3231
return_error_param = -EINVAL;
3232
return_error_line = __LINE__;
3233
goto err_invalid_target_handle;
3234
}
3235
}
3236
if (!target_node) {
3237
binder_txn_error("%d:%d cannot find target node\n",
3238
proc->pid, thread->pid);
3239
/* return_error is set above */
3240
return_error_param = -EINVAL;
3241
return_error_line = __LINE__;
3242
goto err_dead_binder;
3243
}
3244
e->to_node = target_node->debug_id;
3245
if (WARN_ON(proc == target_proc)) {
3246
binder_txn_error("%d:%d self transactions not allowed\n",
3247
thread->pid, proc->pid);
3248
return_error = BR_FAILED_REPLY;
3249
return_error_param = -EINVAL;
3250
return_error_line = __LINE__;
3251
goto err_invalid_target_handle;
3252
}
3253
if (security_binder_transaction(proc->cred,
3254
target_proc->cred) < 0) {
3255
binder_txn_error("%d:%d transaction credentials failed\n",
3256
thread->pid, proc->pid);
3257
return_error = BR_FAILED_REPLY;
3258
return_error_param = -EPERM;
3259
return_error_line = __LINE__;
3260
goto err_invalid_target_handle;
3261
}
3262
binder_inner_proc_lock(proc);
3263
3264
w = list_first_entry_or_null(&thread->todo,
3265
struct binder_work, entry);
3266
if (!(tr->flags & TF_ONE_WAY) && w &&
3267
w->type == BINDER_WORK_TRANSACTION) {
3268
/*
3269
* Do not allow new outgoing transaction from a
3270
* thread that has a transaction at the head of
3271
* its todo list. Only need to check the head
3272
* because binder_select_thread_ilocked picks a
3273
* thread from proc->waiting_threads to enqueue
3274
* the transaction, and nothing is queued to the
3275
* todo list while the thread is on waiting_threads.
3276
*/
3277
binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3278
proc->pid, thread->pid);
3279
binder_inner_proc_unlock(proc);
3280
return_error = BR_FAILED_REPLY;
3281
return_error_param = -EPROTO;
3282
return_error_line = __LINE__;
3283
goto err_bad_todo_list;
3284
}
3285
3286
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3287
struct binder_transaction *tmp;
3288
3289
tmp = thread->transaction_stack;
3290
if (tmp->to_thread != thread) {
3291
spin_lock(&tmp->lock);
3292
binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3293
proc->pid, thread->pid, tmp->debug_id,
3294
tmp->to_proc ? tmp->to_proc->pid : 0,
3295
tmp->to_thread ?
3296
tmp->to_thread->pid : 0);
3297
spin_unlock(&tmp->lock);
3298
binder_inner_proc_unlock(proc);
3299
return_error = BR_FAILED_REPLY;
3300
return_error_param = -EPROTO;
3301
return_error_line = __LINE__;
3302
goto err_bad_call_stack;
3303
}
3304
while (tmp) {
3305
struct binder_thread *from;
3306
3307
spin_lock(&tmp->lock);
3308
from = tmp->from;
3309
if (from && from->proc == target_proc) {
3310
atomic_inc(&from->tmp_ref);
3311
target_thread = from;
3312
spin_unlock(&tmp->lock);
3313
break;
3314
}
3315
spin_unlock(&tmp->lock);
3316
tmp = tmp->from_parent;
3317
}
3318
}
3319
binder_inner_proc_unlock(proc);
3320
}
3321
3322
t->to_proc = target_proc;
3323
t->to_thread = target_thread;
3324
if (target_thread)
3325
e->to_thread = target_thread->pid;
3326
e->to_proc = target_proc->pid;
3327
3328
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3329
if (tcomplete == NULL) {
3330
binder_txn_error("%d:%d cannot allocate work for transaction\n",
3331
thread->pid, proc->pid);
3332
return_error = BR_FAILED_REPLY;
3333
return_error_param = -ENOMEM;
3334
return_error_line = __LINE__;
3335
goto err_alloc_tcomplete_failed;
3336
}
3337
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3338
3339
if (reply)
3340
binder_debug(BINDER_DEBUG_TRANSACTION,
3341
"%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
3342
proc->pid, thread->pid, t->debug_id,
3343
target_proc->pid, target_thread->pid,
3344
(u64)tr->data_size, (u64)tr->offsets_size,
3345
(u64)extra_buffers_size);
3346
else
3347
binder_debug(BINDER_DEBUG_TRANSACTION,
3348
"%d:%d BC_TRANSACTION %d -> %d - node %d, data size %lld-%lld-%lld\n",
3349
proc->pid, thread->pid, t->debug_id,
3350
target_proc->pid, target_node->debug_id,
3351
(u64)tr->data_size, (u64)tr->offsets_size,
3352
(u64)extra_buffers_size);
3353
3354
if (target_node && target_node->txn_security_ctx) {
3355
u32 secid;
3356
size_t added_size;
3357
3358
security_cred_getsecid(proc->cred, &secid);
3359
ret = security_secid_to_secctx(secid, &lsmctx);
3360
if (ret < 0) {
3361
binder_txn_error("%d:%d failed to get security context\n",
3362
thread->pid, proc->pid);
3363
return_error = BR_FAILED_REPLY;
3364
return_error_param = ret;
3365
return_error_line = __LINE__;
3366
goto err_get_secctx_failed;
3367
}
3368
added_size = ALIGN(lsmctx.len, sizeof(u64));
3369
extra_buffers_size += added_size;
3370
if (extra_buffers_size < added_size) {
3371
binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3372
thread->pid, proc->pid);
3373
return_error = BR_FAILED_REPLY;
3374
return_error_param = -EINVAL;
3375
return_error_line = __LINE__;
3376
goto err_bad_extra_size;
3377
}
3378
}
3379
3380
trace_binder_transaction(reply, t, target_node);
3381
3382
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3383
tr->offsets_size, extra_buffers_size,
3384
!reply && (t->flags & TF_ONE_WAY));
3385
if (IS_ERR(t->buffer)) {
3386
char *s;
3387
3388
ret = PTR_ERR(t->buffer);
3389
s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3390
: (ret == -ENOSPC) ? ": no space left"
3391
: (ret == -ENOMEM) ? ": memory allocation failed"
3392
: "";
3393
binder_txn_error("cannot allocate buffer%s", s);
3394
3395
return_error_param = PTR_ERR(t->buffer);
3396
return_error = return_error_param == -ESRCH ?
3397
BR_DEAD_REPLY : BR_FAILED_REPLY;
3398
return_error_line = __LINE__;
3399
t->buffer = NULL;
3400
goto err_binder_alloc_buf_failed;
3401
}
3402
if (lsmctx.context) {
3403
int err;
3404
size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3405
ALIGN(tr->offsets_size, sizeof(void *)) +
3406
ALIGN(extra_buffers_size, sizeof(void *)) -
3407
ALIGN(lsmctx.len, sizeof(u64));
3408
3409
t->security_ctx = t->buffer->user_data + buf_offset;
3410
err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3411
t->buffer, buf_offset,
3412
lsmctx.context, lsmctx.len);
3413
if (err) {
3414
t->security_ctx = 0;
3415
WARN_ON(1);
3416
}
3417
security_release_secctx(&lsmctx);
3418
lsmctx.context = NULL;
3419
}
3420
t->buffer->debug_id = t->debug_id;
3421
t->buffer->transaction = t;
3422
t->buffer->target_node = target_node;
3423
t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3424
trace_binder_transaction_alloc_buf(t->buffer);
3425
3426
if (binder_alloc_copy_user_to_buffer(
3427
&target_proc->alloc,
3428
t->buffer,
3429
ALIGN(tr->data_size, sizeof(void *)),
3430
(const void __user *)
3431
(uintptr_t)tr->data.ptr.offsets,
3432
tr->offsets_size)) {
3433
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3434
proc->pid, thread->pid);
3435
return_error = BR_FAILED_REPLY;
3436
return_error_param = -EFAULT;
3437
return_error_line = __LINE__;
3438
goto err_copy_data_failed;
3439
}
3440
if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3441
binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3442
proc->pid, thread->pid, (u64)tr->offsets_size);
3443
return_error = BR_FAILED_REPLY;
3444
return_error_param = -EINVAL;
3445
return_error_line = __LINE__;
3446
goto err_bad_offset;
3447
}
3448
if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3449
binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3450
proc->pid, thread->pid,
3451
(u64)extra_buffers_size);
3452
return_error = BR_FAILED_REPLY;
3453
return_error_param = -EINVAL;
3454
return_error_line = __LINE__;
3455
goto err_bad_offset;
3456
}
3457
off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3458
buffer_offset = off_start_offset;
3459
off_end_offset = off_start_offset + tr->offsets_size;
3460
sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3461
sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3462
ALIGN(lsmctx.len, sizeof(u64));
3463
off_min = 0;
3464
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3465
buffer_offset += sizeof(binder_size_t)) {
3466
struct binder_object_header *hdr;
3467
size_t object_size;
3468
struct binder_object object;
3469
binder_size_t object_offset;
3470
binder_size_t copy_size;
3471
3472
if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3473
&object_offset,
3474
t->buffer,
3475
buffer_offset,
3476
sizeof(object_offset))) {
3477
binder_txn_error("%d:%d copy offset from buffer failed\n",
3478
thread->pid, proc->pid);
3479
return_error = BR_FAILED_REPLY;
3480
return_error_param = -EINVAL;
3481
return_error_line = __LINE__;
3482
goto err_bad_offset;
3483
}
3484
3485
/*
3486
* Copy the source user buffer up to the next object
3487
* that will be processed.
3488
*/
3489
copy_size = object_offset - user_offset;
3490
if (copy_size && (user_offset > object_offset ||
3491
object_offset > tr->data_size ||
3492
binder_alloc_copy_user_to_buffer(
3493
&target_proc->alloc,
3494
t->buffer, user_offset,
3495
user_buffer + user_offset,
3496
copy_size))) {
3497
binder_user_error("%d:%d got transaction with invalid data ptr\n",
3498
proc->pid, thread->pid);
3499
return_error = BR_FAILED_REPLY;
3500
return_error_param = -EFAULT;
3501
return_error_line = __LINE__;
3502
goto err_copy_data_failed;
3503
}
3504
object_size = binder_get_object(target_proc, user_buffer,
3505
t->buffer, object_offset, &object);
3506
if (object_size == 0 || object_offset < off_min) {
3507
binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3508
proc->pid, thread->pid,
3509
(u64)object_offset,
3510
(u64)off_min,
3511
(u64)t->buffer->data_size);
3512
return_error = BR_FAILED_REPLY;
3513
return_error_param = -EINVAL;
3514
return_error_line = __LINE__;
3515
goto err_bad_offset;
3516
}
3517
/*
3518
* Set offset to the next buffer fragment to be
3519
* copied
3520
*/
3521
user_offset = object_offset + object_size;
3522
3523
hdr = &object.hdr;
3524
off_min = object_offset + object_size;
3525
switch (hdr->type) {
3526
case BINDER_TYPE_BINDER:
3527
case BINDER_TYPE_WEAK_BINDER: {
3528
struct flat_binder_object *fp;
3529
3530
fp = to_flat_binder_object(hdr);
3531
ret = binder_translate_binder(fp, t, thread);
3532
3533
if (ret < 0 ||
3534
binder_alloc_copy_to_buffer(&target_proc->alloc,
3535
t->buffer,
3536
object_offset,
3537
fp, sizeof(*fp))) {
3538
binder_txn_error("%d:%d translate binder failed\n",
3539
thread->pid, proc->pid);
3540
return_error = BR_FAILED_REPLY;
3541
return_error_param = ret;
3542
return_error_line = __LINE__;
3543
goto err_translate_failed;
3544
}
3545
} break;
3546
case BINDER_TYPE_HANDLE:
3547
case BINDER_TYPE_WEAK_HANDLE: {
3548
struct flat_binder_object *fp;
3549
3550
fp = to_flat_binder_object(hdr);
3551
ret = binder_translate_handle(fp, t, thread);
3552
if (ret < 0 ||
3553
binder_alloc_copy_to_buffer(&target_proc->alloc,
3554
t->buffer,
3555
object_offset,
3556
fp, sizeof(*fp))) {
3557
binder_txn_error("%d:%d translate handle failed\n",
3558
thread->pid, proc->pid);
3559
return_error = BR_FAILED_REPLY;
3560
return_error_param = ret;
3561
return_error_line = __LINE__;
3562
goto err_translate_failed;
3563
}
3564
} break;
3565
3566
case BINDER_TYPE_FD: {
3567
struct binder_fd_object *fp = to_binder_fd_object(hdr);
3568
binder_size_t fd_offset = object_offset +
3569
(uintptr_t)&fp->fd - (uintptr_t)fp;
3570
int ret = binder_translate_fd(fp->fd, fd_offset, t,
3571
thread, in_reply_to);
3572
3573
fp->pad_binder = 0;
3574
if (ret < 0 ||
3575
binder_alloc_copy_to_buffer(&target_proc->alloc,
3576
t->buffer,
3577
object_offset,
3578
fp, sizeof(*fp))) {
3579
binder_txn_error("%d:%d translate fd failed\n",
3580
thread->pid, proc->pid);
3581
return_error = BR_FAILED_REPLY;
3582
return_error_param = ret;
3583
return_error_line = __LINE__;
3584
goto err_translate_failed;
3585
}
3586
} break;
3587
case BINDER_TYPE_FDA: {
3588
struct binder_object ptr_object;
3589
binder_size_t parent_offset;
3590
struct binder_object user_object;
3591
size_t user_parent_size;
3592
struct binder_fd_array_object *fda =
3593
to_binder_fd_array_object(hdr);
3594
size_t num_valid = (buffer_offset - off_start_offset) /
3595
sizeof(binder_size_t);
3596
struct binder_buffer_object *parent =
3597
binder_validate_ptr(target_proc, t->buffer,
3598
&ptr_object, fda->parent,
3599
off_start_offset,
3600
&parent_offset,
3601
num_valid);
3602
if (!parent) {
3603
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3604
proc->pid, thread->pid);
3605
return_error = BR_FAILED_REPLY;
3606
return_error_param = -EINVAL;
3607
return_error_line = __LINE__;
3608
goto err_bad_parent;
3609
}
3610
if (!binder_validate_fixup(target_proc, t->buffer,
3611
off_start_offset,
3612
parent_offset,
3613
fda->parent_offset,
3614
last_fixup_obj_off,
3615
last_fixup_min_off)) {
3616
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3617
proc->pid, thread->pid);
3618
return_error = BR_FAILED_REPLY;
3619
return_error_param = -EINVAL;
3620
return_error_line = __LINE__;
3621
goto err_bad_parent;
3622
}
3623
/*
3624
* We need to read the user version of the parent
3625
* object to get the original user offset
3626
*/
3627
user_parent_size =
3628
binder_get_object(proc, user_buffer, t->buffer,
3629
parent_offset, &user_object);
3630
if (user_parent_size != sizeof(user_object.bbo)) {
3631
binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3632
proc->pid, thread->pid,
3633
user_parent_size,
3634
sizeof(user_object.bbo));
3635
return_error = BR_FAILED_REPLY;
3636
return_error_param = -EINVAL;
3637
return_error_line = __LINE__;
3638
goto err_bad_parent;
3639
}
3640
ret = binder_translate_fd_array(&pf_head, fda,
3641
user_buffer, parent,
3642
&user_object.bbo, t,
3643
thread, in_reply_to);
3644
if (!ret)
3645
ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3646
t->buffer,
3647
object_offset,
3648
fda, sizeof(*fda));
3649
if (ret) {
3650
binder_txn_error("%d:%d translate fd array failed\n",
3651
thread->pid, proc->pid);
3652
return_error = BR_FAILED_REPLY;
3653
return_error_param = ret > 0 ? -EINVAL : ret;
3654
return_error_line = __LINE__;
3655
goto err_translate_failed;
3656
}
3657
last_fixup_obj_off = parent_offset;
3658
last_fixup_min_off =
3659
fda->parent_offset + sizeof(u32) * fda->num_fds;
3660
} break;
3661
case BINDER_TYPE_PTR: {
3662
struct binder_buffer_object *bp =
3663
to_binder_buffer_object(hdr);
3664
size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3665
size_t num_valid;
3666
3667
if (bp->length > buf_left) {
3668
binder_user_error("%d:%d got transaction with too large buffer\n",
3669
proc->pid, thread->pid);
3670
return_error = BR_FAILED_REPLY;
3671
return_error_param = -EINVAL;
3672
return_error_line = __LINE__;
3673
goto err_bad_offset;
3674
}
3675
ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3676
(const void __user *)(uintptr_t)bp->buffer,
3677
bp->length);
3678
if (ret) {
3679
binder_txn_error("%d:%d deferred copy failed\n",
3680
thread->pid, proc->pid);
3681
return_error = BR_FAILED_REPLY;
3682
return_error_param = ret;
3683
return_error_line = __LINE__;
3684
goto err_translate_failed;
3685
}
3686
/* Fixup buffer pointer to target proc address space */
3687
bp->buffer = t->buffer->user_data + sg_buf_offset;
3688
sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3689
3690
num_valid = (buffer_offset - off_start_offset) /
3691
sizeof(binder_size_t);
3692
ret = binder_fixup_parent(&pf_head, t,
3693
thread, bp,
3694
off_start_offset,
3695
num_valid,
3696
last_fixup_obj_off,
3697
last_fixup_min_off);
3698
if (ret < 0 ||
3699
binder_alloc_copy_to_buffer(&target_proc->alloc,
3700
t->buffer,
3701
object_offset,
3702
bp, sizeof(*bp))) {
3703
binder_txn_error("%d:%d failed to fixup parent\n",
3704
thread->pid, proc->pid);
3705
return_error = BR_FAILED_REPLY;
3706
return_error_param = ret;
3707
return_error_line = __LINE__;
3708
goto err_translate_failed;
3709
}
3710
last_fixup_obj_off = object_offset;
3711
last_fixup_min_off = 0;
3712
} break;
3713
default:
3714
binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3715
proc->pid, thread->pid, hdr->type);
3716
return_error = BR_FAILED_REPLY;
3717
return_error_param = -EINVAL;
3718
return_error_line = __LINE__;
3719
goto err_bad_object_type;
3720
}
3721
}
3722
/* Done processing objects, copy the rest of the buffer */
3723
if (binder_alloc_copy_user_to_buffer(
3724
&target_proc->alloc,
3725
t->buffer, user_offset,
3726
user_buffer + user_offset,
3727
tr->data_size - user_offset)) {
3728
binder_user_error("%d:%d got transaction with invalid data ptr\n",
3729
proc->pid, thread->pid);
3730
return_error = BR_FAILED_REPLY;
3731
return_error_param = -EFAULT;
3732
return_error_line = __LINE__;
3733
goto err_copy_data_failed;
3734
}
3735
3736
ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3737
&sgc_head, &pf_head);
3738
if (ret) {
3739
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3740
proc->pid, thread->pid);
3741
return_error = BR_FAILED_REPLY;
3742
return_error_param = ret;
3743
return_error_line = __LINE__;
3744
goto err_copy_data_failed;
3745
}
3746
if (t->buffer->oneway_spam_suspect) {
3747
tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3748
binder_netlink_report(proc, t, tr->data_size,
3749
BR_ONEWAY_SPAM_SUSPECT);
3750
} else {
3751
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3752
}
3753
3754
if (reply) {
3755
binder_enqueue_thread_work(thread, tcomplete);
3756
binder_inner_proc_lock(target_proc);
3757
if (target_thread->is_dead) {
3758
return_error = BR_DEAD_REPLY;
3759
binder_inner_proc_unlock(target_proc);
3760
goto err_dead_proc_or_thread;
3761
}
3762
BUG_ON(t->buffer->async_transaction != 0);
3763
binder_pop_transaction_ilocked(target_thread, in_reply_to);
3764
binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3765
target_proc->outstanding_txns++;
3766
binder_inner_proc_unlock(target_proc);
3767
wake_up_interruptible_sync(&target_thread->wait);
3768
binder_free_transaction(in_reply_to);
3769
} else if (!(t->flags & TF_ONE_WAY)) {
3770
BUG_ON(t->buffer->async_transaction != 0);
3771
binder_inner_proc_lock(proc);
3772
/*
3773
* Defer the TRANSACTION_COMPLETE, so we don't return to
3774
* userspace immediately; this allows the target process to
3775
* immediately start processing this transaction, reducing
3776
* latency. We will then return the TRANSACTION_COMPLETE when
3777
* the target replies (or there is an error).
3778
*/
3779
binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3780
t->from_parent = thread->transaction_stack;
3781
thread->transaction_stack = t;
3782
binder_inner_proc_unlock(proc);
3783
return_error = binder_proc_transaction(t,
3784
target_proc, target_thread);
3785
if (return_error) {
3786
binder_inner_proc_lock(proc);
3787
binder_pop_transaction_ilocked(thread, t);
3788
binder_inner_proc_unlock(proc);
3789
goto err_dead_proc_or_thread;
3790
}
3791
} else {
3792
BUG_ON(target_node == NULL);
3793
BUG_ON(t->buffer->async_transaction != 1);
3794
return_error = binder_proc_transaction(t, target_proc, NULL);
3795
/*
3796
* Let the caller know when async transaction reaches a frozen
3797
* process and is put in a pending queue, waiting for the target
3798
* process to be unfrozen.
3799
*/
3800
if (return_error == BR_TRANSACTION_PENDING_FROZEN) {
3801
tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3802
binder_netlink_report(proc, t, tr->data_size,
3803
return_error);
3804
}
3805
binder_enqueue_thread_work(thread, tcomplete);
3806
if (return_error &&
3807
return_error != BR_TRANSACTION_PENDING_FROZEN)
3808
goto err_dead_proc_or_thread;
3809
}
3810
if (target_thread)
3811
binder_thread_dec_tmpref(target_thread);
3812
binder_proc_dec_tmpref(target_proc);
3813
if (target_node)
3814
binder_dec_node_tmpref(target_node);
3815
/*
3816
* write barrier to synchronize with initialization
3817
* of log entry
3818
*/
3819
smp_wmb();
3820
WRITE_ONCE(e->debug_id_done, t_debug_id);
3821
return;
3822
3823
err_dead_proc_or_thread:
3824
binder_txn_error("%d:%d dead process or thread\n",
3825
thread->pid, proc->pid);
3826
return_error_line = __LINE__;
3827
binder_dequeue_work(proc, tcomplete);
3828
err_translate_failed:
3829
err_bad_object_type:
3830
err_bad_offset:
3831
err_bad_parent:
3832
err_copy_data_failed:
3833
binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3834
binder_free_txn_fixups(t);
3835
trace_binder_transaction_failed_buffer_release(t->buffer);
3836
binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3837
buffer_offset, true);
3838
if (target_node)
3839
binder_dec_node_tmpref(target_node);
3840
target_node = NULL;
3841
t->buffer->transaction = NULL;
3842
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3843
err_binder_alloc_buf_failed:
3844
err_bad_extra_size:
3845
if (lsmctx.context)
3846
security_release_secctx(&lsmctx);
3847
err_get_secctx_failed:
3848
kfree(tcomplete);
3849
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3850
err_alloc_tcomplete_failed:
3851
if (trace_binder_txn_latency_free_enabled())
3852
binder_txn_latency_free(t);
3853
err_bad_todo_list:
3854
err_bad_call_stack:
3855
err_empty_call_stack:
3856
err_dead_binder:
3857
err_invalid_target_handle:
3858
if (target_node) {
3859
binder_dec_node(target_node, 1, 0);
3860
binder_dec_node_tmpref(target_node);
3861
}
3862
3863
binder_netlink_report(proc, t, tr->data_size, return_error);
3864
kfree(t);
3865
binder_stats_deleted(BINDER_STAT_TRANSACTION);
3866
err_alloc_t_failed:
3867
3868
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3869
"%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
3870
proc->pid, thread->pid, reply ? "reply" :
3871
(tr->flags & TF_ONE_WAY ? "async" : "call"),
3872
target_proc ? target_proc->pid : 0,
3873
target_thread ? target_thread->pid : 0,
3874
t_debug_id, return_error, return_error_param,
3875
tr->code, (u64)tr->data_size, (u64)tr->offsets_size,
3876
return_error_line);
3877
3878
if (target_thread)
3879
binder_thread_dec_tmpref(target_thread);
3880
if (target_proc)
3881
binder_proc_dec_tmpref(target_proc);
3882
3883
{
3884
struct binder_transaction_log_entry *fe;
3885
3886
e->return_error = return_error;
3887
e->return_error_param = return_error_param;
3888
e->return_error_line = return_error_line;
3889
fe = binder_transaction_log_add(&binder_transaction_log_failed);
3890
*fe = *e;
3891
/*
3892
* write barrier to synchronize with initialization
3893
* of log entry
3894
*/
3895
smp_wmb();
3896
WRITE_ONCE(e->debug_id_done, t_debug_id);
3897
WRITE_ONCE(fe->debug_id_done, t_debug_id);
3898
}
3899
3900
BUG_ON(thread->return_error.cmd != BR_OK);
3901
if (in_reply_to) {
3902
binder_set_txn_from_error(in_reply_to, t_debug_id,
3903
return_error, return_error_param);
3904
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3905
binder_enqueue_thread_work(thread, &thread->return_error.work);
3906
binder_send_failed_reply(in_reply_to, return_error);
3907
} else {
3908
binder_inner_proc_lock(proc);
3909
binder_set_extended_error(&thread->ee, t_debug_id,
3910
return_error, return_error_param);
3911
binder_inner_proc_unlock(proc);
3912
thread->return_error.cmd = return_error;
3913
binder_enqueue_thread_work(thread, &thread->return_error.work);
3914
}
3915
}
3916
3917
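/*
 * Handle BC_REQUEST_FREEZE_NOTIFICATION: attach a freeze notification
 * to the ref identified by handle_cookie->handle. If the target node's
 * process is alive, its current frozen state is reported right away by
 * queueing a BINDER_WORK_FROZEN_BINDER item on the requester's todo list.
 * Returns 0 on success, -ENOMEM or -EINVAL on failure.
 */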
static int
3918
binder_request_freeze_notification(struct binder_proc *proc,
3919
struct binder_thread *thread,
3920
struct binder_handle_cookie *handle_cookie)
3921
{
3922
struct binder_ref_freeze *freeze;
3923
struct binder_ref *ref;
3924
3925
freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
3926
if (!freeze)
3927
return -ENOMEM;
3928
binder_proc_lock(proc);
3929
ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3930
if (!ref) {
3931
binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
3932
proc->pid, thread->pid, handle_cookie->handle);
3933
binder_proc_unlock(proc);
3934
kfree(freeze);
3935
return -EINVAL;
3936
}
3937
3938
binder_node_lock(ref->node);
3939
if (ref->freeze) {
3940
binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
3941
proc->pid, thread->pid);
3942
binder_node_unlock(ref->node);
3943
binder_proc_unlock(proc);
3944
kfree(freeze);
3945
return -EINVAL;
3946
}
3947
3948
binder_stats_created(BINDER_STAT_FREEZE);
3949
INIT_LIST_HEAD(&freeze->work.entry);
3950
freeze->cookie = handle_cookie->cookie;
3951
freeze->work.type = BINDER_WORK_FROZEN_BINDER;
3952
ref->freeze = freeze;
3953
3954
if (ref->node->proc) {
3955
binder_inner_proc_lock(ref->node->proc);
3956
freeze->is_frozen = ref->node->proc->is_frozen;
3957
binder_inner_proc_unlock(ref->node->proc);
3958
3959
binder_inner_proc_lock(proc);
3960
binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3961
binder_wakeup_proc_ilocked(proc);
3962
binder_inner_proc_unlock(proc);
3963
}
3964
3965
binder_node_unlock(ref->node);
3966
binder_proc_unlock(proc);
3967
return 0;
3968
}
3969
3970
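/*
 * Handle BC_CLEAR_FREEZE_NOTIFICATION: detach the freeze notification
 * from the ref and reuse its work item to deliver a
 * BR_CLEAR_FREEZE_NOTIFICATION_DONE confirmation; if a notification is
 * still awaiting its ack, the confirmation is queued once the ack
 * arrives. Returns 0 on success, -EINVAL on a bad ref or cookie.
 */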
static int
3971
binder_clear_freeze_notification(struct binder_proc *proc,
3972
struct binder_thread *thread,
3973
struct binder_handle_cookie *handle_cookie)
3974
{
3975
struct binder_ref_freeze *freeze;
3976
struct binder_ref *ref;
3977
3978
binder_proc_lock(proc);
3979
ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3980
if (!ref) {
3981
binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
3982
proc->pid, thread->pid, handle_cookie->handle);
3983
binder_proc_unlock(proc);
3984
return -EINVAL;
3985
}
3986
3987
binder_node_lock(ref->node);
3988
3989
if (!ref->freeze) {
3990
binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
3991
proc->pid, thread->pid);
3992
binder_node_unlock(ref->node);
3993
binder_proc_unlock(proc);
3994
return -EINVAL;
3995
}
3996
freeze = ref->freeze;
3997
binder_inner_proc_lock(proc);
3998
if (freeze->cookie != handle_cookie->cookie) {
3999
binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
4000
proc->pid, thread->pid, (u64)freeze->cookie,
4001
(u64)handle_cookie->cookie);
4002
binder_inner_proc_unlock(proc);
4003
binder_node_unlock(ref->node);
4004
binder_proc_unlock(proc);
4005
return -EINVAL;
4006
}
4007
ref->freeze = NULL;
4008
/*
4009
* Take the existing freeze object and overwrite its work type. There are three cases here:
4010
* 1. No pending notification. In this case just add the work to the queue.
4011
* 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
4012
* should resend with the new work type.
4013
* 3. A notification is pending to be sent. Since the work is already in the queue, nothing
4014
* needs to be done here.
4015
*/
4016
freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
4017
if (list_empty(&freeze->work.entry)) {
4018
binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
4019
binder_wakeup_proc_ilocked(proc);
4020
} else if (freeze->sent) {
4021
freeze->resend = true;
4022
}
4023
binder_inner_proc_unlock(proc);
4024
binder_node_unlock(ref->node);
4025
binder_proc_unlock(proc);
4026
return 0;
4027
}
4028
4029
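/*
 * Handle BC_FREEZE_NOTIFICATION_DONE: acknowledge a delivered freeze
 * notification by removing it from proc->delivered_freeze, re-queueing
 * it if a resend was requested while it was outstanding.
 * Returns 0 on success, -EINVAL if no notification matches the cookie.
 */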
static int
4030
binder_freeze_notification_done(struct binder_proc *proc,
4031
struct binder_thread *thread,
4032
binder_uintptr_t cookie)
4033
{
4034
struct binder_ref_freeze *freeze = NULL;
4035
struct binder_work *w;
4036
4037
binder_inner_proc_lock(proc);
4038
list_for_each_entry(w, &proc->delivered_freeze, entry) {
4039
struct binder_ref_freeze *tmp_freeze =
4040
container_of(w, struct binder_ref_freeze, work);
4041
4042
if (tmp_freeze->cookie == cookie) {
4043
freeze = tmp_freeze;
4044
break;
4045
}
4046
}
4047
if (!freeze) {
4048
binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
4049
proc->pid, thread->pid, (u64)cookie);
4050
binder_inner_proc_unlock(proc);
4051
return -EINVAL;
4052
}
4053
binder_dequeue_work_ilocked(&freeze->work);
4054
freeze->sent = false;
4055
if (freeze->resend) {
4056
freeze->resend = false;
4057
binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
4058
binder_wakeup_proc_ilocked(proc);
4059
}
4060
binder_inner_proc_unlock(proc);
4061
return 0;
4062
}
4063
4064
/**
4065
* binder_free_buf() - free the specified buffer
4066
* @proc: binder proc that owns buffer
* @thread: binder thread performing the buffer release
4067
* @buffer: buffer to be freed
4068
* @is_failure: failed to send transaction
4069
*
4070
* If the buffer is for an async transaction, enqueue the next async
4071
* transaction from the node.
4072
*
4073
* Cleanup buffer and free it.
4074
*/
4075
static void
4076
binder_free_buf(struct binder_proc *proc,
4077
struct binder_thread *thread,
4078
struct binder_buffer *buffer, bool is_failure)
4079
{
4080
binder_inner_proc_lock(proc);
4081
if (buffer->transaction) {
4082
buffer->transaction->buffer = NULL;
4083
buffer->transaction = NULL;
4084
}
4085
binder_inner_proc_unlock(proc);
4086
if (buffer->async_transaction && buffer->target_node) {
4087
struct binder_node *buf_node;
4088
struct binder_work *w;
4089
4090
buf_node = buffer->target_node;
4091
binder_node_inner_lock(buf_node);
4092
BUG_ON(!buf_node->has_async_transaction);
4093
BUG_ON(buf_node->proc != proc);
4094
w = binder_dequeue_work_head_ilocked(
4095
&buf_node->async_todo);
4096
if (!w) {
4097
buf_node->has_async_transaction = false;
4098
} else {
4099
binder_enqueue_work_ilocked(
4100
w, &proc->todo);
4101
binder_wakeup_proc_ilocked(proc);
4102
}
4103
binder_node_inner_unlock(buf_node);
4104
}
4105
trace_binder_transaction_buffer_release(buffer);
4106
binder_release_entire_buffer(proc, thread, buffer, is_failure);
4107
binder_alloc_free_buf(&proc->alloc, buffer);
4108
}
4109
4110
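/*
 * Process the stream of BC_* commands in the userspace write buffer.
 * *consumed is advanced past each command handled; the loop stops early
 * when a return error is pending for the thread. Returns 0 on success
 * or a negative errno when a command cannot be parsed or processed.
 */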
static int binder_thread_write(struct binder_proc *proc,
4111
struct binder_thread *thread,
4112
binder_uintptr_t binder_buffer, size_t size,
4113
binder_size_t *consumed)
4114
{
4115
uint32_t cmd;
4116
struct binder_context *context = proc->context;
4117
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4118
void __user *ptr = buffer + *consumed;
4119
void __user *end = buffer + size;
4120
4121
while (ptr < end && thread->return_error.cmd == BR_OK) {
4122
int ret;
4123
4124
if (get_user(cmd, (uint32_t __user *)ptr))
4125
return -EFAULT;
4126
ptr += sizeof(uint32_t);
4127
trace_binder_command(cmd);
4128
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
4129
atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
4130
atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
4131
atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
4132
}
4133
switch (cmd) {
4134
case BC_INCREFS:
4135
case BC_ACQUIRE:
4136
case BC_RELEASE:
4137
case BC_DECREFS: {
4138
uint32_t target;
4139
const char *debug_string;
4140
bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
4141
bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
4142
struct binder_ref_data rdata;
4143
4144
if (get_user(target, (uint32_t __user *)ptr))
4145
return -EFAULT;
4146
4147
ptr += sizeof(uint32_t);
4148
ret = -1;
4149
if (increment && !target) {
4150
struct binder_node *ctx_mgr_node;
4151
4152
mutex_lock(&context->context_mgr_node_lock);
4153
ctx_mgr_node = context->binder_context_mgr_node;
4154
if (ctx_mgr_node) {
4155
if (ctx_mgr_node->proc == proc) {
4156
binder_user_error("%d:%d context manager tried to acquire desc 0\n",
4157
proc->pid, thread->pid);
4158
mutex_unlock(&context->context_mgr_node_lock);
4159
return -EINVAL;
4160
}
4161
ret = binder_inc_ref_for_node(
4162
proc, ctx_mgr_node,
4163
strong, NULL, &rdata);
4164
}
4165
mutex_unlock(&context->context_mgr_node_lock);
4166
}
4167
if (ret)
4168
ret = binder_update_ref_for_handle(
4169
proc, target, increment, strong,
4170
&rdata);
4171
if (!ret && rdata.desc != target) {
4172
binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
4173
proc->pid, thread->pid,
4174
target, rdata.desc);
4175
}
4176
switch (cmd) {
4177
case BC_INCREFS:
4178
debug_string = "IncRefs";
4179
break;
4180
case BC_ACQUIRE:
4181
debug_string = "Acquire";
4182
break;
4183
case BC_RELEASE:
4184
debug_string = "Release";
4185
break;
4186
case BC_DECREFS:
4187
default:
4188
debug_string = "DecRefs";
4189
break;
4190
}
4191
if (ret) {
4192
binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4193
proc->pid, thread->pid, debug_string,
4194
strong, target, ret);
4195
break;
4196
}
4197
binder_debug(BINDER_DEBUG_USER_REFS,
4198
"%d:%d %s ref %d desc %d s %d w %d\n",
4199
proc->pid, thread->pid, debug_string,
4200
rdata.debug_id, rdata.desc, rdata.strong,
4201
rdata.weak);
4202
break;
4203
}
4204
case BC_INCREFS_DONE:
4205
case BC_ACQUIRE_DONE: {
4206
binder_uintptr_t node_ptr;
4207
binder_uintptr_t cookie;
4208
struct binder_node *node;
4209
bool free_node;
4210
4211
if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4212
return -EFAULT;
4213
ptr += sizeof(binder_uintptr_t);
4214
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4215
return -EFAULT;
4216
ptr += sizeof(binder_uintptr_t);
4217
node = binder_get_node(proc, node_ptr);
4218
if (node == NULL) {
4219
binder_user_error("%d:%d %s u%016llx no match\n",
4220
proc->pid, thread->pid,
4221
cmd == BC_INCREFS_DONE ?
4222
"BC_INCREFS_DONE" :
4223
"BC_ACQUIRE_DONE",
4224
(u64)node_ptr);
4225
break;
4226
}
4227
if (cookie != node->cookie) {
4228
binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4229
proc->pid, thread->pid,
4230
cmd == BC_INCREFS_DONE ?
4231
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4232
(u64)node_ptr, node->debug_id,
4233
(u64)cookie, (u64)node->cookie);
4234
binder_put_node(node);
4235
break;
4236
}
4237
binder_node_inner_lock(node);
4238
if (cmd == BC_ACQUIRE_DONE) {
4239
if (node->pending_strong_ref == 0) {
4240
binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4241
proc->pid, thread->pid,
4242
node->debug_id);
4243
binder_node_inner_unlock(node);
4244
binder_put_node(node);
4245
break;
4246
}
4247
node->pending_strong_ref = 0;
4248
} else {
4249
if (node->pending_weak_ref == 0) {
4250
binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4251
proc->pid, thread->pid,
4252
node->debug_id);
4253
binder_node_inner_unlock(node);
4254
binder_put_node(node);
4255
break;
4256
}
4257
node->pending_weak_ref = 0;
4258
}
4259
free_node = binder_dec_node_nilocked(node,
4260
cmd == BC_ACQUIRE_DONE, 0);
4261
WARN_ON(free_node);
4262
binder_debug(BINDER_DEBUG_USER_REFS,
4263
"%d:%d %s node %d ls %d lw %d tr %d\n",
4264
proc->pid, thread->pid,
4265
cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4266
node->debug_id, node->local_strong_refs,
4267
node->local_weak_refs, node->tmp_refs);
4268
binder_node_inner_unlock(node);
4269
binder_put_node(node);
4270
break;
4271
}
4272
case BC_ATTEMPT_ACQUIRE:
4273
pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4274
return -EINVAL;
4275
case BC_ACQUIRE_RESULT:
4276
pr_err("BC_ACQUIRE_RESULT not supported\n");
4277
return -EINVAL;
4278
4279
case BC_FREE_BUFFER: {
4280
binder_uintptr_t data_ptr;
4281
struct binder_buffer *buffer;
4282
4283
if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4284
return -EFAULT;
4285
ptr += sizeof(binder_uintptr_t);
4286
4287
buffer = binder_alloc_prepare_to_free(&proc->alloc,
4288
data_ptr);
4289
if (IS_ERR_OR_NULL(buffer)) {
4290
if (PTR_ERR(buffer) == -EPERM) {
4291
binder_user_error(
4292
"%d:%d BC_FREE_BUFFER matched unreturned or currently freeing buffer at offset %lx\n",
4293
proc->pid, thread->pid,
4294
(unsigned long)data_ptr - proc->alloc.vm_start);
4295
} else {
4296
binder_user_error(
4297
"%d:%d BC_FREE_BUFFER no match for buffer at offset %lx\n",
4298
proc->pid, thread->pid,
4299
(unsigned long)data_ptr - proc->alloc.vm_start);
4300
}
4301
break;
4302
}
4303
binder_debug(BINDER_DEBUG_FREE_BUFFER,
4304
"%d:%d BC_FREE_BUFFER at offset %lx found buffer %d for %s transaction\n",
4305
proc->pid, thread->pid,
4306
(unsigned long)data_ptr - proc->alloc.vm_start,
4307
buffer->debug_id,
4308
buffer->transaction ? "active" : "finished");
4309
binder_free_buf(proc, thread, buffer, false);
4310
break;
4311
}
4312
4313
case BC_TRANSACTION_SG:
4314
case BC_REPLY_SG: {
4315
struct binder_transaction_data_sg tr;
4316
4317
if (copy_from_user(&tr, ptr, sizeof(tr)))
4318
return -EFAULT;
4319
ptr += sizeof(tr);
4320
binder_transaction(proc, thread, &tr.transaction_data,
4321
cmd == BC_REPLY_SG, tr.buffers_size);
4322
break;
4323
}
4324
case BC_TRANSACTION:
4325
case BC_REPLY: {
4326
struct binder_transaction_data tr;
4327
4328
if (copy_from_user(&tr, ptr, sizeof(tr)))
4329
return -EFAULT;
4330
ptr += sizeof(tr);
4331
binder_transaction(proc, thread, &tr,
4332
cmd == BC_REPLY, 0);
4333
break;
4334
}
4335
4336
case BC_REGISTER_LOOPER:
4337
binder_debug(BINDER_DEBUG_THREADS,
4338
"%d:%d BC_REGISTER_LOOPER\n",
4339
proc->pid, thread->pid);
4340
binder_inner_proc_lock(proc);
4341
if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4342
thread->looper |= BINDER_LOOPER_STATE_INVALID;
4343
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4344
proc->pid, thread->pid);
4345
} else if (proc->requested_threads == 0) {
4346
thread->looper |= BINDER_LOOPER_STATE_INVALID;
4347
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4348
proc->pid, thread->pid);
4349
} else {
4350
proc->requested_threads--;
4351
proc->requested_threads_started++;
4352
}
4353
thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4354
binder_inner_proc_unlock(proc);
4355
break;
4356
case BC_ENTER_LOOPER:
4357
binder_debug(BINDER_DEBUG_THREADS,
4358
"%d:%d BC_ENTER_LOOPER\n",
4359
proc->pid, thread->pid);
4360
if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4361
thread->looper |= BINDER_LOOPER_STATE_INVALID;
4362
binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4363
proc->pid, thread->pid);
4364
}
4365
thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4366
break;
4367
case BC_EXIT_LOOPER:
4368
binder_debug(BINDER_DEBUG_THREADS,
4369
"%d:%d BC_EXIT_LOOPER\n",
4370
proc->pid, thread->pid);
4371
thread->looper |= BINDER_LOOPER_STATE_EXITED;
4372
break;
4373
4374
case BC_REQUEST_DEATH_NOTIFICATION:
4375
case BC_CLEAR_DEATH_NOTIFICATION: {
4376
uint32_t target;
4377
binder_uintptr_t cookie;
4378
struct binder_ref *ref;
4379
struct binder_ref_death *death = NULL;
4380
4381
if (get_user(target, (uint32_t __user *)ptr))
4382
return -EFAULT;
4383
ptr += sizeof(uint32_t);
4384
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4385
return -EFAULT;
4386
ptr += sizeof(binder_uintptr_t);
4387
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4388
/*
4389
* Allocate memory for death notification
4390
* before taking lock
4391
*/
4392
death = kzalloc(sizeof(*death), GFP_KERNEL);
4393
if (death == NULL) {
4394
WARN_ON(thread->return_error.cmd !=
4395
BR_OK);
4396
thread->return_error.cmd = BR_ERROR;
4397
binder_enqueue_thread_work(
4398
thread,
4399
&thread->return_error.work);
4400
binder_debug(
4401
BINDER_DEBUG_FAILED_TRANSACTION,
4402
"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4403
proc->pid, thread->pid);
4404
break;
4405
}
4406
}
4407
binder_proc_lock(proc);
4408
ref = binder_get_ref_olocked(proc, target, false);
4409
if (ref == NULL) {
4410
binder_user_error("%d:%d %s invalid ref %d\n",
4411
proc->pid, thread->pid,
4412
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4413
"BC_REQUEST_DEATH_NOTIFICATION" :
4414
"BC_CLEAR_DEATH_NOTIFICATION",
4415
target);
4416
binder_proc_unlock(proc);
4417
kfree(death);
4418
break;
4419
}
4420
4421
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4422
"%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4423
proc->pid, thread->pid,
4424
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4425
"BC_REQUEST_DEATH_NOTIFICATION" :
4426
"BC_CLEAR_DEATH_NOTIFICATION",
4427
(u64)cookie, ref->data.debug_id,
4428
ref->data.desc, ref->data.strong,
4429
ref->data.weak, ref->node->debug_id);
4430
4431
binder_node_lock(ref->node);
4432
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4433
if (ref->death) {
4434
binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4435
proc->pid, thread->pid);
4436
binder_node_unlock(ref->node);
4437
binder_proc_unlock(proc);
4438
kfree(death);
4439
break;
4440
}
4441
binder_stats_created(BINDER_STAT_DEATH);
4442
INIT_LIST_HEAD(&death->work.entry);
4443
death->cookie = cookie;
4444
ref->death = death;
4445
if (ref->node->proc == NULL) {
4446
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4447
4448
binder_inner_proc_lock(proc);
4449
binder_enqueue_work_ilocked(
4450
&ref->death->work, &proc->todo);
4451
binder_wakeup_proc_ilocked(proc);
4452
binder_inner_proc_unlock(proc);
4453
}
4454
} else {
4455
if (ref->death == NULL) {
4456
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4457
proc->pid, thread->pid);
4458
binder_node_unlock(ref->node);
4459
binder_proc_unlock(proc);
4460
break;
4461
}
4462
death = ref->death;
4463
if (death->cookie != cookie) {
4464
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4465
proc->pid, thread->pid,
4466
(u64)death->cookie,
4467
(u64)cookie);
4468
binder_node_unlock(ref->node);
4469
binder_proc_unlock(proc);
4470
break;
4471
}
4472
ref->death = NULL;
4473
binder_inner_proc_lock(proc);
4474
if (list_empty(&death->work.entry)) {
4475
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4476
if (thread->looper &
4477
(BINDER_LOOPER_STATE_REGISTERED |
4478
BINDER_LOOPER_STATE_ENTERED))
4479
binder_enqueue_thread_work_ilocked(
4480
thread,
4481
&death->work);
4482
else {
4483
binder_enqueue_work_ilocked(
4484
&death->work,
4485
&proc->todo);
4486
binder_wakeup_proc_ilocked(
4487
proc);
4488
}
4489
} else {
4490
BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4491
death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4492
}
4493
binder_inner_proc_unlock(proc);
4494
}
4495
binder_node_unlock(ref->node);
4496
binder_proc_unlock(proc);
4497
} break;
4498
case BC_DEAD_BINDER_DONE: {
4499
struct binder_work *w;
4500
binder_uintptr_t cookie;
4501
struct binder_ref_death *death = NULL;
4502
4503
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4504
return -EFAULT;
4505
4506
ptr += sizeof(cookie);
4507
binder_inner_proc_lock(proc);
4508
list_for_each_entry(w, &proc->delivered_death,
4509
entry) {
4510
struct binder_ref_death *tmp_death =
4511
container_of(w,
4512
struct binder_ref_death,
4513
work);
4514
4515
if (tmp_death->cookie == cookie) {
4516
death = tmp_death;
4517
break;
4518
}
4519
}
4520
binder_debug(BINDER_DEBUG_DEAD_BINDER,
4521
"%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4522
proc->pid, thread->pid, (u64)cookie,
4523
death);
4524
if (death == NULL) {
4525
binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4526
proc->pid, thread->pid, (u64)cookie);
4527
binder_inner_proc_unlock(proc);
4528
break;
4529
}
4530
binder_dequeue_work_ilocked(&death->work);
4531
if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4532
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4533
if (thread->looper &
4534
(BINDER_LOOPER_STATE_REGISTERED |
4535
BINDER_LOOPER_STATE_ENTERED))
4536
binder_enqueue_thread_work_ilocked(
4537
thread, &death->work);
4538
else {
4539
binder_enqueue_work_ilocked(
4540
&death->work,
4541
&proc->todo);
4542
binder_wakeup_proc_ilocked(proc);
4543
}
4544
}
4545
binder_inner_proc_unlock(proc);
4546
} break;
4547
4548
case BC_REQUEST_FREEZE_NOTIFICATION: {
4549
struct binder_handle_cookie handle_cookie;
4550
int error;
4551
4552
if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4553
return -EFAULT;
4554
ptr += sizeof(handle_cookie);
4555
error = binder_request_freeze_notification(proc, thread,
4556
&handle_cookie);
4557
if (error)
4558
return error;
4559
} break;
4560
4561
case BC_CLEAR_FREEZE_NOTIFICATION: {
4562
struct binder_handle_cookie handle_cookie;
4563
int error;
4564
4565
if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4566
return -EFAULT;
4567
ptr += sizeof(handle_cookie);
4568
error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
4569
if (error)
4570
return error;
4571
} break;
4572
4573
case BC_FREEZE_NOTIFICATION_DONE: {
4574
binder_uintptr_t cookie;
4575
int error;
4576
4577
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4578
return -EFAULT;
4579
4580
ptr += sizeof(cookie);
4581
error = binder_freeze_notification_done(proc, thread, cookie);
4582
if (error)
4583
return error;
4584
} break;
4585
4586
default:
4587
pr_err("%d:%d unknown command %u\n",
4588
proc->pid, thread->pid, cmd);
4589
return -EINVAL;
4590
}
4591
*consumed = ptr - buffer;
4592
}
4593
return 0;
4594
}
4595
4596
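/* Account a BR_* return code in the global, per-proc and per-thread stats. */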
static void binder_stat_br(struct binder_proc *proc,
4597
struct binder_thread *thread, uint32_t cmd)
4598
{
4599
trace_binder_return(cmd);
4600
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4601
atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4602
atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4603
atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4604
}
4605
}
4606
4607
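/*
 * Write one node reference command (BR_INCREFS, BR_ACQUIRE, BR_RELEASE
 * or BR_DECREFS) plus the node's ptr/cookie pair into the userspace
 * read buffer, advancing *ptrp. Returns -EFAULT if the copy fails.
 */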
static int binder_put_node_cmd(struct binder_proc *proc,
4608
struct binder_thread *thread,
4609
void __user **ptrp,
4610
binder_uintptr_t node_ptr,
4611
binder_uintptr_t node_cookie,
4612
int node_debug_id,
4613
uint32_t cmd, const char *cmd_name)
4614
{
4615
void __user *ptr = *ptrp;
4616
4617
if (put_user(cmd, (uint32_t __user *)ptr))
4618
return -EFAULT;
4619
ptr += sizeof(uint32_t);
4620
4621
if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4622
return -EFAULT;
4623
ptr += sizeof(binder_uintptr_t);
4624
4625
if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4626
return -EFAULT;
4627
ptr += sizeof(binder_uintptr_t);
4628
4629
binder_stat_br(proc, thread, cmd);
4630
binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4631
proc->pid, thread->pid, cmd_name, node_debug_id,
4632
(u64)node_ptr, (u64)node_cookie);
4633
4634
*ptrp = ptr;
4635
return 0;
4636
}
4637
4638
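/*
 * Block until this thread has work (or, if do_proc_work is true, until
 * there is process-wide work to take). The wait is interruptible and
 * freezable; returns -EINTR if interrupted by a signal, otherwise 0.
 */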
static int binder_wait_for_work(struct binder_thread *thread,
4639
bool do_proc_work)
4640
{
4641
DEFINE_WAIT(wait);
4642
struct binder_proc *proc = thread->proc;
4643
int ret = 0;
4644
4645
binder_inner_proc_lock(proc);
4646
for (;;) {
4647
prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4648
if (binder_has_work_ilocked(thread, do_proc_work))
4649
break;
4650
if (do_proc_work)
4651
list_add(&thread->waiting_thread_node,
4652
&proc->waiting_threads);
4653
binder_inner_proc_unlock(proc);
4654
schedule();
4655
binder_inner_proc_lock(proc);
4656
list_del_init(&thread->waiting_thread_node);
4657
if (signal_pending(current)) {
4658
ret = -EINTR;
4659
break;
4660
}
4661
}
4662
finish_wait(&thread->wait, &wait);
4663
binder_inner_proc_unlock(proc);
4664
4665
return ret;
4666
}
4667
4668
/**
4669
* binder_apply_fd_fixups() - finish fd translation
4670
* @proc: binder_proc associated with @t->buffer
4671
* @t: binder transaction with list of fd fixups
4672
*
4673
* Now that we are in the context of the transaction target
4674
* process, we can allocate and install fds. Process the
4675
* list of fds to translate and fixup the buffer with the
4676
* new fds first and only then install the files.
4677
*
4678
* If we fail to allocate an fd, skip the install and release
4679
* any fds that have already been allocated.
4680
*/
4681
static int binder_apply_fd_fixups(struct binder_proc *proc,
4682
struct binder_transaction *t)
4683
{
4684
struct binder_txn_fd_fixup *fixup, *tmp;
4685
int ret = 0;
4686
4687
list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4688
int fd = get_unused_fd_flags(O_CLOEXEC);
4689
4690
if (fd < 0) {
4691
binder_debug(BINDER_DEBUG_TRANSACTION,
4692
"failed fd fixup txn %d fd %d\n",
4693
t->debug_id, fd);
4694
ret = -ENOMEM;
4695
goto err;
4696
}
4697
binder_debug(BINDER_DEBUG_TRANSACTION,
4698
"fd fixup txn %d fd %d\n",
4699
t->debug_id, fd);
4700
trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4701
fixup->target_fd = fd;
4702
if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4703
fixup->offset, &fd,
4704
sizeof(u32))) {
4705
ret = -EINVAL;
4706
goto err;
4707
}
4708
}
4709
list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4710
fd_install(fixup->target_fd, fixup->file);
4711
list_del(&fixup->fixup_entry);
4712
kfree(fixup);
4713
}
4714
4715
return ret;
4716
4717
err:
4718
binder_free_txn_fixups(t);
4719
return ret;
4720
}
4721
4722
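/*
 * Fill the userspace read buffer with BR_* commands for the pending
 * work of this thread (and of the process, when the thread may handle
 * process work). Blocks for work unless non_block is set. Also asks
 * userspace to spawn another looper thread via BR_SPAWN_LOOPER when no
 * threads are waiting and fewer than max_threads have been started.
 */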
static int binder_thread_read(struct binder_proc *proc,
4723
struct binder_thread *thread,
4724
binder_uintptr_t binder_buffer, size_t size,
4725
binder_size_t *consumed, int non_block)
4726
{
4727
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4728
void __user *ptr = buffer + *consumed;
4729
void __user *end = buffer + size;
4730
4731
int ret = 0;
4732
int wait_for_proc_work;
4733
4734
if (*consumed == 0) {
4735
if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4736
return -EFAULT;
4737
ptr += sizeof(uint32_t);
4738
}
4739
4740
retry:
4741
binder_inner_proc_lock(proc);
4742
wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4743
binder_inner_proc_unlock(proc);
4744
4745
thread->looper |= BINDER_LOOPER_STATE_WAITING;
4746
4747
trace_binder_wait_for_work(wait_for_proc_work,
4748
!!thread->transaction_stack,
4749
!binder_worklist_empty(proc, &thread->todo));
4750
if (wait_for_proc_work) {
4751
if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4752
BINDER_LOOPER_STATE_ENTERED))) {
4753
binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4754
proc->pid, thread->pid, thread->looper);
4755
wait_event_interruptible(binder_user_error_wait,
4756
binder_stop_on_user_error < 2);
4757
}
4758
binder_set_nice(proc->default_priority);
4759
}
4760
4761
if (non_block) {
4762
if (!binder_has_work(thread, wait_for_proc_work))
4763
ret = -EAGAIN;
4764
} else {
4765
ret = binder_wait_for_work(thread, wait_for_proc_work);
4766
}
4767
4768
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4769
4770
if (ret)
4771
return ret;
4772
4773
while (1) {
4774
uint32_t cmd;
4775
struct binder_transaction_data_secctx tr;
4776
struct binder_transaction_data *trd = &tr.transaction_data;
4777
struct binder_work *w = NULL;
4778
struct list_head *list = NULL;
4779
struct binder_transaction *t = NULL;
4780
struct binder_thread *t_from;
4781
size_t trsize = sizeof(*trd);
4782
4783
binder_inner_proc_lock(proc);
4784
if (!binder_worklist_empty_ilocked(&thread->todo))
4785
list = &thread->todo;
4786
else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4787
wait_for_proc_work)
4788
list = &proc->todo;
4789
else {
4790
binder_inner_proc_unlock(proc);
4791
4792
/* no data added */
4793
if (ptr - buffer == 4 && !thread->looper_need_return)
4794
goto retry;
4795
break;
4796
}
4797
4798
if (end - ptr < sizeof(tr) + 4) {
4799
binder_inner_proc_unlock(proc);
4800
break;
4801
}
4802
w = binder_dequeue_work_head_ilocked(list);
4803
if (binder_worklist_empty_ilocked(&thread->todo))
4804
thread->process_todo = false;
4805
4806
switch (w->type) {
4807
case BINDER_WORK_TRANSACTION: {
4808
binder_inner_proc_unlock(proc);
4809
t = container_of(w, struct binder_transaction, work);
4810
} break;
4811
case BINDER_WORK_RETURN_ERROR: {
4812
struct binder_error *e = container_of(
4813
w, struct binder_error, work);
4814
4815
WARN_ON(e->cmd == BR_OK);
4816
binder_inner_proc_unlock(proc);
4817
if (put_user(e->cmd, (uint32_t __user *)ptr))
4818
return -EFAULT;
4819
cmd = e->cmd;
4820
e->cmd = BR_OK;
4821
ptr += sizeof(uint32_t);
4822
4823
binder_stat_br(proc, thread, cmd);
4824
} break;
4825
case BINDER_WORK_TRANSACTION_COMPLETE:
4826
case BINDER_WORK_TRANSACTION_PENDING:
4827
case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4828
if (proc->oneway_spam_detection_enabled &&
4829
w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4830
cmd = BR_ONEWAY_SPAM_SUSPECT;
4831
else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4832
cmd = BR_TRANSACTION_PENDING_FROZEN;
4833
else
4834
cmd = BR_TRANSACTION_COMPLETE;
4835
binder_inner_proc_unlock(proc);
4836
kfree(w);
4837
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4838
if (put_user(cmd, (uint32_t __user *)ptr))
4839
return -EFAULT;
4840
ptr += sizeof(uint32_t);
4841
4842
binder_stat_br(proc, thread, cmd);
4843
binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4844
"%d:%d BR_TRANSACTION_COMPLETE\n",
4845
proc->pid, thread->pid);
4846
} break;
4847
case BINDER_WORK_NODE: {
4848
struct binder_node *node = container_of(w, struct binder_node, work);
4849
int strong, weak;
4850
binder_uintptr_t node_ptr = node->ptr;
4851
binder_uintptr_t node_cookie = node->cookie;
4852
int node_debug_id = node->debug_id;
4853
int has_weak_ref;
4854
int has_strong_ref;
4855
void __user *orig_ptr = ptr;
4856
4857
BUG_ON(proc != node->proc);
4858
strong = node->internal_strong_refs ||
4859
node->local_strong_refs;
4860
weak = !hlist_empty(&node->refs) ||
4861
node->local_weak_refs ||
4862
node->tmp_refs || strong;
4863
has_strong_ref = node->has_strong_ref;
4864
has_weak_ref = node->has_weak_ref;
4865
4866
if (weak && !has_weak_ref) {
4867
node->has_weak_ref = 1;
4868
node->pending_weak_ref = 1;
4869
node->local_weak_refs++;
4870
}
4871
if (strong && !has_strong_ref) {
4872
node->has_strong_ref = 1;
4873
node->pending_strong_ref = 1;
4874
node->local_strong_refs++;
4875
}
4876
if (!strong && has_strong_ref)
4877
node->has_strong_ref = 0;
4878
if (!weak && has_weak_ref)
4879
node->has_weak_ref = 0;
4880
if (!weak && !strong) {
4881
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4882
"%d:%d node %d u%016llx c%016llx deleted\n",
4883
proc->pid, thread->pid,
4884
node_debug_id,
4885
(u64)node_ptr,
4886
(u64)node_cookie);
4887
rb_erase(&node->rb_node, &proc->nodes);
4888
binder_inner_proc_unlock(proc);
4889
binder_node_lock(node);
4890
/*
4891
* Acquire the node lock before freeing the
4892
* node to serialize with other threads that
4893
* may have been holding the node lock while
4894
* decrementing this node (avoids race where
4895
* this thread frees while the other thread
4896
* is unlocking the node after the final
4897
* decrement)
4898
*/
4899
binder_node_unlock(node);
4900
binder_free_node(node);
4901
} else
4902
binder_inner_proc_unlock(proc);
4903
4904
if (weak && !has_weak_ref)
4905
ret = binder_put_node_cmd(
4906
proc, thread, &ptr, node_ptr,
4907
node_cookie, node_debug_id,
4908
BR_INCREFS, "BR_INCREFS");
4909
if (!ret && strong && !has_strong_ref)
4910
ret = binder_put_node_cmd(
4911
proc, thread, &ptr, node_ptr,
4912
node_cookie, node_debug_id,
4913
BR_ACQUIRE, "BR_ACQUIRE");
4914
if (!ret && !strong && has_strong_ref)
4915
ret = binder_put_node_cmd(
4916
proc, thread, &ptr, node_ptr,
4917
node_cookie, node_debug_id,
4918
BR_RELEASE, "BR_RELEASE");
4919
if (!ret && !weak && has_weak_ref)
4920
ret = binder_put_node_cmd(
4921
proc, thread, &ptr, node_ptr,
4922
node_cookie, node_debug_id,
4923
BR_DECREFS, "BR_DECREFS");
4924
if (orig_ptr == ptr)
4925
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4926
"%d:%d node %d u%016llx c%016llx state unchanged\n",
4927
proc->pid, thread->pid,
4928
node_debug_id,
4929
(u64)node_ptr,
4930
(u64)node_cookie);
4931
if (ret)
4932
return ret;
4933
} break;
4934
case BINDER_WORK_DEAD_BINDER:
4935
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4936
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4937
struct binder_ref_death *death;
4938
uint32_t cmd;
4939
binder_uintptr_t cookie;
4940
4941
death = container_of(w, struct binder_ref_death, work);
4942
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4943
cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4944
else
4945
cmd = BR_DEAD_BINDER;
4946
cookie = death->cookie;
4947
4948
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4949
"%d:%d %s %016llx\n",
4950
proc->pid, thread->pid,
4951
cmd == BR_DEAD_BINDER ?
4952
"BR_DEAD_BINDER" :
4953
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
4954
(u64)cookie);
4955
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4956
binder_inner_proc_unlock(proc);
4957
kfree(death);
4958
binder_stats_deleted(BINDER_STAT_DEATH);
4959
} else {
4960
binder_enqueue_work_ilocked(
4961
w, &proc->delivered_death);
4962
binder_inner_proc_unlock(proc);
4963
}
4964
if (put_user(cmd, (uint32_t __user *)ptr))
4965
return -EFAULT;
4966
ptr += sizeof(uint32_t);
4967
if (put_user(cookie,
4968
(binder_uintptr_t __user *)ptr))
4969
return -EFAULT;
4970
ptr += sizeof(binder_uintptr_t);
4971
binder_stat_br(proc, thread, cmd);
4972
if (cmd == BR_DEAD_BINDER)
4973
goto done; /* DEAD_BINDER notifications can cause transactions */
4974
} break;
4975
4976
case BINDER_WORK_FROZEN_BINDER: {
4977
struct binder_ref_freeze *freeze;
4978
struct binder_frozen_state_info info;
4979
4980
memset(&info, 0, sizeof(info));
4981
freeze = container_of(w, struct binder_ref_freeze, work);
4982
info.is_frozen = freeze->is_frozen;
4983
info.cookie = freeze->cookie;
4984
freeze->sent = true;
4985
binder_enqueue_work_ilocked(w, &proc->delivered_freeze);
4986
binder_inner_proc_unlock(proc);
4987
4988
if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
4989
return -EFAULT;
4990
ptr += sizeof(uint32_t);
4991
if (copy_to_user(ptr, &info, sizeof(info)))
4992
return -EFAULT;
4993
ptr += sizeof(info);
4994
binder_stat_br(proc, thread, BR_FROZEN_BINDER);
4995
goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
4996
} break;
4997
4998
case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
4999
struct binder_ref_freeze *freeze =
5000
container_of(w, struct binder_ref_freeze, work);
5001
binder_uintptr_t cookie = freeze->cookie;
5002
5003
binder_inner_proc_unlock(proc);
5004
kfree(freeze);
5005
binder_stats_deleted(BINDER_STAT_FREEZE);
5006
if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
5007
return -EFAULT;
5008
ptr += sizeof(uint32_t);
5009
if (put_user(cookie, (binder_uintptr_t __user *)ptr))
5010
return -EFAULT;
5011
ptr += sizeof(binder_uintptr_t);
5012
binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
5013
} break;
5014
5015
default:
5016
binder_inner_proc_unlock(proc);
5017
pr_err("%d:%d: bad work type %d\n",
5018
proc->pid, thread->pid, w->type);
5019
break;
5020
}
5021
5022
if (!t)
5023
continue;
5024
5025
BUG_ON(t->buffer == NULL);
5026
if (t->buffer->target_node) {
5027
struct binder_node *target_node = t->buffer->target_node;
5028
5029
trd->target.ptr = target_node->ptr;
5030
trd->cookie = target_node->cookie;
5031
t->saved_priority = task_nice(current);
5032
if (t->priority < target_node->min_priority &&
5033
!(t->flags & TF_ONE_WAY))
5034
binder_set_nice(t->priority);
5035
else if (!(t->flags & TF_ONE_WAY) ||
5036
t->saved_priority > target_node->min_priority)
5037
binder_set_nice(target_node->min_priority);
5038
cmd = BR_TRANSACTION;
5039
} else {
5040
trd->target.ptr = 0;
5041
trd->cookie = 0;
5042
cmd = BR_REPLY;
5043
}
5044
trd->code = t->code;
5045
trd->flags = t->flags;
5046
trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
5047
5048
t_from = binder_get_txn_from(t);
5049
if (t_from) {
5050
struct task_struct *sender = t_from->proc->tsk;
5051
5052
trd->sender_pid =
5053
task_tgid_nr_ns(sender,
5054
task_active_pid_ns(current));
5055
} else {
5056
trd->sender_pid = 0;
5057
}
5058
5059
ret = binder_apply_fd_fixups(proc, t);
5060
if (ret) {
5061
struct binder_buffer *buffer = t->buffer;
5062
bool oneway = !!(t->flags & TF_ONE_WAY);
5063
int tid = t->debug_id;
5064
5065
if (t_from)
5066
binder_thread_dec_tmpref(t_from);
5067
buffer->transaction = NULL;
5068
binder_cleanup_transaction(t, "fd fixups failed",
5069
BR_FAILED_REPLY);
5070
binder_free_buf(proc, thread, buffer, true);
5071
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
5072
"%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
5073
proc->pid, thread->pid,
5074
oneway ? "async " :
5075
(cmd == BR_REPLY ? "reply " : ""),
5076
tid, BR_FAILED_REPLY, ret, __LINE__);
5077
if (cmd == BR_REPLY) {
5078
cmd = BR_FAILED_REPLY;
5079
if (put_user(cmd, (uint32_t __user *)ptr))
5080
return -EFAULT;
5081
ptr += sizeof(uint32_t);
5082
binder_stat_br(proc, thread, cmd);
5083
break;
5084
}
5085
continue;
5086
}
5087
trd->data_size = t->buffer->data_size;
5088
trd->offsets_size = t->buffer->offsets_size;
5089
trd->data.ptr.buffer = t->buffer->user_data;
5090
trd->data.ptr.offsets = trd->data.ptr.buffer +
5091
ALIGN(t->buffer->data_size,
5092
sizeof(void *));
5093
5094
tr.secctx = t->security_ctx;
5095
if (t->security_ctx) {
5096
cmd = BR_TRANSACTION_SEC_CTX;
5097
trsize = sizeof(tr);
5098
}
5099
if (put_user(cmd, (uint32_t __user *)ptr)) {
5100
if (t_from)
5101
binder_thread_dec_tmpref(t_from);
5102
5103
binder_cleanup_transaction(t, "put_user failed",
5104
BR_FAILED_REPLY);
5105
5106
return -EFAULT;
5107
}
5108
ptr += sizeof(uint32_t);
5109
if (copy_to_user(ptr, &tr, trsize)) {
5110
if (t_from)
5111
binder_thread_dec_tmpref(t_from);
5112
5113
binder_cleanup_transaction(t, "copy_to_user failed",
5114
BR_FAILED_REPLY);
5115
5116
return -EFAULT;
5117
}
5118
ptr += trsize;
5119
5120
trace_binder_transaction_received(t);
5121
binder_stat_br(proc, thread, cmd);
5122
binder_debug(BINDER_DEBUG_TRANSACTION,
5123
"%d:%d %s %d %d:%d, cmd %u size %zd-%zd\n",
5124
proc->pid, thread->pid,
5125
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
5126
(cmd == BR_TRANSACTION_SEC_CTX) ?
5127
"BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
5128
t->debug_id, t_from ? t_from->proc->pid : 0,
5129
t_from ? t_from->pid : 0, cmd,
5130
t->buffer->data_size, t->buffer->offsets_size);
5131
5132
if (t_from)
5133
binder_thread_dec_tmpref(t_from);
5134
t->buffer->allow_user_free = 1;
5135
if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
5136
binder_inner_proc_lock(thread->proc);
5137
t->to_parent = thread->transaction_stack;
5138
t->to_thread = thread;
5139
thread->transaction_stack = t;
5140
binder_inner_proc_unlock(thread->proc);
5141
} else {
5142
binder_free_transaction(t);
5143
}
5144
break;
5145
}
5146
5147
done:
5148
5149
*consumed = ptr - buffer;
5150
binder_inner_proc_lock(proc);
5151
if (proc->requested_threads == 0 &&
5152
list_empty(&thread->proc->waiting_threads) &&
5153
proc->requested_threads_started < proc->max_threads &&
5154
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
5155
BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
5156
/* spawn a new thread if we leave this out */) {
5157
proc->requested_threads++;
5158
binder_inner_proc_unlock(proc);
5159
binder_debug(BINDER_DEBUG_THREADS,
5160
"%d:%d BR_SPAWN_LOOPER\n",
5161
proc->pid, thread->pid);
5162
if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
5163
return -EFAULT;
5164
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
5165
} else
5166
binder_inner_proc_unlock(proc);
5167
return 0;
5168
}
5169
5170
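/*
 * Drain @list, discarding every undelivered work item and cleaning up
 * the transactions, errors, death and freeze notifications it held.
 */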
static void binder_release_work(struct binder_proc *proc,
5171
struct list_head *list)
5172
{
5173
struct binder_work *w;
5174
enum binder_work_type wtype;
5175
5176
while (1) {
5177
binder_inner_proc_lock(proc);
5178
w = binder_dequeue_work_head_ilocked(list);
5179
wtype = w ? w->type : 0;
5180
binder_inner_proc_unlock(proc);
5181
if (!w)
5182
return;
5183
5184
switch (wtype) {
5185
case BINDER_WORK_TRANSACTION: {
5186
struct binder_transaction *t;
5187
5188
t = container_of(w, struct binder_transaction, work);
5189
5190
binder_cleanup_transaction(t, "process died.",
5191
BR_DEAD_REPLY);
5192
} break;
5193
case BINDER_WORK_RETURN_ERROR: {
5194
struct binder_error *e = container_of(
5195
w, struct binder_error, work);
5196
5197
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5198
"undelivered TRANSACTION_ERROR: %u\n",
5199
e->cmd);
5200
} break;
5201
case BINDER_WORK_TRANSACTION_PENDING:
5202
case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
5203
case BINDER_WORK_TRANSACTION_COMPLETE: {
5204
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5205
"undelivered TRANSACTION_COMPLETE\n");
5206
kfree(w);
5207
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
5208
} break;
5209
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5210
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
5211
struct binder_ref_death *death;
5212
5213
death = container_of(w, struct binder_ref_death, work);
5214
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5215
"undelivered death notification, %016llx\n",
5216
(u64)death->cookie);
5217
kfree(death);
5218
binder_stats_deleted(BINDER_STAT_DEATH);
5219
} break;
5220
case BINDER_WORK_NODE:
5221
break;
5222
case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
5223
struct binder_ref_freeze *freeze;
5224
5225
freeze = container_of(w, struct binder_ref_freeze, work);
5226
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5227
"undelivered freeze notification, %016llx\n",
5228
(u64)freeze->cookie);
5229
kfree(freeze);
5230
binder_stats_deleted(BINDER_STAT_FREEZE);
5231
} break;
5232
default:
5233
pr_err("unexpected work type, %d, not freed\n",
5234
wtype);
5235
break;
5236
}
5237
}
5238
5239
}
5240
5241
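/*
 * Find the binder_thread for current in proc->threads; if it is absent
 * and @new_thread is provided, initialize and insert @new_thread in its
 * place. Called with proc->inner_lock held.
 */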
static struct binder_thread *binder_get_thread_ilocked(
5242
struct binder_proc *proc, struct binder_thread *new_thread)
5243
{
5244
struct binder_thread *thread = NULL;
5245
struct rb_node *parent = NULL;
5246
struct rb_node **p = &proc->threads.rb_node;
5247
5248
while (*p) {
5249
parent = *p;
5250
thread = rb_entry(parent, struct binder_thread, rb_node);
5251
5252
if (current->pid < thread->pid)
5253
p = &(*p)->rb_left;
5254
else if (current->pid > thread->pid)
5255
p = &(*p)->rb_right;
5256
else
5257
return thread;
5258
}
5259
if (!new_thread)
5260
return NULL;
5261
thread = new_thread;
5262
binder_stats_created(BINDER_STAT_THREAD);
5263
thread->proc = proc;
5264
thread->pid = current->pid;
5265
atomic_set(&thread->tmp_ref, 0);
5266
init_waitqueue_head(&thread->wait);
5267
INIT_LIST_HEAD(&thread->todo);
5268
rb_link_node(&thread->rb_node, parent, p);
5269
rb_insert_color(&thread->rb_node, &proc->threads);
5270
thread->looper_need_return = true;
5271
thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5272
thread->return_error.cmd = BR_OK;
5273
thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5274
thread->reply_error.cmd = BR_OK;
5275
thread->ee.command = BR_OK;
5276
INIT_LIST_HEAD(&new_thread->waiting_thread_node);
5277
return thread;
5278
}
5279
5280
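/*
 * Return the binder_thread for current, allocating and registering one
 * on first use. Returns NULL if the allocation fails.
 */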
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5281
{
5282
struct binder_thread *thread;
5283
struct binder_thread *new_thread;
5284
5285
binder_inner_proc_lock(proc);
5286
thread = binder_get_thread_ilocked(proc, NULL);
5287
binder_inner_proc_unlock(proc);
5288
if (!thread) {
5289
new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5290
if (new_thread == NULL)
5291
return NULL;
5292
binder_inner_proc_lock(proc);
5293
thread = binder_get_thread_ilocked(proc, new_thread);
5294
binder_inner_proc_unlock(proc);
5295
if (thread != new_thread)
5296
kfree(new_thread);
5297
}
5298
return thread;
5299
}
5300
5301
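/*
 * Free a binder_proc: drop its device reference (removing the device if
 * this was the last user), release the allocator, task and cred
 * references and the descriptor bitmap, then free the struct itself.
 */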
static void binder_free_proc(struct binder_proc *proc)
5302
{
5303
struct binder_device *device;
5304
5305
BUG_ON(!list_empty(&proc->todo));
5306
BUG_ON(!list_empty(&proc->delivered_death));
5307
if (proc->outstanding_txns)
5308
pr_warn("%s: Unexpected outstanding_txns %d\n",
5309
__func__, proc->outstanding_txns);
5310
device = container_of(proc->context, struct binder_device, context);
5311
if (refcount_dec_and_test(&device->ref)) {
5312
binder_remove_device(device);
5313
kfree(proc->context->name);
5314
kfree(device);
5315
}
5316
binder_alloc_deferred_release(&proc->alloc);
5317
put_task_struct(proc->tsk);
5318
put_cred(proc->cred);
5319
binder_stats_deleted(BINDER_STAT_PROC);
5320
dbitmap_free(&proc->dmap);
5321
kfree(proc);
5322
}
5323
5324
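/*
 * Free a binder_thread and drop the temporary proc reference that was
 * taken on its behalf in binder_thread_release().
 */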
static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}
5331
5332
static int binder_thread_release(struct binder_proc *proc,
5333
struct binder_thread *thread)
5334
{
5335
struct binder_transaction *t;
5336
struct binder_transaction *send_reply = NULL;
5337
int active_transactions = 0;
5338
struct binder_transaction *last_t = NULL;
5339
5340
binder_inner_proc_lock(thread->proc);
5341
/*
5342
* take a ref on the proc so it survives
5343
* after we remove this thread from proc->threads.
5344
* The corresponding dec is when we actually
5345
* free the thread in binder_free_thread()
5346
*/
5347
proc->tmp_ref++;
5348
/*
5349
* take a ref on this thread to ensure it
5350
* survives while we are releasing it
5351
*/
5352
atomic_inc(&thread->tmp_ref);
5353
rb_erase(&thread->rb_node, &proc->threads);
5354
t = thread->transaction_stack;
5355
if (t) {
5356
spin_lock(&t->lock);
5357
if (t->to_thread == thread)
5358
send_reply = t;
5359
} else {
5360
__acquire(&t->lock);
5361
}
5362
thread->is_dead = true;
5363
5364
while (t) {
5365
last_t = t;
5366
active_transactions++;
5367
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5368
"release %d:%d transaction %d %s, still active\n",
5369
proc->pid, thread->pid,
5370
t->debug_id,
5371
(t->to_thread == thread) ? "in" : "out");
5372
5373
if (t->to_thread == thread) {
5374
thread->proc->outstanding_txns--;
5375
t->to_proc = NULL;
5376
t->to_thread = NULL;
5377
if (t->buffer) {
5378
t->buffer->transaction = NULL;
5379
t->buffer = NULL;
5380
}
5381
t = t->to_parent;
5382
} else if (t->from == thread) {
5383
t->from = NULL;
5384
t = t->from_parent;
5385
} else
5386
BUG();
5387
spin_unlock(&last_t->lock);
5388
if (t)
5389
spin_lock(&t->lock);
5390
else
5391
__acquire(&t->lock);
5392
}
5393
/* annotation for sparse, lock not acquired in last iteration above */
5394
__release(&t->lock);
5395
5396
/*
5397
* If this thread used poll, make sure we remove the waitqueue from any
5398
* poll data structures holding it.
5399
*/
5400
if (thread->looper & BINDER_LOOPER_STATE_POLL)
5401
wake_up_pollfree(&thread->wait);
5402
5403
binder_inner_proc_unlock(thread->proc);
5404
5405
/*
5406
* This is needed to avoid races between wake_up_pollfree() above and
5407
* someone else removing the last entry from the queue for other reasons
5408
* (e.g. ep_remove_wait_queue() being called due to an epoll file
5409
* descriptor being closed). Such other users hold an RCU read lock, so
5410
* we can be sure they're done after we call synchronize_rcu().
5411
*/
5412
if (thread->looper & BINDER_LOOPER_STATE_POLL)
5413
synchronize_rcu();
5414
5415
if (send_reply)
5416
binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5417
binder_release_work(proc, &thread->todo);
5418
binder_thread_dec_tmpref(thread);
5419
return active_transactions;
5420
}
5421
5422
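/*
 * poll() support: mark the calling thread as a poller, register its
 * waitqueue, and report EPOLLIN when the thread (or, for an idle
 * looper, its proc) has work queued.
 */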
static __poll_t binder_poll(struct file *filp,
			    struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return EPOLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return EPOLLIN;

	return 0;
}
5446
5447
static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5448
struct binder_thread *thread)
5449
{
5450
int ret = 0;
5451
struct binder_proc *proc = filp->private_data;
5452
void __user *ubuf = (void __user *)arg;
5453
struct binder_write_read bwr;
5454
5455
if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
5456
return -EFAULT;
5457
5458
binder_debug(BINDER_DEBUG_READ_WRITE,
5459
"%d:%d write %lld at %016llx, read %lld at %016llx\n",
5460
proc->pid, thread->pid,
5461
(u64)bwr.write_size, (u64)bwr.write_buffer,
5462
(u64)bwr.read_size, (u64)bwr.read_buffer);
5463
5464
if (bwr.write_size > 0) {
5465
ret = binder_thread_write(proc, thread,
5466
bwr.write_buffer,
5467
bwr.write_size,
5468
&bwr.write_consumed);
5469
trace_binder_write_done(ret);
5470
if (ret < 0) {
5471
bwr.read_consumed = 0;
5472
goto out;
5473
}
5474
}
5475
if (bwr.read_size > 0) {
5476
ret = binder_thread_read(proc, thread, bwr.read_buffer,
5477
bwr.read_size,
5478
&bwr.read_consumed,
5479
filp->f_flags & O_NONBLOCK);
5480
trace_binder_read_done(ret);
5481
binder_inner_proc_lock(proc);
5482
if (!binder_worklist_empty_ilocked(&proc->todo))
5483
binder_wakeup_proc_ilocked(proc);
5484
binder_inner_proc_unlock(proc);
5485
if (ret < 0)
5486
goto out;
5487
}
5488
binder_debug(BINDER_DEBUG_READ_WRITE,
5489
"%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5490
proc->pid, thread->pid,
5491
(u64)bwr.write_consumed, (u64)bwr.write_size,
5492
(u64)bwr.read_consumed, (u64)bwr.read_size);
5493
out:
5494
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5495
ret = -EFAULT;
5496
return ret;
5497
}
5498
5499
static int binder_ioctl_set_ctx_mgr(struct file *filp,
5500
struct flat_binder_object *fbo)
5501
{
5502
int ret = 0;
5503
struct binder_proc *proc = filp->private_data;
5504
struct binder_context *context = proc->context;
5505
struct binder_node *new_node;
5506
kuid_t curr_euid = current_euid();
5507
5508
guard(mutex)(&context->context_mgr_node_lock);
5509
if (context->binder_context_mgr_node) {
5510
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5511
return -EBUSY;
5512
}
5513
ret = security_binder_set_context_mgr(proc->cred);
5514
if (ret < 0)
5515
return ret;
5516
if (uid_valid(context->binder_context_mgr_uid)) {
5517
if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5518
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5519
from_kuid(&init_user_ns, curr_euid),
5520
from_kuid(&init_user_ns,
5521
context->binder_context_mgr_uid));
5522
return -EPERM;
5523
}
5524
} else {
5525
context->binder_context_mgr_uid = curr_euid;
5526
}
5527
new_node = binder_new_node(proc, fbo);
5528
if (!new_node)
5529
return -ENOMEM;
5530
binder_node_lock(new_node);
5531
new_node->local_weak_refs++;
5532
new_node->local_strong_refs++;
5533
new_node->has_strong_ref = 1;
5534
new_node->has_weak_ref = 1;
5535
context->binder_context_mgr_node = new_node;
5536
binder_node_unlock(new_node);
5537
binder_put_node(new_node);
5538
return ret;
5539
}
5540
5541
static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5542
struct binder_node_info_for_ref *info)
5543
{
5544
struct binder_node *node;
5545
struct binder_context *context = proc->context;
5546
__u32 handle = info->handle;
5547
5548
if (info->strong_count || info->weak_count || info->reserved1 ||
5549
info->reserved2 || info->reserved3) {
5550
binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5551
proc->pid);
5552
return -EINVAL;
5553
}
5554
5555
/* This ioctl may only be used by the context manager */
5556
mutex_lock(&context->context_mgr_node_lock);
5557
if (!context->binder_context_mgr_node ||
5558
context->binder_context_mgr_node->proc != proc) {
5559
mutex_unlock(&context->context_mgr_node_lock);
5560
return -EPERM;
5561
}
5562
mutex_unlock(&context->context_mgr_node_lock);
5563
5564
node = binder_get_node_from_ref(proc, handle, true, NULL);
5565
if (!node)
5566
return -EINVAL;
5567
5568
info->strong_count = node->local_strong_refs +
5569
node->internal_strong_refs;
5570
info->weak_count = node->local_weak_refs;
5571
5572
binder_put_node(node);
5573
5574
return 0;
5575
}
5576
5577
static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5578
struct binder_node_debug_info *info)
5579
{
5580
struct rb_node *n;
5581
binder_uintptr_t ptr = info->ptr;
5582
5583
memset(info, 0, sizeof(*info));
5584
5585
binder_inner_proc_lock(proc);
5586
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5587
struct binder_node *node = rb_entry(n, struct binder_node,
5588
rb_node);
5589
if (node->ptr > ptr) {
5590
info->ptr = node->ptr;
5591
info->cookie = node->cookie;
5592
info->has_strong_ref = node->has_strong_ref;
5593
info->has_weak_ref = node->has_weak_ref;
5594
break;
5595
}
5596
}
5597
binder_inner_proc_unlock(proc);
5598
5599
return 0;
5600
}
5601
5602
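/*
 * Return true if the proc still has outstanding transactions or any of
 * its threads has a non-empty transaction stack. Caller must hold
 * proc->inner_lock.
 */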
static bool binder_txns_pending_ilocked(struct binder_proc *proc)
{
	struct rb_node *n;
	struct binder_thread *thread;

	if (proc->outstanding_txns > 0)
		return true;

	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->transaction_stack)
			return true;
	}
	return false;
}
5617
5618
static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
5619
{
5620
struct binder_node *prev = NULL;
5621
struct rb_node *n;
5622
struct binder_ref *ref;
5623
5624
binder_inner_proc_lock(proc);
5625
for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
5626
struct binder_node *node;
5627
5628
node = rb_entry(n, struct binder_node, rb_node);
5629
binder_inc_node_tmpref_ilocked(node);
5630
binder_inner_proc_unlock(proc);
5631
if (prev)
5632
binder_put_node(prev);
5633
binder_node_lock(node);
5634
hlist_for_each_entry(ref, &node->refs, node_entry) {
5635
/*
5636
* Need the node lock to synchronize
5637
* with new notification requests and the
5638
* inner lock to synchronize with queued
5639
* freeze notifications.
5640
*/
5641
binder_inner_proc_lock(ref->proc);
5642
if (!ref->freeze) {
5643
binder_inner_proc_unlock(ref->proc);
5644
continue;
5645
}
5646
ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
5647
if (list_empty(&ref->freeze->work.entry)) {
5648
ref->freeze->is_frozen = is_frozen;
5649
binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
5650
binder_wakeup_proc_ilocked(ref->proc);
5651
} else {
5652
if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
5653
ref->freeze->resend = true;
5654
ref->freeze->is_frozen = is_frozen;
5655
}
5656
binder_inner_proc_unlock(ref->proc);
5657
}
5658
prev = node;
5659
binder_node_unlock(node);
5660
binder_inner_proc_lock(proc);
5661
if (proc->is_dead)
5662
break;
5663
}
5664
binder_inner_proc_unlock(proc);
5665
if (prev)
5666
binder_put_node(prev);
5667
}
5668
5669
static int binder_ioctl_freeze(struct binder_freeze_info *info,
5670
struct binder_proc *target_proc)
5671
{
5672
int ret = 0;
5673
5674
if (!info->enable) {
5675
binder_inner_proc_lock(target_proc);
5676
target_proc->sync_recv = false;
5677
target_proc->async_recv = false;
5678
target_proc->is_frozen = false;
5679
binder_inner_proc_unlock(target_proc);
5680
binder_add_freeze_work(target_proc, false);
5681
return 0;
5682
}
5683
5684
/*
5685
* Freezing the target. Prevent new transactions by
5686
* setting frozen state. If timeout specified, wait
5687
* for transactions to drain.
5688
*/
5689
binder_inner_proc_lock(target_proc);
5690
target_proc->sync_recv = false;
5691
target_proc->async_recv = false;
5692
target_proc->is_frozen = true;
5693
binder_inner_proc_unlock(target_proc);
5694
5695
if (info->timeout_ms > 0)
5696
ret = wait_event_interruptible_timeout(
5697
target_proc->freeze_wait,
5698
(!target_proc->outstanding_txns),
5699
msecs_to_jiffies(info->timeout_ms));
5700
5701
/* Check pending transactions that wait for reply */
5702
if (ret >= 0) {
5703
binder_inner_proc_lock(target_proc);
5704
if (binder_txns_pending_ilocked(target_proc))
5705
ret = -EAGAIN;
5706
binder_inner_proc_unlock(target_proc);
5707
}
5708
5709
if (ret < 0) {
5710
binder_inner_proc_lock(target_proc);
5711
target_proc->is_frozen = false;
5712
binder_inner_proc_unlock(target_proc);
5713
} else {
5714
binder_add_freeze_work(target_proc, true);
5715
}
5716
5717
return ret;
5718
}
5719
5720
static int binder_ioctl_get_freezer_info(
5721
struct binder_frozen_status_info *info)
5722
{
5723
struct binder_proc *target_proc;
5724
bool found = false;
5725
__u32 txns_pending;
5726
5727
info->sync_recv = 0;
5728
info->async_recv = 0;
5729
5730
mutex_lock(&binder_procs_lock);
5731
hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5732
if (target_proc->pid == info->pid) {
5733
found = true;
5734
binder_inner_proc_lock(target_proc);
5735
txns_pending = binder_txns_pending_ilocked(target_proc);
5736
info->sync_recv |= target_proc->sync_recv |
5737
(txns_pending << 1);
5738
info->async_recv |= target_proc->async_recv;
5739
binder_inner_proc_unlock(target_proc);
5740
}
5741
}
5742
mutex_unlock(&binder_procs_lock);
5743
5744
if (!found)
5745
return -EINVAL;
5746
5747
return 0;
5748
}
5749
5750
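/*
 * BINDER_GET_EXTENDED_ERROR: snapshot the thread's pending extended
 * error under the inner lock, reset it to BR_OK, and copy the snapshot
 * out to userspace.
 */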
static int binder_ioctl_get_extended_error(struct binder_thread *thread,
					   void __user *ubuf)
{
	struct binder_extended_error ee;

	binder_inner_proc_lock(thread->proc);
	ee = thread->ee;
	binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
	binder_inner_proc_unlock(thread->proc);

	if (copy_to_user(ubuf, &ee, sizeof(ee)))
		return -EFAULT;

	return 0;
}
5765
5766
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5767
{
5768
int ret;
5769
struct binder_proc *proc = filp->private_data;
5770
struct binder_thread *thread;
5771
void __user *ubuf = (void __user *)arg;
5772
5773
trace_binder_ioctl(cmd, arg);
5774
5775
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5776
if (ret)
5777
goto err_unlocked;
5778
5779
thread = binder_get_thread(proc);
5780
if (thread == NULL) {
5781
ret = -ENOMEM;
5782
goto err;
5783
}
5784
5785
switch (cmd) {
5786
case BINDER_WRITE_READ:
5787
ret = binder_ioctl_write_read(filp, arg, thread);
5788
if (ret)
5789
goto err;
5790
break;
5791
case BINDER_SET_MAX_THREADS: {
5792
u32 max_threads;
5793
5794
if (copy_from_user(&max_threads, ubuf,
5795
sizeof(max_threads))) {
5796
ret = -EINVAL;
5797
goto err;
5798
}
5799
binder_inner_proc_lock(proc);
5800
proc->max_threads = max_threads;
5801
binder_inner_proc_unlock(proc);
5802
break;
5803
}
5804
case BINDER_SET_CONTEXT_MGR_EXT: {
5805
struct flat_binder_object fbo;
5806
5807
if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5808
ret = -EINVAL;
5809
goto err;
5810
}
5811
ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5812
if (ret)
5813
goto err;
5814
break;
5815
}
5816
case BINDER_SET_CONTEXT_MGR:
5817
ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5818
if (ret)
5819
goto err;
5820
break;
5821
case BINDER_THREAD_EXIT:
5822
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5823
proc->pid, thread->pid);
5824
binder_thread_release(proc, thread);
5825
thread = NULL;
5826
break;
5827
case BINDER_VERSION: {
5828
struct binder_version __user *ver = ubuf;
5829
5830
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5831
&ver->protocol_version)) {
5832
ret = -EINVAL;
5833
goto err;
5834
}
5835
break;
5836
}
5837
case BINDER_GET_NODE_INFO_FOR_REF: {
5838
struct binder_node_info_for_ref info;
5839
5840
if (copy_from_user(&info, ubuf, sizeof(info))) {
5841
ret = -EFAULT;
5842
goto err;
5843
}
5844
5845
ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5846
if (ret < 0)
5847
goto err;
5848
5849
if (copy_to_user(ubuf, &info, sizeof(info))) {
5850
ret = -EFAULT;
5851
goto err;
5852
}
5853
5854
break;
5855
}
5856
case BINDER_GET_NODE_DEBUG_INFO: {
5857
struct binder_node_debug_info info;
5858
5859
if (copy_from_user(&info, ubuf, sizeof(info))) {
5860
ret = -EFAULT;
5861
goto err;
5862
}
5863
5864
ret = binder_ioctl_get_node_debug_info(proc, &info);
5865
if (ret < 0)
5866
goto err;
5867
5868
if (copy_to_user(ubuf, &info, sizeof(info))) {
5869
ret = -EFAULT;
5870
goto err;
5871
}
5872
break;
5873
}
5874
case BINDER_FREEZE: {
5875
struct binder_freeze_info info;
5876
struct binder_proc **target_procs = NULL, *target_proc;
5877
int target_procs_count = 0, i = 0;
5878
5879
ret = 0;
5880
5881
if (copy_from_user(&info, ubuf, sizeof(info))) {
5882
ret = -EFAULT;
5883
goto err;
5884
}
5885
5886
mutex_lock(&binder_procs_lock);
5887
hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5888
if (target_proc->pid == info.pid)
5889
target_procs_count++;
5890
}
5891
5892
if (target_procs_count == 0) {
5893
mutex_unlock(&binder_procs_lock);
5894
ret = -EINVAL;
5895
goto err;
5896
}
5897
5898
target_procs = kcalloc(target_procs_count,
5899
sizeof(struct binder_proc *),
5900
GFP_KERNEL);
5901
5902
if (!target_procs) {
5903
mutex_unlock(&binder_procs_lock);
5904
ret = -ENOMEM;
5905
goto err;
5906
}
5907
5908
hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5909
if (target_proc->pid != info.pid)
5910
continue;
5911
5912
binder_inner_proc_lock(target_proc);
5913
target_proc->tmp_ref++;
5914
binder_inner_proc_unlock(target_proc);
5915
5916
target_procs[i++] = target_proc;
5917
}
5918
mutex_unlock(&binder_procs_lock);
5919
5920
for (i = 0; i < target_procs_count; i++) {
5921
if (ret >= 0)
5922
ret = binder_ioctl_freeze(&info,
5923
target_procs[i]);
5924
5925
binder_proc_dec_tmpref(target_procs[i]);
5926
}
5927
5928
kfree(target_procs);
5929
5930
if (ret < 0)
5931
goto err;
5932
break;
5933
}
5934
case BINDER_GET_FROZEN_INFO: {
5935
struct binder_frozen_status_info info;
5936
5937
if (copy_from_user(&info, ubuf, sizeof(info))) {
5938
ret = -EFAULT;
5939
goto err;
5940
}
5941
5942
ret = binder_ioctl_get_freezer_info(&info);
5943
if (ret < 0)
5944
goto err;
5945
5946
if (copy_to_user(ubuf, &info, sizeof(info))) {
5947
ret = -EFAULT;
5948
goto err;
5949
}
5950
break;
5951
}
5952
case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5953
uint32_t enable;
5954
5955
if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5956
ret = -EFAULT;
5957
goto err;
5958
}
5959
binder_inner_proc_lock(proc);
5960
proc->oneway_spam_detection_enabled = (bool)enable;
5961
binder_inner_proc_unlock(proc);
5962
break;
5963
}
5964
case BINDER_GET_EXTENDED_ERROR:
5965
ret = binder_ioctl_get_extended_error(thread, ubuf);
5966
if (ret < 0)
5967
goto err;
5968
break;
5969
default:
5970
ret = -EINVAL;
5971
goto err;
5972
}
5973
ret = 0;
5974
err:
5975
if (thread)
5976
thread->looper_need_return = false;
5977
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5978
if (ret && ret != -EINTR)
5979
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5980
err_unlocked:
5981
trace_binder_ioctl_done(ret);
5982
return ret;
5983
}
5984
5985
static void binder_vma_open(struct vm_area_struct *vma)
5986
{
5987
struct binder_proc *proc = vma->vm_private_data;
5988
5989
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5990
"%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5991
proc->pid, vma->vm_start, vma->vm_end,
5992
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5993
(unsigned long)pgprot_val(vma->vm_page_prot));
5994
}
5995
5996
static void binder_vma_close(struct vm_area_struct *vma)
5997
{
5998
struct binder_proc *proc = vma->vm_private_data;
5999
6000
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6001
"%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
6002
proc->pid, vma->vm_start, vma->vm_end,
6003
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
6004
(unsigned long)pgprot_val(vma->vm_page_prot));
6005
binder_alloc_vma_close(&proc->alloc);
6006
}
6007
6008
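/*
 * The binder buffer mapping never faults pages in on demand; a
 * userspace fault here is always an error, so deliver SIGBUS.
 */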
VISIBLE_IF_KUNIT vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL_IF_KUNIT(binder_vm_fault);

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
6019
6020
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
6021
{
6022
struct binder_proc *proc = filp->private_data;
6023
6024
if (proc->tsk != current->group_leader)
6025
return -EINVAL;
6026
6027
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6028
"%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
6029
__func__, proc->pid, vma->vm_start, vma->vm_end,
6030
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
6031
(unsigned long)pgprot_val(vma->vm_page_prot));
6032
6033
if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
6034
pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
6035
proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
6036
return -EPERM;
6037
}
6038
vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
6039
6040
vma->vm_ops = &binder_vm_ops;
6041
vma->vm_private_data = proc;
6042
6043
return binder_alloc_mmap_handler(&proc->alloc, vma);
6044
}
6045
6046
static int binder_open(struct inode *nodp, struct file *filp)
6047
{
6048
struct binder_proc *proc, *itr;
6049
struct binder_device *binder_dev;
6050
struct binderfs_info *info;
6051
struct dentry *binder_binderfs_dir_entry_proc = NULL;
6052
bool existing_pid = false;
6053
6054
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
6055
current->group_leader->pid, current->pid);
6056
6057
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
6058
if (proc == NULL)
6059
return -ENOMEM;
6060
6061
dbitmap_init(&proc->dmap);
6062
spin_lock_init(&proc->inner_lock);
6063
spin_lock_init(&proc->outer_lock);
6064
get_task_struct(current->group_leader);
6065
proc->tsk = current->group_leader;
6066
proc->cred = get_cred(filp->f_cred);
6067
INIT_LIST_HEAD(&proc->todo);
6068
init_waitqueue_head(&proc->freeze_wait);
6069
proc->default_priority = task_nice(current);
6070
/* binderfs stashes devices in i_private */
6071
if (is_binderfs_device(nodp)) {
6072
binder_dev = nodp->i_private;
6073
info = nodp->i_sb->s_fs_info;
6074
binder_binderfs_dir_entry_proc = info->proc_log_dir;
6075
} else {
6076
binder_dev = container_of(filp->private_data,
6077
struct binder_device, miscdev);
6078
}
6079
refcount_inc(&binder_dev->ref);
6080
proc->context = &binder_dev->context;
6081
binder_alloc_init(&proc->alloc);
6082
6083
binder_stats_created(BINDER_STAT_PROC);
6084
proc->pid = current->group_leader->pid;
6085
INIT_LIST_HEAD(&proc->delivered_death);
6086
INIT_LIST_HEAD(&proc->delivered_freeze);
6087
INIT_LIST_HEAD(&proc->waiting_threads);
6088
filp->private_data = proc;
6089
6090
mutex_lock(&binder_procs_lock);
6091
hlist_for_each_entry(itr, &binder_procs, proc_node) {
6092
if (itr->pid == proc->pid) {
6093
existing_pid = true;
6094
break;
6095
}
6096
}
6097
hlist_add_head(&proc->proc_node, &binder_procs);
6098
mutex_unlock(&binder_procs_lock);
6099
6100
if (binder_debugfs_dir_entry_proc && !existing_pid) {
6101
char strbuf[11];
6102
6103
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6104
		/*
		 * proc debug entries are shared between contexts.
		 * Only create the entry for the first PID to avoid debugfs
		 * log spamming. The printing code will print all contexts
		 * for a given PID anyway, so nothing is lost.
		 */
6110
proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
6111
binder_debugfs_dir_entry_proc,
6112
(void *)(unsigned long)proc->pid,
6113
&proc_fops);
6114
}
6115
6116
if (binder_binderfs_dir_entry_proc && !existing_pid) {
6117
char strbuf[11];
6118
struct dentry *binderfs_entry;
6119
6120
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6121
/*
6122
* Similar to debugfs, the process specific log file is shared
6123
* between contexts. Only create for the first PID.
6124
* This is ok since same as debugfs, the log file will contain
6125
* information on all contexts of a given PID.
6126
*/
6127
binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
6128
strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
6129
if (!IS_ERR(binderfs_entry)) {
6130
proc->binderfs_entry = binderfs_entry;
6131
} else {
6132
int error;
6133
6134
error = PTR_ERR(binderfs_entry);
6135
pr_warn("Unable to create file %s in binderfs (error %d)\n",
6136
strbuf, error);
6137
}
6138
}
6139
6140
return 0;
6141
}
6142
6143
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}
6151
6152
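/*
 * Deferred flush work: mark every thread of the proc as needing to
 * return to userspace and wake any thread currently waiting for work.
 */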
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}
6173
6174
static int binder_release(struct inode *nodp, struct file *filp)
6175
{
6176
struct binder_proc *proc = filp->private_data;
6177
6178
debugfs_remove(proc->debugfs_entry);
6179
6180
if (proc->binderfs_entry) {
6181
simple_recursive_removal(proc->binderfs_entry, NULL);
6182
proc->binderfs_entry = NULL;
6183
}
6184
6185
binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
6186
6187
return 0;
6188
}
6189
6190
static int binder_node_release(struct binder_node *node, int refs)
6191
{
6192
struct binder_ref *ref;
6193
int death = 0;
6194
struct binder_proc *proc = node->proc;
6195
6196
binder_release_work(proc, &node->async_todo);
6197
6198
binder_node_lock(node);
6199
binder_inner_proc_lock(proc);
6200
binder_dequeue_work_ilocked(&node->work);
6201
/*
6202
* The caller must have taken a temporary ref on the node,
6203
*/
6204
BUG_ON(!node->tmp_refs);
6205
if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
6206
binder_inner_proc_unlock(proc);
6207
binder_node_unlock(node);
6208
binder_free_node(node);
6209
6210
return refs;
6211
}
6212
6213
node->proc = NULL;
6214
node->local_strong_refs = 0;
6215
node->local_weak_refs = 0;
6216
binder_inner_proc_unlock(proc);
6217
6218
spin_lock(&binder_dead_nodes_lock);
6219
hlist_add_head(&node->dead_node, &binder_dead_nodes);
6220
spin_unlock(&binder_dead_nodes_lock);
6221
6222
hlist_for_each_entry(ref, &node->refs, node_entry) {
6223
refs++;
6224
/*
6225
* Need the node lock to synchronize
6226
* with new notification requests and the
6227
* inner lock to synchronize with queued
6228
* death notifications.
6229
*/
6230
binder_inner_proc_lock(ref->proc);
6231
if (!ref->death) {
6232
binder_inner_proc_unlock(ref->proc);
6233
continue;
6234
}
6235
6236
death++;
6237
6238
BUG_ON(!list_empty(&ref->death->work.entry));
6239
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
6240
binder_enqueue_work_ilocked(&ref->death->work,
6241
&ref->proc->todo);
6242
binder_wakeup_proc_ilocked(ref->proc);
6243
binder_inner_proc_unlock(ref->proc);
6244
}
6245
6246
binder_debug(BINDER_DEBUG_DEAD_BINDER,
6247
"node %d now dead, refs %d, death %d\n",
6248
node->debug_id, refs, death);
6249
binder_node_unlock(node);
6250
binder_put_node(node);
6251
6252
return refs;
6253
}
6254
6255
static void binder_deferred_release(struct binder_proc *proc)
6256
{
6257
struct binder_context *context = proc->context;
6258
struct rb_node *n;
6259
int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
6260
6261
mutex_lock(&binder_procs_lock);
6262
hlist_del(&proc->proc_node);
6263
mutex_unlock(&binder_procs_lock);
6264
6265
mutex_lock(&context->context_mgr_node_lock);
6266
if (context->binder_context_mgr_node &&
6267
context->binder_context_mgr_node->proc == proc) {
6268
binder_debug(BINDER_DEBUG_DEAD_BINDER,
6269
"%s: %d context_mgr_node gone\n",
6270
__func__, proc->pid);
6271
context->binder_context_mgr_node = NULL;
6272
}
6273
mutex_unlock(&context->context_mgr_node_lock);
6274
binder_inner_proc_lock(proc);
6275
/*
6276
* Make sure proc stays alive after we
6277
* remove all the threads
6278
*/
6279
proc->tmp_ref++;
6280
6281
proc->is_dead = true;
6282
proc->is_frozen = false;
6283
proc->sync_recv = false;
6284
proc->async_recv = false;
6285
threads = 0;
6286
active_transactions = 0;
6287
while ((n = rb_first(&proc->threads))) {
6288
struct binder_thread *thread;
6289
6290
thread = rb_entry(n, struct binder_thread, rb_node);
6291
binder_inner_proc_unlock(proc);
6292
threads++;
6293
active_transactions += binder_thread_release(proc, thread);
6294
binder_inner_proc_lock(proc);
6295
}
6296
6297
nodes = 0;
6298
incoming_refs = 0;
6299
while ((n = rb_first(&proc->nodes))) {
6300
struct binder_node *node;
6301
6302
node = rb_entry(n, struct binder_node, rb_node);
6303
nodes++;
6304
/*
6305
* take a temporary ref on the node before
6306
* calling binder_node_release() which will either
6307
* kfree() the node or call binder_put_node()
6308
*/
6309
binder_inc_node_tmpref_ilocked(node);
6310
rb_erase(&node->rb_node, &proc->nodes);
6311
binder_inner_proc_unlock(proc);
6312
incoming_refs = binder_node_release(node, incoming_refs);
6313
binder_inner_proc_lock(proc);
6314
}
6315
binder_inner_proc_unlock(proc);
6316
6317
outgoing_refs = 0;
6318
binder_proc_lock(proc);
6319
while ((n = rb_first(&proc->refs_by_desc))) {
6320
struct binder_ref *ref;
6321
6322
ref = rb_entry(n, struct binder_ref, rb_node_desc);
6323
outgoing_refs++;
6324
binder_cleanup_ref_olocked(ref);
6325
binder_proc_unlock(proc);
6326
binder_free_ref(ref);
6327
binder_proc_lock(proc);
6328
}
6329
binder_proc_unlock(proc);
6330
6331
binder_release_work(proc, &proc->todo);
6332
binder_release_work(proc, &proc->delivered_death);
6333
binder_release_work(proc, &proc->delivered_freeze);
6334
6335
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6336
"%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
6337
__func__, proc->pid, threads, nodes, incoming_refs,
6338
outgoing_refs, active_transactions);
6339
6340
binder_proc_dec_tmpref(proc);
6341
}
6342
6343
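/*
 * Workqueue handler that drains binder_deferred_list: each queued proc
 * is removed under binder_deferred_lock and its pending FLUSH and/or
 * RELEASE work is carried out without the lock held.
 */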
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					   struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
6371
6372
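/*
 * Record deferred work for a proc and, if the proc is not already
 * queued, add it to binder_deferred_list and kick the workqueue. The
 * guard(mutex) releases binder_deferred_lock automatically on return.
 */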
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	guard(mutex)(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
}
6383
6384
static void print_binder_transaction_ilocked(struct seq_file *m,
6385
struct binder_proc *proc,
6386
const char *prefix,
6387
struct binder_transaction *t)
6388
{
6389
struct binder_proc *to_proc;
6390
struct binder_buffer *buffer = t->buffer;
6391
ktime_t current_time = ktime_get();
6392
6393
spin_lock(&t->lock);
6394
to_proc = t->to_proc;
6395
seq_printf(m,
6396
"%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld a%d r%d elapsed %lldms",
6397
prefix, t->debug_id, t,
6398
t->from_pid,
6399
t->from_tid,
6400
to_proc ? to_proc->pid : 0,
6401
t->to_thread ? t->to_thread->pid : 0,
6402
t->code, t->flags, t->priority, t->is_async, t->is_reply,
6403
ktime_ms_delta(current_time, t->start_time));
6404
spin_unlock(&t->lock);
6405
6406
if (proc != to_proc) {
6407
/*
6408
* Can only safely deref buffer if we are holding the
6409
* correct proc inner lock for this node
6410
*/
6411
seq_puts(m, "\n");
6412
return;
6413
}
6414
6415
if (buffer == NULL) {
6416
seq_puts(m, " buffer free\n");
6417
return;
6418
}
6419
if (buffer->target_node)
6420
seq_printf(m, " node %d", buffer->target_node->debug_id);
6421
seq_printf(m, " size %zd:%zd offset %lx\n",
6422
buffer->data_size, buffer->offsets_size,
6423
buffer->user_data - proc->alloc.vm_start);
6424
}
6425
6426
static void print_binder_work_ilocked(struct seq_file *m,
6427
struct binder_proc *proc,
6428
const char *prefix,
6429
const char *transaction_prefix,
6430
struct binder_work *w, bool hash_ptrs)
6431
{
6432
struct binder_node *node;
6433
struct binder_transaction *t;
6434
6435
switch (w->type) {
6436
case BINDER_WORK_TRANSACTION:
6437
t = container_of(w, struct binder_transaction, work);
6438
print_binder_transaction_ilocked(
6439
m, proc, transaction_prefix, t);
6440
break;
6441
case BINDER_WORK_RETURN_ERROR: {
6442
struct binder_error *e = container_of(
6443
w, struct binder_error, work);
6444
6445
seq_printf(m, "%stransaction error: %u\n",
6446
prefix, e->cmd);
6447
} break;
6448
case BINDER_WORK_TRANSACTION_COMPLETE:
6449
seq_printf(m, "%stransaction complete\n", prefix);
6450
break;
6451
case BINDER_WORK_NODE:
6452
node = container_of(w, struct binder_node, work);
6453
if (hash_ptrs)
6454
seq_printf(m, "%snode work %d: u%p c%p\n",
6455
prefix, node->debug_id,
6456
(void *)(long)node->ptr,
6457
(void *)(long)node->cookie);
6458
else
6459
seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6460
prefix, node->debug_id,
6461
(u64)node->ptr, (u64)node->cookie);
6462
break;
6463
case BINDER_WORK_DEAD_BINDER:
6464
seq_printf(m, "%shas dead binder\n", prefix);
6465
break;
6466
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6467
seq_printf(m, "%shas cleared dead binder\n", prefix);
6468
break;
6469
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6470
seq_printf(m, "%shas cleared death notification\n", prefix);
6471
break;
6472
case BINDER_WORK_FROZEN_BINDER:
6473
seq_printf(m, "%shas frozen binder\n", prefix);
6474
break;
6475
case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
6476
seq_printf(m, "%shas cleared freeze notification\n", prefix);
6477
break;
6478
default:
6479
seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6480
break;
6481
}
6482
}
6483
6484
static void print_binder_thread_ilocked(struct seq_file *m,
6485
struct binder_thread *thread,
6486
bool print_always, bool hash_ptrs)
6487
{
6488
struct binder_transaction *t;
6489
struct binder_work *w;
6490
size_t start_pos = m->count;
6491
size_t header_pos;
6492
6493
seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
6494
thread->pid, thread->looper,
6495
thread->looper_need_return,
6496
atomic_read(&thread->tmp_ref));
6497
header_pos = m->count;
6498
t = thread->transaction_stack;
6499
while (t) {
6500
if (t->from == thread) {
6501
print_binder_transaction_ilocked(m, thread->proc,
6502
" outgoing transaction", t);
6503
t = t->from_parent;
6504
} else if (t->to_thread == thread) {
6505
print_binder_transaction_ilocked(m, thread->proc,
6506
" incoming transaction", t);
6507
t = t->to_parent;
6508
} else {
6509
print_binder_transaction_ilocked(m, thread->proc,
6510
" bad transaction", t);
6511
t = NULL;
6512
}
6513
}
6514
list_for_each_entry(w, &thread->todo, entry) {
6515
print_binder_work_ilocked(m, thread->proc, " ",
6516
" pending transaction",
6517
w, hash_ptrs);
6518
}
6519
if (!print_always && m->count == header_pos)
6520
m->count = start_pos;
6521
}
6522
6523
static void print_binder_node_nilocked(struct seq_file *m,
6524
struct binder_node *node,
6525
bool hash_ptrs)
6526
{
6527
struct binder_ref *ref;
6528
struct binder_work *w;
6529
int count;
6530
6531
count = hlist_count_nodes(&node->refs);
6532
6533
if (hash_ptrs)
6534
seq_printf(m, " node %d: u%p c%p", node->debug_id,
6535
(void *)(long)node->ptr, (void *)(long)node->cookie);
6536
else
6537
seq_printf(m, " node %d: u%016llx c%016llx", node->debug_id,
6538
(u64)node->ptr, (u64)node->cookie);
6539
seq_printf(m, " hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6540
node->has_strong_ref, node->has_weak_ref,
6541
node->local_strong_refs, node->local_weak_refs,
6542
node->internal_strong_refs, count, node->tmp_refs);
6543
if (count) {
6544
seq_puts(m, " proc");
6545
hlist_for_each_entry(ref, &node->refs, node_entry)
6546
seq_printf(m, " %d", ref->proc->pid);
6547
}
6548
seq_puts(m, "\n");
6549
if (node->proc) {
6550
list_for_each_entry(w, &node->async_todo, entry)
6551
print_binder_work_ilocked(m, node->proc, " ",
6552
" pending async transaction",
6553
w, hash_ptrs);
6554
}
6555
}
6556
6557
static void print_binder_ref_olocked(struct seq_file *m,
6558
struct binder_ref *ref)
6559
{
6560
binder_node_lock(ref->node);
6561
seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6562
ref->data.debug_id, ref->data.desc,
6563
ref->node->proc ? "" : "dead ",
6564
ref->node->debug_id, ref->data.strong,
6565
ref->data.weak, ref->death);
6566
binder_node_unlock(ref->node);
6567
}
6568
6569
/**
6570
* print_next_binder_node_ilocked() - Print binder_node from a locked list
6571
* @m: struct seq_file for output via seq_printf()
6572
* @proc: struct binder_proc we hold the inner_proc_lock to (if any)
6573
* @node: struct binder_node to print fields of
6574
* @prev_node: struct binder_node we hold a temporary reference to (if any)
6575
* @hash_ptrs: whether to hash @node's binder_uintptr_t fields
6576
*
6577
* Helper function to handle synchronization around printing a struct
6578
* binder_node while iterating through @proc->nodes or the dead nodes list.
6579
* Caller must hold either @proc->inner_lock (for live nodes) or
6580
* binder_dead_nodes_lock. This lock will be released during the body of this
6581
* function, but it will be reacquired before returning to the caller.
6582
*
6583
* Return: pointer to the struct binder_node we hold a tmpref on
6584
*/
6585
static struct binder_node *
6586
print_next_binder_node_ilocked(struct seq_file *m, struct binder_proc *proc,
6587
struct binder_node *node,
6588
struct binder_node *prev_node, bool hash_ptrs)
6589
{
6590
	/*
	 * Take a temporary reference on the node so that it isn't freed
	 * while we print it.
	 */
6594
binder_inc_node_tmpref_ilocked(node);
6595
/*
6596
* Live nodes need to drop the inner proc lock and dead nodes need to
6597
* drop the binder_dead_nodes_lock before trying to take the node lock.
6598
*/
6599
if (proc)
6600
binder_inner_proc_unlock(proc);
6601
else
6602
spin_unlock(&binder_dead_nodes_lock);
6603
if (prev_node)
6604
binder_put_node(prev_node);
6605
binder_node_inner_lock(node);
6606
print_binder_node_nilocked(m, node, hash_ptrs);
6607
binder_node_inner_unlock(node);
6608
if (proc)
6609
binder_inner_proc_lock(proc);
6610
else
6611
spin_lock(&binder_dead_nodes_lock);
6612
return node;
6613
}
6614
6615
static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
6616
bool print_all, bool hash_ptrs)
6617
{
6618
struct binder_work *w;
6619
struct rb_node *n;
6620
size_t start_pos = m->count;
6621
size_t header_pos;
6622
struct binder_node *last_node = NULL;
6623
6624
seq_printf(m, "proc %d\n", proc->pid);
6625
seq_printf(m, "context %s\n", proc->context->name);
6626
header_pos = m->count;
6627
6628
binder_inner_proc_lock(proc);
6629
for (n = rb_first(&proc->threads); n; n = rb_next(n))
6630
print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6631
rb_node), print_all, hash_ptrs);
6632
6633
for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
6634
struct binder_node *node = rb_entry(n, struct binder_node,
6635
rb_node);
6636
if (!print_all && !node->has_async_transaction)
6637
continue;
6638
6639
last_node = print_next_binder_node_ilocked(m, proc, node,
6640
last_node,
6641
hash_ptrs);
6642
}
6643
binder_inner_proc_unlock(proc);
6644
if (last_node)
6645
binder_put_node(last_node);
6646
6647
if (print_all) {
6648
binder_proc_lock(proc);
6649
for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n))
6650
print_binder_ref_olocked(m, rb_entry(n,
6651
struct binder_ref,
6652
rb_node_desc));
6653
binder_proc_unlock(proc);
6654
}
6655
binder_alloc_print_allocated(m, &proc->alloc);
6656
binder_inner_proc_lock(proc);
6657
list_for_each_entry(w, &proc->todo, entry)
6658
print_binder_work_ilocked(m, proc, " ",
6659
" pending transaction", w,
6660
hash_ptrs);
6661
list_for_each_entry(w, &proc->delivered_death, entry) {
6662
seq_puts(m, " has delivered dead binder\n");
6663
break;
6664
}
6665
list_for_each_entry(w, &proc->delivered_freeze, entry) {
6666
seq_puts(m, " has delivered freeze binder\n");
6667
break;
6668
}
6669
binder_inner_proc_unlock(proc);
6670
if (!print_all && m->count == header_pos)
6671
m->count = start_pos;
6672
}
6673
6674
static const char * const binder_return_strings[] = {
6675
"BR_ERROR",
6676
"BR_OK",
6677
"BR_TRANSACTION",
6678
"BR_REPLY",
6679
"BR_ACQUIRE_RESULT",
6680
"BR_DEAD_REPLY",
6681
"BR_TRANSACTION_COMPLETE",
6682
"BR_INCREFS",
6683
"BR_ACQUIRE",
6684
"BR_RELEASE",
6685
"BR_DECREFS",
6686
"BR_ATTEMPT_ACQUIRE",
6687
"BR_NOOP",
6688
"BR_SPAWN_LOOPER",
6689
"BR_FINISHED",
6690
"BR_DEAD_BINDER",
6691
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
6692
"BR_FAILED_REPLY",
6693
"BR_FROZEN_REPLY",
6694
"BR_ONEWAY_SPAM_SUSPECT",
6695
"BR_TRANSACTION_PENDING_FROZEN",
6696
"BR_FROZEN_BINDER",
6697
"BR_CLEAR_FREEZE_NOTIFICATION_DONE",
6698
};
6699
6700
static const char * const binder_command_strings[] = {
6701
"BC_TRANSACTION",
6702
"BC_REPLY",
6703
"BC_ACQUIRE_RESULT",
6704
"BC_FREE_BUFFER",
6705
"BC_INCREFS",
6706
"BC_ACQUIRE",
6707
"BC_RELEASE",
6708
"BC_DECREFS",
6709
"BC_INCREFS_DONE",
6710
"BC_ACQUIRE_DONE",
6711
"BC_ATTEMPT_ACQUIRE",
6712
"BC_REGISTER_LOOPER",
6713
"BC_ENTER_LOOPER",
6714
"BC_EXIT_LOOPER",
6715
"BC_REQUEST_DEATH_NOTIFICATION",
6716
"BC_CLEAR_DEATH_NOTIFICATION",
6717
"BC_DEAD_BINDER_DONE",
6718
"BC_TRANSACTION_SG",
6719
"BC_REPLY_SG",
6720
"BC_REQUEST_FREEZE_NOTIFICATION",
6721
"BC_CLEAR_FREEZE_NOTIFICATION",
6722
"BC_FREEZE_NOTIFICATION_DONE",
6723
};
6724
6725
static const char * const binder_objstat_strings[] = {
6726
"proc",
6727
"thread",
6728
"node",
6729
"ref",
6730
"death",
6731
"transaction",
6732
"transaction_complete",
6733
"freeze",
6734
};
6735
6736
static void print_binder_stats(struct seq_file *m, const char *prefix,
6737
struct binder_stats *stats)
6738
{
6739
int i;
6740
6741
BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6742
ARRAY_SIZE(binder_command_strings));
6743
for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6744
int temp = atomic_read(&stats->bc[i]);
6745
6746
if (temp)
6747
seq_printf(m, "%s%s: %d\n", prefix,
6748
binder_command_strings[i], temp);
6749
}
6750
6751
BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6752
ARRAY_SIZE(binder_return_strings));
6753
for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6754
int temp = atomic_read(&stats->br[i]);
6755
6756
if (temp)
6757
seq_printf(m, "%s%s: %d\n", prefix,
6758
binder_return_strings[i], temp);
6759
}
6760
6761
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6762
ARRAY_SIZE(binder_objstat_strings));
6763
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6764
ARRAY_SIZE(stats->obj_deleted));
6765
for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6766
int created = atomic_read(&stats->obj_created[i]);
6767
int deleted = atomic_read(&stats->obj_deleted[i]);
6768
6769
if (created || deleted)
6770
seq_printf(m, "%s%s: active %d total %d\n",
6771
prefix,
6772
binder_objstat_strings[i],
6773
created - deleted,
6774
created);
6775
}
6776
}
6777
6778
static void print_binder_proc_stats(struct seq_file *m,
6779
struct binder_proc *proc)
6780
{
6781
struct binder_work *w;
6782
struct binder_thread *thread;
6783
struct rb_node *n;
6784
int count, strong, weak, ready_threads;
6785
size_t free_async_space =
6786
binder_alloc_get_free_async_space(&proc->alloc);
6787
6788
seq_printf(m, "proc %d\n", proc->pid);
6789
seq_printf(m, "context %s\n", proc->context->name);
6790
count = 0;
6791
ready_threads = 0;
6792
binder_inner_proc_lock(proc);
6793
for (n = rb_first(&proc->threads); n; n = rb_next(n))
6794
count++;
6795
6796
list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6797
ready_threads++;
6798
6799
seq_printf(m, " threads: %d\n", count);
6800
seq_printf(m, " requested threads: %d+%d/%d\n"
6801
" ready threads %d\n"
6802
" free async space %zd\n", proc->requested_threads,
6803
proc->requested_threads_started, proc->max_threads,
6804
ready_threads,
6805
free_async_space);
6806
count = 0;
6807
for (n = rb_first(&proc->nodes); n; n = rb_next(n))
6808
count++;
6809
binder_inner_proc_unlock(proc);
6810
seq_printf(m, " nodes: %d\n", count);
6811
count = 0;
6812
strong = 0;
6813
weak = 0;
6814
binder_proc_lock(proc);
6815
for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
6816
struct binder_ref *ref = rb_entry(n, struct binder_ref,
6817
rb_node_desc);
6818
count++;
6819
strong += ref->data.strong;
6820
weak += ref->data.weak;
6821
}
6822
binder_proc_unlock(proc);
6823
seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6824
6825
count = binder_alloc_get_allocated_count(&proc->alloc);
6826
seq_printf(m, " buffers: %d\n", count);
6827
6828
binder_alloc_print_pages(m, &proc->alloc);
6829
6830
count = 0;
6831
binder_inner_proc_lock(proc);
6832
list_for_each_entry(w, &proc->todo, entry) {
6833
if (w->type == BINDER_WORK_TRANSACTION)
6834
count++;
6835
}
6836
binder_inner_proc_unlock(proc);
6837
seq_printf(m, " pending transactions: %d\n", count);
6838
6839
print_binder_stats(m, " ", &proc->stats);
6840
}
6841
6842
static void print_binder_state(struct seq_file *m, bool hash_ptrs)
6843
{
6844
struct binder_proc *proc;
6845
struct binder_node *node;
6846
struct binder_node *last_node = NULL;
6847
6848
seq_puts(m, "binder state:\n");
6849
6850
spin_lock(&binder_dead_nodes_lock);
6851
if (!hlist_empty(&binder_dead_nodes))
6852
seq_puts(m, "dead nodes:\n");
6853
hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
6854
last_node = print_next_binder_node_ilocked(m, NULL, node,
6855
last_node,
6856
hash_ptrs);
6857
spin_unlock(&binder_dead_nodes_lock);
6858
if (last_node)
6859
binder_put_node(last_node);
6860
6861
mutex_lock(&binder_procs_lock);
6862
hlist_for_each_entry(proc, &binder_procs, proc_node)
6863
print_binder_proc(m, proc, true, hash_ptrs);
6864
mutex_unlock(&binder_procs_lock);
6865
}
6866
6867
static void print_binder_transactions(struct seq_file *m, bool hash_ptrs)
6868
{
6869
struct binder_proc *proc;
6870
6871
seq_puts(m, "binder transactions:\n");
6872
mutex_lock(&binder_procs_lock);
6873
hlist_for_each_entry(proc, &binder_procs, proc_node)
6874
print_binder_proc(m, proc, false, hash_ptrs);
6875
mutex_unlock(&binder_procs_lock);
6876
}
6877
6878
static int state_show(struct seq_file *m, void *unused)
{
	print_binder_state(m, false);
	return 0;
}

static int state_hashed_show(struct seq_file *m, void *unused)
{
	print_binder_state(m, true);
	return 0;
}
6889
6890
static int stats_show(struct seq_file *m, void *unused)
6891
{
6892
struct binder_proc *proc;
6893
6894
seq_puts(m, "binder stats:\n");
6895
6896
print_binder_stats(m, "", &binder_stats);
6897
6898
mutex_lock(&binder_procs_lock);
6899
hlist_for_each_entry(proc, &binder_procs, proc_node)
6900
print_binder_proc_stats(m, proc);
6901
mutex_unlock(&binder_procs_lock);
6902
6903
return 0;
6904
}
6905
6906
static int transactions_show(struct seq_file *m, void *unused)
{
	print_binder_transactions(m, false);
	return 0;
}

static int transactions_hashed_show(struct seq_file *m, void *unused)
{
	print_binder_transactions(m, true);
	return 0;
}
6917
6918
static int proc_show(struct seq_file *m, void *unused)
6919
{
6920
struct binder_proc *itr;
6921
int pid = (unsigned long)m->private;
6922
6923
guard(mutex)(&binder_procs_lock);
6924
hlist_for_each_entry(itr, &binder_procs, proc_node) {
6925
if (itr->pid == pid) {
6926
seq_puts(m, "binder proc state:\n");
6927
print_binder_proc(m, itr, true, false);
6928
}
6929
}
6930
6931
return 0;
6932
}
6933
6934
static void print_binder_transaction_log_entry(struct seq_file *m,
6935
struct binder_transaction_log_entry *e)
6936
{
6937
int debug_id = READ_ONCE(e->debug_id_done);
6938
/*
6939
* read barrier to guarantee debug_id_done read before
6940
* we print the log values
6941
*/
6942
smp_rmb();
6943
seq_printf(m,
6944
"%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6945
e->debug_id, (e->call_type == 2) ? "reply" :
6946
((e->call_type == 1) ? "async" : "call "), e->from_proc,
6947
e->from_thread, e->to_proc, e->to_thread, e->context_name,
6948
e->to_node, e->target_handle, e->data_size, e->offsets_size,
6949
e->return_error, e->return_error_param,
6950
e->return_error_line);
6951
/*
6952
* read-barrier to guarantee read of debug_id_done after
6953
* done printing the fields of the entry
6954
*/
6955
smp_rmb();
6956
seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6957
"\n" : " (incomplete)\n");
6958
}
6959
6960
static int transaction_log_show(struct seq_file *m, void *unused)
6961
{
6962
struct binder_transaction_log *log = m->private;
6963
unsigned int log_cur = atomic_read(&log->cur);
6964
unsigned int count;
6965
unsigned int cur;
6966
int i;
6967
6968
count = log_cur + 1;
6969
cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6970
0 : count % ARRAY_SIZE(log->entry);
6971
if (count > ARRAY_SIZE(log->entry) || log->full)
6972
count = ARRAY_SIZE(log->entry);
6973
for (i = 0; i < count; i++) {
6974
unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6975
6976
print_binder_transaction_log_entry(m, &log->entry[index]);
6977
}
6978
return 0;
6979
}
6980
6981
const struct file_operations binder_fops = {
6982
.owner = THIS_MODULE,
6983
.poll = binder_poll,
6984
.unlocked_ioctl = binder_ioctl,
6985
.compat_ioctl = compat_ptr_ioctl,
6986
.mmap = binder_mmap,
6987
.open = binder_open,
6988
.flush = binder_flush,
6989
.release = binder_release,
6990
};
6991
6992
DEFINE_SHOW_ATTRIBUTE(state);
6993
DEFINE_SHOW_ATTRIBUTE(state_hashed);
6994
DEFINE_SHOW_ATTRIBUTE(stats);
6995
DEFINE_SHOW_ATTRIBUTE(transactions);
6996
DEFINE_SHOW_ATTRIBUTE(transactions_hashed);
6997
DEFINE_SHOW_ATTRIBUTE(transaction_log);
6998
6999
const struct binder_debugfs_entry binder_debugfs_entries[] = {
7000
{
7001
.name = "state",
7002
.mode = 0444,
7003
.fops = &state_fops,
7004
.data = NULL,
7005
},
7006
{
7007
.name = "state_hashed",
7008
.mode = 0444,
7009
.fops = &state_hashed_fops,
7010
.data = NULL,
7011
},
7012
{
7013
.name = "stats",
7014
.mode = 0444,
7015
.fops = &stats_fops,
7016
.data = NULL,
7017
},
7018
{
7019
.name = "transactions",
7020
.mode = 0444,
7021
.fops = &transactions_fops,
7022
.data = NULL,
7023
},
7024
{
7025
.name = "transactions_hashed",
7026
.mode = 0444,
7027
.fops = &transactions_hashed_fops,
7028
.data = NULL,
7029
},
7030
{
7031
.name = "transaction_log",
7032
.mode = 0444,
7033
.fops = &transaction_log_fops,
7034
.data = &binder_transaction_log,
7035
},
7036
{
7037
.name = "failed_transaction_log",
7038
.mode = 0444,
7039
.fops = &transaction_log_fops,
7040
.data = &binder_transaction_log_failed,
7041
},
7042
{} /* terminator */
7043
};
7044
7045
void binder_add_device(struct binder_device *device)
{
	guard(spinlock)(&binder_devices_lock);
	hlist_add_head(&device->hlist, &binder_devices);
}

void binder_remove_device(struct binder_device *device)
{
	guard(spinlock)(&binder_devices_lock);
	hlist_del_init(&device->hlist);
}
7056
7057
static int __init init_binder_device(const char *name)
7058
{
7059
int ret;
7060
struct binder_device *binder_device;
7061
7062
binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
7063
if (!binder_device)
7064
return -ENOMEM;
7065
7066
binder_device->miscdev.fops = &binder_fops;
7067
binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
7068
binder_device->miscdev.name = name;
7069
7070
refcount_set(&binder_device->ref, 1);
7071
binder_device->context.binder_context_mgr_uid = INVALID_UID;
7072
binder_device->context.name = name;
7073
mutex_init(&binder_device->context.context_mgr_node_lock);
7074
7075
ret = misc_register(&binder_device->miscdev);
7076
if (ret < 0) {
7077
kfree(binder_device);
7078
return ret;
7079
}
7080
7081
binder_add_device(binder_device);
7082
7083
return ret;
7084
}
7085
7086
static int __init binder_init(void)
7087
{
7088
int ret;
7089
char *device_name, *device_tmp;
7090
struct binder_device *device;
7091
struct hlist_node *tmp;
7092
char *device_names = NULL;
7093
const struct binder_debugfs_entry *db_entry;
7094
7095
ret = binder_alloc_shrinker_init();
7096
if (ret)
7097
return ret;
7098
7099
atomic_set(&binder_transaction_log.cur, ~0U);
7100
atomic_set(&binder_transaction_log_failed.cur, ~0U);
7101
7102
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
7103
7104
binder_for_each_debugfs_entry(db_entry)
7105
debugfs_create_file(db_entry->name,
7106
db_entry->mode,
7107
binder_debugfs_dir_entry_root,
7108
db_entry->data,
7109
db_entry->fops);
7110
7111
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
7112
binder_debugfs_dir_entry_root);
7113
7114
if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
7115
strcmp(binder_devices_param, "") != 0) {
7116
/*
7117
* Copy the module_parameter string, because we don't want to
7118
* tokenize it in-place.
7119
*/
7120
device_names = kstrdup(binder_devices_param, GFP_KERNEL);
7121
if (!device_names) {
7122
ret = -ENOMEM;
7123
goto err_alloc_device_names_failed;
7124
}
7125
7126
device_tmp = device_names;
7127
while ((device_name = strsep(&device_tmp, ","))) {
7128
ret = init_binder_device(device_name);
7129
if (ret)
7130
goto err_init_binder_device_failed;
7131
}
7132
}
7133
7134
ret = genl_register_family(&binder_nl_family);
7135
if (ret)
7136
goto err_init_binder_device_failed;
7137
7138
ret = init_binderfs();
7139
if (ret)
7140
goto err_init_binderfs_failed;
7141
7142
return ret;
7143
7144
err_init_binderfs_failed:
7145
genl_unregister_family(&binder_nl_family);
7146
7147
err_init_binder_device_failed:
7148
hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
7149
misc_deregister(&device->miscdev);
7150
binder_remove_device(device);
7151
kfree(device);
7152
}
7153
7154
kfree(device_names);
7155
7156
err_alloc_device_names_failed:
7157
debugfs_remove_recursive(binder_debugfs_dir_entry_root);
7158
binder_alloc_shrinker_exit();
7159
7160
return ret;
7161
}
7162
7163
device_initcall(binder_init);
7164
7165
#define CREATE_TRACE_POINTS
#include "binder_trace.h"