use core::mem::take;
use kernel::{
bindings,
cred::Credential,
error::Error,
fs::file::{self, File},
list::{List, ListArc, ListArcField, ListLinks},
mm,
prelude::*,
rbtree::{self, RBTree, RBTreeNode, RBTreeNodeReservation},
seq_file::SeqFile,
seq_print,
sync::poll::PollTable,
sync::{
lock::{spinlock::SpinLockBackend, Guard},
Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
},
task::Task,
types::ARef,
uaccess::{UserPtr, UserSlice, UserSliceReader},
uapi,
workqueue::{self, Work},
};
use crate::{
allocation::{Allocation, AllocationInfo, NewAllocation},
context::Context,
defs::*,
error::{BinderError, BinderResult},
node::{CouldNotDeliverCriticalIncrement, CritIncrWrapper, Node, NodeDeath, NodeRef},
page_range::ShrinkablePageRange,
range_alloc::{RangeAllocator, ReserveNew, ReserveNewArgs},
stats::BinderStats,
thread::{PushWorkRes, Thread},
BinderfsProcFile, DArc, DLArc, DTRWrap, DeliverToRead,
};
#[path = "freeze.rs"]
mod freeze;
use self::freeze::{FreezeCookie, FreezeListener};
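/// A view of the process's mmap'ed memory region: the userspace base address
/// plus a range allocator that carves transaction buffers out of it.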
struct Mapping {
address: usize,
alloc: RangeAllocator<AllocationInfo>,
}
impl Mapping {
fn new(address: usize, size: usize) -> Self {
Self {
address,
alloc: RangeAllocator::new(size),
}
}
}
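// Bitflags stored in `ProcessInner::defer_work`; the corresponding cleanup
// runs later from the workqueue in `WorkItem::run`.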
const PROC_DEFER_FLUSH: u8 = 1;
const PROC_DEFER_RELEASE: u8 = 2;
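/// The mutable state of a [`Process`], protected by its spinlock.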
pub(crate) struct ProcessInner {
is_manager: bool,
pub(crate) is_dead: bool,
threads: RBTree<i32, Arc<Thread>>,
ready_threads: List<Thread>,
nodes: RBTree<u64, DArc<Node>>,
mapping: Option<Mapping>,
work: List<DTRWrap<dyn DeliverToRead>>,
delivered_deaths: List<DTRWrap<NodeDeath>, 2>,
requested_thread_count: u32,
max_threads: u32,
started_thread_count: u32,
defer_work: u8,
outstanding_txns: u32,
pub(crate) is_frozen: bool,
pub(crate) sync_recv: bool,
pub(crate) async_recv: bool,
pub(crate) binderfs_file: Option<BinderfsProcFile>,
oneway_spam_detection_enabled: bool,
}
impl ProcessInner {
fn new() -> Self {
Self {
is_manager: false,
is_dead: false,
threads: RBTree::new(),
ready_threads: List::new(),
mapping: None,
nodes: RBTree::new(),
work: List::new(),
delivered_deaths: List::new(),
requested_thread_count: 0,
max_threads: 0,
started_thread_count: 0,
defer_work: 0,
outstanding_txns: 0,
is_frozen: false,
sync_recv: false,
async_recv: false,
binderfs_file: None,
oneway_spam_detection_enabled: false,
}
}
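/// Schedules work for delivery to this process: it is handed directly to a
/// ready thread if one is available, and otherwise queued on the process work
/// list with any polling threads notified. If the process is dead, the work
/// item is returned to the caller alongside the error.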
pub(crate) fn push_work(
&mut self,
work: DLArc<dyn DeliverToRead>,
) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
if let Some(thread) = self.ready_threads.pop_front() {
match thread.push_work(work) {
PushWorkRes::Ok => Ok(()),
PushWorkRes::FailedDead(work) => Err((BinderError::new_dead(), work)),
}
} else if self.is_dead {
Err((BinderError::new_dead(), work))
} else {
let sync = work.should_sync_wakeup();
self.work.push_back(work);
for thread in self.threads.values() {
thread.notify_if_poll_ready(sync);
}
Ok(())
}
}
pub(crate) fn remove_node(&mut self, ptr: u64) {
self.nodes.remove(&ptr);
}
pub(crate) fn update_node_refcount(
&mut self,
node: &DArc<Node>,
inc: bool,
strong: bool,
count: usize,
othread: Option<&Thread>,
) {
let push = node.update_refcount_locked(inc, strong, count, self);
if let Some(node) = push {
if let Some(thread) = othread {
thread.push_work_deferred(node);
} else {
let _ = self.push_work(node);
}
}
}
pub(crate) fn new_node_ref(
&mut self,
node: DArc<Node>,
strong: bool,
thread: Option<&Thread>,
) -> NodeRef {
self.update_node_refcount(&node, true, strong, 1, thread);
let strong_count = if strong { 1 } else { 0 };
NodeRef::new(node, strong_count, 1 - strong_count)
}
pub(crate) fn new_node_ref_with_thread(
&mut self,
node: DArc<Node>,
strong: bool,
thread: &Thread,
wrapper: Option<CritIncrWrapper>,
) -> Result<NodeRef, CouldNotDeliverCriticalIncrement> {
let push = match wrapper {
None => node
.incr_refcount_allow_zero2one(strong, self)?
.map(|node| node as _),
Some(wrapper) => node.incr_refcount_allow_zero2one_with_wrapper(strong, wrapper, self),
};
if let Some(node) = push {
thread.push_work_deferred(node);
}
let strong_count = if strong { 1 } else { 0 };
Ok(NodeRef::new(node, strong_count, 1 - strong_count))
}
fn get_existing_node(&self, ptr: u64, cookie: u64) -> Result<Option<DArc<Node>>> {
match self.nodes.get(&ptr) {
None => Ok(None),
Some(node) => {
let (_, node_cookie) = node.get_id();
if node_cookie == cookie {
Ok(Some(node.clone()))
} else {
Err(EINVAL)
}
}
}
}
fn register_thread(&mut self) -> bool {
if self.requested_thread_count == 0 {
return false;
}
self.requested_thread_count -= 1;
self.started_thread_count += 1;
true
}
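/// Finds and removes the delivered death notification with the matching
/// cookie, if any.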
fn pull_delivered_death(&mut self, cookie: u64) -> Option<DArc<NodeDeath>> {
let mut cursor = self.delivered_deaths.cursor_front();
while let Some(next) = cursor.peek_next() {
if next.cookie == cookie {
return Some(next.remove().into_arc());
}
cursor.move_next();
}
None
}
pub(crate) fn death_delivered(&mut self, death: DArc<NodeDeath>) {
if let Some(death) = ListArc::try_from_arc_or_drop(death) {
self.delivered_deaths.push_back(death);
} else {
pr_warn!("Notification added to `delivered_deaths` twice.");
}
}
pub(crate) fn add_outstanding_txn(&mut self) {
self.outstanding_txns += 1;
}
fn txns_pending_locked(&self) -> bool {
if self.outstanding_txns > 0 {
return true;
}
for thread in self.threads.values() {
if thread.has_current_transaction() {
return true;
}
}
false
}
}
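/// Metadata about a node reference (handle) held by a process. Instances are
/// tracked under two list-arc ids: `LIST_NODE` links them into the target
/// node's list of refs, while `LIST_PROC` is the side stored in the owning
/// process's handle table.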
#[pin_data]
pub(crate) struct NodeRefInfo {
debug_id: usize,
node_ref: ListArcField<NodeRef, { Self::LIST_PROC }>,
death: ListArcField<Option<DArc<NodeDeath>>, { Self::LIST_PROC }>,
freeze: ListArcField<Option<FreezeCookie>, { Self::LIST_PROC }>,
#[pin]
links: ListLinks<{ Self::LIST_NODE }>,
handle: u32,
pub(crate) process: Arc<Process>,
}
impl NodeRefInfo {
pub(crate) const LIST_NODE: u64 = 0x2da16350fb724a10;
const LIST_PROC: u64 = 0xd703a5263dcc8650;
fn new(node_ref: NodeRef, handle: u32, process: Arc<Process>) -> impl PinInit<Self> {
pin_init!(Self {
debug_id: super::next_debug_id(),
node_ref: ListArcField::new(node_ref),
death: ListArcField::new(None),
freeze: ListArcField::new(None),
links <- ListLinks::new(),
handle,
process,
})
}
kernel::list::define_list_arc_field_getter! {
pub(crate) fn death(&mut self<{Self::LIST_PROC}>) -> &mut Option<DArc<NodeDeath>> { death }
pub(crate) fn freeze(&mut self<{Self::LIST_PROC}>) -> &mut Option<FreezeCookie> { freeze }
pub(crate) fn node_ref(&mut self<{Self::LIST_PROC}>) -> &mut NodeRef { node_ref }
pub(crate) fn node_ref2(&self<{Self::LIST_PROC}>) -> &NodeRef { node_ref }
}
}
kernel::list::impl_list_arc_safe! {
impl ListArcSafe<{Self::LIST_NODE}> for NodeRefInfo { untracked; }
impl ListArcSafe<{Self::LIST_PROC}> for NodeRefInfo { untracked; }
}
kernel::list::impl_list_item! {
impl ListItem<{Self::LIST_NODE}> for NodeRefInfo {
using ListLinks { self.links };
}
}
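/// The handle tables of a process: node references indexed by handle and by
/// global node id, plus any registered freeze listeners.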
struct ProcessNodeRefs {
by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
by_node: RBTree<usize, u32>,
freeze_listeners: RBTree<FreezeCookie, FreezeListener>,
}
impl ProcessNodeRefs {
fn new() -> Self {
Self {
by_handle: RBTree::new(),
by_node: RBTree::new(),
freeze_listeners: RBTree::new(),
}
}
}
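/// A process using binder. Holds the spinlocked mutable state, the shrinkable
/// page range backing the mapping, the handle tables, and the deferred-work
/// item used for flush and release.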
#[pin_data]
pub(crate) struct Process {
pub(crate) ctx: Arc<Context>,
pub(crate) task: ARef<Task>,
pub(crate) cred: ARef<Credential>,
#[pin]
pub(crate) inner: SpinLock<ProcessInner>,
#[pin]
pub(crate) pages: ShrinkablePageRange,
#[pin]
freeze_wait: CondVar,
#[pin]
node_refs: Mutex<ProcessNodeRefs>,
#[pin]
defer_work: Work<Process>,
#[pin]
links: ListLinks,
pub(crate) stats: BinderStats,
}
kernel::impl_has_work! {
impl HasWork<Process> for Process { self.defer_work }
}
kernel::list::impl_list_arc_safe! {
impl ListArcSafe<0> for Process { untracked; }
}
kernel::list::impl_list_item! {
impl ListItem<0> for Process {
using ListLinks { self.links };
}
}
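// Processes the deferred work bits set by `flush` and `release`.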
impl workqueue::WorkItem for Process {
type Pointer = Arc<Process>;
fn run(me: Arc<Self>) {
let defer;
{
let mut inner = me.inner.lock();
defer = inner.defer_work;
inner.defer_work = 0;
}
if defer & PROC_DEFER_FLUSH != 0 {
me.deferred_flush();
}
if defer & PROC_DEFER_RELEASE != 0 {
me.deferred_release();
}
}
}
impl Process {
fn new(ctx: Arc<Context>, cred: ARef<Credential>) -> Result<Arc<Self>> {
let current = kernel::current!();
let list_process = ListArc::pin_init::<Error>(
try_pin_init!(Process {
ctx,
cred,
inner <- kernel::new_spinlock!(ProcessInner::new(), "Process::inner"),
pages <- ShrinkablePageRange::new(&super::BINDER_SHRINKER),
node_refs <- kernel::new_mutex!(ProcessNodeRefs::new(), "Process::node_refs"),
freeze_wait <- kernel::new_condvar!("Process::freeze_wait"),
task: current.group_leader().into(),
defer_work <- kernel::new_work!("Process::defer_work"),
links <- ListLinks::new(),
stats: BinderStats::new(),
}),
GFP_KERNEL,
)?;
let process = list_process.clone_arc();
process.ctx.register_process(list_process);
Ok(process)
}
pub(crate) fn pid_in_current_ns(&self) -> kernel::task::Pid {
self.task.tgid_nr_ns(None)
}
#[inline(never)]
pub(crate) fn debug_print_stats(&self, m: &SeqFile, ctx: &Context) -> Result<()> {
seq_print!(m, "proc {}\n", self.pid_in_current_ns());
seq_print!(m, "context {}\n", &*ctx.name);
let inner = self.inner.lock();
seq_print!(m, " threads: {}\n", inner.threads.iter().count());
seq_print!(
m,
" requested threads: {}+{}/{}\n",
inner.requested_thread_count,
inner.started_thread_count,
inner.max_threads,
);
if let Some(mapping) = &inner.mapping {
seq_print!(
m,
" free oneway space: {}\n",
mapping.alloc.free_oneway_space()
);
seq_print!(m, " buffers: {}\n", mapping.alloc.count_buffers());
}
seq_print!(
m,
" outstanding transactions: {}\n",
inner.outstanding_txns
);
seq_print!(m, " nodes: {}\n", inner.nodes.iter().count());
drop(inner);
{
let mut refs = self.node_refs.lock();
let (mut count, mut weak, mut strong) = (0, 0, 0);
for r in refs.by_handle.values_mut() {
let node_ref = r.node_ref();
let (nstrong, nweak) = node_ref.get_count();
count += 1;
weak += nweak;
strong += nstrong;
}
seq_print!(m, " refs: {count} s {strong} w {weak}\n");
}
self.stats.debug_print(" ", m);
Ok(())
}
#[inline(never)]
pub(crate) fn debug_print(&self, m: &SeqFile, ctx: &Context, print_all: bool) -> Result<()> {
seq_print!(m, "proc {}\n", self.pid_in_current_ns());
seq_print!(m, "context {}\n", &*ctx.name);
let mut all_threads = KVec::new();
let mut all_nodes = KVec::new();
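// Snapshot the threads and nodes: reserve capacity with the lock dropped,
// then fill the vectors under the lock using GFP_ATOMIC pushes that cannot
// fail thanks to the capacity check.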
loop {
let inner = self.inner.lock();
let num_threads = inner.threads.iter().count();
let num_nodes = inner.nodes.iter().count();
if all_threads.capacity() < num_threads || all_nodes.capacity() < num_nodes {
drop(inner);
all_threads.reserve(num_threads, GFP_KERNEL)?;
all_nodes.reserve(num_nodes, GFP_KERNEL)?;
continue;
}
for thread in inner.threads.values() {
assert!(all_threads.len() < all_threads.capacity());
let _ = all_threads.push(thread.clone(), GFP_ATOMIC);
}
for node in inner.nodes.values() {
assert!(all_nodes.len() < all_nodes.capacity());
let _ = all_nodes.push(node.clone(), GFP_ATOMIC);
}
break;
}
for thread in all_threads {
thread.debug_print(m, print_all)?;
}
let mut inner = self.inner.lock();
for node in all_nodes {
if print_all || node.has_oneway_transaction(&mut inner) {
node.full_debug_print(m, &mut inner)?;
}
}
drop(inner);
if print_all {
let mut refs = self.node_refs.lock();
for r in refs.by_handle.values_mut() {
let node_ref = r.node_ref();
let dead = node_ref.node.owner.inner.lock().is_dead;
let (strong, weak) = node_ref.get_count();
let debug_id = node_ref.node.debug_id;
seq_print!(
m,
" ref {}: desc {} {}node {debug_id} s {strong} w {weak}",
r.debug_id,
r.handle,
if dead { "dead " } else { "" },
);
}
}
let inner = self.inner.lock();
for work in &inner.work {
work.debug_print(m, " ", " pending transaction ")?;
}
for _death in &inner.delivered_deaths {
seq_print!(m, " has delivered dead binder\n");
}
if let Some(mapping) = &inner.mapping {
mapping.alloc.debug_print(m)?;
}
drop(inner);
Ok(())
}
pub(crate) fn get_work(&self) -> Option<DLArc<dyn DeliverToRead>> {
self.inner.lock().work.pop_front()
}
pub(crate) fn get_work_or_register<'a>(
&'a self,
thread: &'a Arc<Thread>,
) -> GetWorkOrRegister<'a> {
let mut inner = self.inner.lock();
if let Some(work) = inner.work.pop_front() {
return GetWorkOrRegister::Work(work);
}
GetWorkOrRegister::Register(Registration::new(thread, &mut inner))
}
fn get_current_thread(self: ArcBorrow<'_, Self>) -> Result<Arc<Thread>> {
let id = {
let current = kernel::current!();
if !core::ptr::eq(current.group_leader(), &*self.task) {
pr_err!("get_current_thread was called from the wrong process.");
return Err(EINVAL);
}
current.pid()
};
{
let inner = self.inner.lock();
if let Some(thread) = inner.threads.get(&id) {
return Ok(thread.clone());
}
}
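// Allocate the new thread and its rbtree slot without holding the lock,
// then insert it, failing if a thread with the same id appeared meanwhile.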
let reservation = RBTreeNodeReservation::new(GFP_KERNEL)?;
let ta: Arc<Thread> = Thread::new(id, self.into())?;
let mut inner = self.inner.lock();
match inner.threads.entry(id) {
rbtree::Entry::Vacant(entry) => {
entry.insert(ta.clone(), reservation);
Ok(ta)
}
rbtree::Entry::Occupied(_entry) => {
pr_err!("Cannot create two threads with the same id.");
Err(EINVAL)
}
}
}
pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
let res = self.inner.lock().push_work(work);
match res {
Ok(()) => Ok(()),
Err((err, work)) => {
drop(work);
Err(err)
}
}
}
fn set_as_manager(
self: ArcBorrow<'_, Self>,
info: Option<FlatBinderObject>,
thread: &Thread,
) -> Result {
let (ptr, cookie, flags) = if let Some(obj) = info {
(
unsafe { obj.__bindgen_anon_1.binder },
obj.cookie,
obj.flags,
)
} else {
(0, 0, 0)
};
let node_ref = self.get_node(ptr, cookie, flags as _, true, thread)?;
let node = node_ref.node.clone();
self.ctx.set_manager_node(node_ref)?;
self.inner.lock().is_manager = true;
let mut owner_inner = node.owner.inner.lock();
node.force_has_count(&mut owner_inner);
Ok(())
}
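/// Looks up the node for the given pointer/cookie pair, creating it if it
/// does not exist yet. The node and its rbtree slot are allocated with the
/// lock dropped, so the lookup is repeated afterwards in case another thread
/// raced and inserted the node first.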
fn get_node_inner(
self: ArcBorrow<'_, Self>,
ptr: u64,
cookie: u64,
flags: u32,
strong: bool,
thread: &Thread,
wrapper: Option<CritIncrWrapper>,
) -> Result<Result<NodeRef, CouldNotDeliverCriticalIncrement>> {
{
let mut inner = self.inner.lock();
if let Some(node) = inner.get_existing_node(ptr, cookie)? {
return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
}
}
let node = DTRWrap::arc_pin_init(Node::new(ptr, cookie, flags, self.into()))?.into_arc();
let rbnode = RBTreeNode::new(ptr, node.clone(), GFP_KERNEL)?;
let mut inner = self.inner.lock();
if let Some(node) = inner.get_existing_node(ptr, cookie)? {
return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
}
inner.nodes.insert(rbnode);
let node_ref = inner
.new_node_ref_with_thread(node, strong, thread, wrapper)
.unwrap();
Ok(Ok(node_ref))
}
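/// Calls `get_node_inner`, retrying with a `CritIncrWrapper` if the
/// zero-to-one refcount increment could not be delivered directly. The second
/// attempt always succeeds, so the loop runs at most twice.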
pub(crate) fn get_node(
self: ArcBorrow<'_, Self>,
ptr: u64,
cookie: u64,
flags: u32,
strong: bool,
thread: &Thread,
) -> Result<NodeRef> {
let mut wrapper = None;
for _ in 0..2 {
match self.get_node_inner(ptr, cookie, flags, strong, thread, wrapper) {
Err(err) => return Err(err),
Ok(Ok(node_ref)) => return Ok(node_ref),
Ok(Err(CouldNotDeliverCriticalIncrement)) => {
wrapper = Some(CritIncrWrapper::new()?);
}
}
}
unreachable!()
}
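/// Inserts the node reference into the handle table, or absorbs it into the
/// existing entry for the same node. New entries receive the lowest unused
/// handle; handle 0 is reserved for the context manager.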
pub(crate) fn insert_or_update_handle(
self: ArcBorrow<'_, Process>,
node_ref: NodeRef,
is_manager: bool,
) -> Result<u32> {
{
let mut refs = self.node_refs.lock();
if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
let handle = *handle_ref;
let info = refs.by_handle.get_mut(&handle).unwrap();
info.node_ref().absorb(node_ref);
return Ok(handle);
}
}
let reserve1 = RBTreeNodeReservation::new(GFP_KERNEL)?;
let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
let info = UniqueArc::new_uninit(GFP_KERNEL)?;
let mut refs = self.node_refs.lock();
if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
let handle = *handle_ref;
let info = refs.by_handle.get_mut(&handle).unwrap();
info.node_ref().absorb(node_ref);
return Ok(handle);
}
let mut target: u32 = if is_manager { 0 } else { 1 };
for handle in refs.by_handle.keys() {
if *handle > target {
break;
}
if *handle == target {
target = target.checked_add(1).ok_or(ENOMEM)?;
}
}
let gid = node_ref.node.global_id();
let (info_proc, info_node) = {
let info_init = NodeRefInfo::new(node_ref, target, self.into());
match info.pin_init_with(info_init) {
Ok(info) => ListArc::pair_from_pin_unique(info),
Err(err) => match err {},
}
};
if self.inner.lock().is_dead {
return Err(ESRCH);
}
unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };
refs.by_node.insert(reserve1.into_node(gid, target));
refs.by_handle.insert(reserve2.into_node(target, info_proc));
Ok(target)
}
pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
if handle == 0 {
Ok(self.ctx.get_manager_node(true)?)
} else {
Ok(self.get_node_from_handle(handle, true)?)
}
}
pub(crate) fn get_node_from_handle(&self, handle: u32, strong: bool) -> Result<NodeRef> {
self.node_refs
.lock()
.by_handle
.get_mut(&handle)
.ok_or(ENOENT)?
.node_ref()
.clone(strong)
}
pub(crate) fn remove_from_delivered_deaths(&self, death: &DArc<NodeDeath>) {
let mut inner = self.inner.lock();
let removed = unsafe { inner.delivered_deaths.remove(death) };
drop(inner);
drop(removed);
}
pub(crate) fn update_ref(
self: ArcBorrow<'_, Process>,
handle: u32,
inc: bool,
strong: bool,
) -> Result {
if inc && handle == 0 {
if let Ok(node_ref) = self.ctx.get_manager_node(strong) {
if core::ptr::eq(&*self, &*node_ref.node.owner) {
return Err(EINVAL);
}
let _ = self.insert_or_update_handle(node_ref, true);
return Ok(());
}
}
let mut refs = self.node_refs.lock();
if let Some(info) = refs.by_handle.get_mut(&handle) {
if info.node_ref().update(inc, strong) {
if let Some(death) = info.death().take() {
death.set_cleared(true);
self.remove_from_delivered_deaths(&death);
}
unsafe { info.node_ref2().node.remove_node_info(info) };
let id = info.node_ref().node.global_id();
refs.by_handle.remove(&handle);
refs.by_node.remove(&id);
}
} else if !self.inner.lock().is_dead {
pr_warn!("{}: no such ref {handle}\n", self.pid_in_current_ns());
}
Ok(())
}
pub(crate) fn update_node(&self, ptr: u64, cookie: u64, strong: bool) {
let mut inner = self.inner.lock();
if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
inner.update_node_refcount(&node, false, strong, 1, None);
}
}
pub(crate) fn inc_ref_done(&self, reader: &mut UserSliceReader, strong: bool) -> Result {
let ptr = reader.read::<u64>()?;
let cookie = reader.read::<u64>()?;
let mut inner = self.inner.lock();
if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
if let Some(node) = node.inc_ref_done_locked(strong, &mut inner) {
let _ = inner.push_work(node);
}
}
Ok(())
}
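/// Reserves a transaction buffer in this process's mapping. If the range
/// allocator needs memory for its own bookkeeping, that allocation is
/// performed with the spinlock dropped and the reservation is then retried.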
pub(crate) fn buffer_alloc(
self: &Arc<Self>,
debug_id: usize,
size: usize,
is_oneway: bool,
from_pid: i32,
) -> BinderResult<NewAllocation> {
use kernel::page::PAGE_SIZE;
let mut reserve_new_args = ReserveNewArgs {
debug_id,
size,
is_oneway,
pid: from_pid,
..ReserveNewArgs::default()
};
let (new_alloc, addr) = loop {
let mut inner = self.inner.lock();
let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?;
let alloc_request = match mapping.alloc.reserve_new(reserve_new_args)? {
ReserveNew::Success(new_alloc) => break (new_alloc, mapping.address),
ReserveNew::NeedAlloc(request) => request,
};
drop(inner);
reserve_new_args = alloc_request.make_alloc()?;
};
let res = Allocation::new(
self.clone(),
debug_id,
new_alloc.offset,
size,
addr + new_alloc.offset,
new_alloc.oneway_spam_detected,
);
match self.pages.use_range(
new_alloc.offset / PAGE_SIZE,
(new_alloc.offset + size).div_ceil(PAGE_SIZE),
) {
Ok(()) => {}
Err(err) => {
pr_warn!("use_range failure {:?}", err);
return Err(err.into());
}
}
Ok(NewAllocation(res))
}
pub(crate) fn buffer_get(self: &Arc<Self>, ptr: usize) -> Option<Allocation> {
let mut inner = self.inner.lock();
let mapping = inner.mapping.as_mut()?;
let offset = ptr.checked_sub(mapping.address)?;
let (size, debug_id, odata) = mapping.alloc.reserve_existing(offset).ok()?;
let mut alloc = Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
if let Some(data) = odata {
alloc.set_info(data);
}
Some(alloc)
}
pub(crate) fn buffer_raw_free(&self, ptr: usize) {
let mut inner = self.inner.lock();
if let Some(mapping) = &mut inner.mapping {
let offset = match ptr.checked_sub(mapping.address) {
Some(offset) => offset,
None => return,
};
let freed_range = match mapping.alloc.reservation_abort(offset) {
Ok(freed_range) => freed_range,
Err(_) => {
pr_warn!(
"Pointer {:x} failed to free, base = {:x}\n",
ptr,
mapping.address
);
return;
}
};
self.pages
.stop_using_range(freed_range.start_page_idx, freed_range.end_page_idx);
}
}
pub(crate) fn buffer_make_freeable(&self, offset: usize, mut data: Option<AllocationInfo>) {
let mut inner = self.inner.lock();
if let Some(mapping) = &mut inner.mapping {
if mapping.alloc.reservation_commit(offset, &mut data).is_err() {
pr_warn!("Offset {} failed to be marked freeable\n", offset);
}
}
}
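/// Records the mapping for a new vma, capping its size at `SZ_4M` and
/// registering the vma with the process's shrinkable page range.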
fn create_mapping(&self, vma: &mm::virt::VmaNew) -> Result {
use kernel::page::PAGE_SIZE;
let size = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
let mapping = Mapping::new(vma.start(), size);
let page_count = self.pages.register_with_vma(vma)?;
if page_count * PAGE_SIZE != size {
return Err(EINVAL);
}
self.inner.lock().mapping = Some(mapping);
Ok(())
}
fn version(&self, data: UserSlice) -> Result {
data.writer().write(&BinderVersion::current())
}
pub(crate) fn register_thread(&self) -> bool {
self.inner.lock().register_thread()
}
fn remove_thread(&self, thread: Arc<Thread>) {
self.inner.lock().threads.remove(&thread.id);
thread.release();
}
fn set_max_threads(&self, max: u32) {
self.inner.lock().max_threads = max;
}
fn set_oneway_spam_detection_enabled(&self, enabled: u32) {
self.inner.lock().oneway_spam_detection_enabled = enabled != 0;
}
pub(crate) fn is_oneway_spam_detection_enabled(&self) -> bool {
self.inner.lock().oneway_spam_detection_enabled
}
fn get_node_debug_info(&self, data: UserSlice) -> Result {
let (mut reader, mut writer) = data.reader_writer();
let ptr = reader.read::<BinderNodeDebugInfo>()?.ptr;
let mut out = BinderNodeDebugInfo::default();
{
let inner = self.inner.lock();
for (node_ptr, node) in &inner.nodes {
if *node_ptr > ptr {
node.populate_debug_info(&mut out, &inner);
break;
}
}
}
writer.write(&out)
}
fn get_node_info_from_ref(&self, data: UserSlice) -> Result {
let (mut reader, mut writer) = data.reader_writer();
let mut out = reader.read::<BinderNodeInfoForRef>()?;
if out.strong_count != 0
|| out.weak_count != 0
|| out.reserved1 != 0
|| out.reserved2 != 0
|| out.reserved3 != 0
{
return Err(EINVAL);
}
if !self.inner.lock().is_manager {
return Err(EPERM);
}
{
let mut node_refs = self.node_refs.lock();
let node_info = node_refs.by_handle.get_mut(&out.handle).ok_or(ENOENT)?;
let node_ref = node_info.node_ref();
let owner_inner = node_ref.node.owner.inner.lock();
node_ref.node.populate_counts(&mut out, &owner_inner);
}
writer.write(&out)
}
pub(crate) fn needs_thread(&self) -> bool {
let mut inner = self.inner.lock();
let ret = inner.requested_thread_count == 0
&& inner.ready_threads.is_empty()
&& inner.started_thread_count < inner.max_threads;
if ret {
inner.requested_thread_count += 1
}
ret
}
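/// Handles BC_REQUEST_DEATH_NOTIFICATION: registers a death notification for
/// the node behind `handle`. If the node's owner is already dead, the
/// notification is marked dead immediately; otherwise it is registered on
/// the node.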
pub(crate) fn request_death(
self: &Arc<Self>,
reader: &mut UserSliceReader,
thread: &Thread,
) -> Result {
let handle: u32 = reader.read()?;
let cookie: u64 = reader.read()?;
let death = UniqueArc::new_uninit(GFP_KERNEL).inspect_err(|_| {
thread.push_return_work(BR_ERROR);
})?;
let mut refs = self.node_refs.lock();
let Some(info) = refs.by_handle.get_mut(&handle) else {
pr_warn!("BC_REQUEST_DEATH_NOTIFICATION invalid ref {handle}\n");
return Ok(());
};
if info.death().is_some() {
pr_warn!("BC_REQUEST_DEATH_NOTIFICATION death notification already set\n");
return Ok(());
}
let death = {
let death_init = NodeDeath::new(info.node_ref().node.clone(), self.clone(), cookie);
match death.pin_init_with(death_init) {
Ok(death) => death,
Err(err) => match err {},
}
};
{
let owner = info.node_ref2().node.owner.clone();
let mut owner_inner = owner.inner.lock();
if owner_inner.is_dead {
let death = Arc::from(death);
*info.death() = Some(death.clone());
drop(owner_inner);
death.set_dead();
} else {
let death = ListArc::from(death);
*info.death() = Some(death.clone_arc());
info.node_ref().node.add_death(death, &mut owner_inner);
}
}
Ok(())
}
pub(crate) fn clear_death(&self, reader: &mut UserSliceReader, thread: &Thread) -> Result {
let handle: u32 = reader.read()?;
let cookie: u64 = reader.read()?;
let mut refs = self.node_refs.lock();
let Some(info) = refs.by_handle.get_mut(&handle) else {
pr_warn!("BC_CLEAR_DEATH_NOTIFICATION invalid ref {handle}\n");
return Ok(());
};
let Some(death) = info.death().take() else {
pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification not active\n");
return Ok(());
};
if death.cookie != cookie {
*info.death() = Some(death);
pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch\n");
return Ok(());
}
if death.set_cleared(false) {
if let Some(death) = ListArc::try_from_arc_or_drop(death) {
let _ = thread.push_work_if_looper(death);
}
}
Ok(())
}
pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) {
if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
death.set_notification_done(thread);
}
}
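/// Locks the inner state and temporarily takes ownership of the node table;
/// the returned `WithNodes` guard swaps it back on drop.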
pub(crate) fn lock_with_nodes(&self) -> WithNodes<'_> {
let mut inner = self.inner.lock();
WithNodes {
nodes: take(&mut inner.nodes),
inner,
}
}
fn deferred_flush(&self) {
let inner = self.inner.lock();
for thread in inner.threads.values() {
thread.exit_looper();
}
}
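/// Performs the deferred part of process teardown: marks the process dead,
/// releases its threads and nodes, clears the handle tables, death
/// notifications and pending work, and frees any buffers still reserved in
/// the mapping.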
fn deferred_release(self: Arc<Self>) {
let is_manager = {
let mut inner = self.inner.lock();
inner.is_dead = true;
inner.is_frozen = false;
inner.sync_recv = false;
inner.async_recv = false;
inner.is_manager
};
if is_manager {
self.ctx.unset_manager_node();
}
self.ctx.deregister_process(&self);
let binderfs_file = self.inner.lock().binderfs_file.take();
drop(binderfs_file);
let threads = {
let mut inner = self.inner.lock();
let threads = take(&mut inner.threads);
let ready = take(&mut inner.ready_threads);
drop(inner);
drop(ready);
for thread in threads.values() {
thread.release();
}
threads
};
{
while let Some(node) = {
let mut lock = self.inner.lock();
lock.nodes.cursor_front().map(|c| c.remove_current().1)
} {
node.to_key_value().1.release();
}
}
for info in self.node_refs.lock().by_handle.values_mut() {
unsafe { info.node_ref2().node.remove_node_info(info) };
let death = if let Some(existing) = info.death().take() {
existing
} else {
continue;
};
death.set_cleared(false);
}
let freeze_listeners = take(&mut self.node_refs.lock().freeze_listeners);
for listener in freeze_listeners.values() {
listener.on_process_exit(&self);
}
drop(freeze_listeners);
{
let mut refs = self.node_refs.lock();
let by_handle = take(&mut refs.by_handle);
let by_node = take(&mut refs.by_node);
drop(refs);
drop(by_node);
drop(by_handle);
}
while let Some(work) = self.get_work() {
work.into_arc().cancel();
}
let delivered_deaths = take(&mut self.inner.lock().delivered_deaths);
drop(delivered_deaths);
let omapping = self.inner.lock().mapping.take();
if let Some(mut mapping) = omapping {
let address = mapping.address;
mapping
.alloc
.take_for_each(|offset, size, debug_id, odata| {
let ptr = offset + address;
pr_warn!(
"{}: removing orphan mapping {offset}:{size}\n",
self.pid_in_current_ns()
);
let mut alloc =
Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
if let Some(data) = odata {
alloc.set_info(data);
}
drop(alloc)
});
}
drop(threads);
}
pub(crate) fn drop_outstanding_txn(&self) {
let wake = {
let mut inner = self.inner.lock();
if inner.outstanding_txns == 0 {
pr_err!("outstanding_txns underflow");
return;
}
inner.outstanding_txns -= 1;
inner.is_frozen && inner.outstanding_txns == 0
};
if wake {
self.freeze_wait.notify_all();
}
}
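/// Handles the per-process part of BINDER_FREEZE. Unfreezing clears the
/// frozen state and sends any pending freeze notifications; freezing
/// optionally waits up to `timeout_ms` for outstanding transactions to drain
/// and fails with EAGAIN if transactions are still pending afterwards.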
pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
if info.enable == 0 {
let msgs = self.prepare_freeze_messages()?;
let mut inner = self.inner.lock();
inner.sync_recv = false;
inner.async_recv = false;
inner.is_frozen = false;
drop(inner);
msgs.send_messages();
return Ok(());
}
let mut inner = self.inner.lock();
inner.sync_recv = false;
inner.async_recv = false;
inner.is_frozen = true;
if info.timeout_ms > 0 {
let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
while jiffies > 0 {
if inner.outstanding_txns == 0 {
break;
}
match self
.freeze_wait
.wait_interruptible_timeout(&mut inner, jiffies)
{
CondVarTimeoutResult::Signal { .. } => {
inner.is_frozen = false;
return Err(ERESTARTSYS);
}
CondVarTimeoutResult::Woken { jiffies: remaining } => {
jiffies = remaining;
}
CondVarTimeoutResult::Timeout => {
jiffies = 0;
}
}
}
}
if inner.txns_pending_locked() {
inner.is_frozen = false;
Err(EAGAIN)
} else {
drop(inner);
match self.prepare_freeze_messages() {
Ok(batch) => {
batch.send_messages();
Ok(())
}
Err(kernel::alloc::AllocError) => {
self.inner.lock().is_frozen = false;
Err(ENOMEM)
}
}
}
}
}
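/// Implements BINDER_GET_FROZEN_INFO: reports whether any process with the
/// given pid received sync or async transactions while frozen, across all
/// binder contexts.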
fn get_frozen_status(data: UserSlice) -> Result {
let (mut reader, mut writer) = data.reader_writer();
let mut info = reader.read::<BinderFrozenStatusInfo>()?;
info.sync_recv = 0;
info.async_recv = 0;
let mut found = false;
for ctx in crate::context::get_all_contexts()? {
ctx.for_each_proc(|proc| {
if proc.task.pid() == info.pid as _ {
found = true;
let inner = proc.inner.lock();
let txns_pending = inner.txns_pending_locked();
info.async_recv |= inner.async_recv as u32;
info.sync_recv |= inner.sync_recv as u32;
info.sync_recv |= (txns_pending as u32) << 1;
}
});
}
if found {
writer.write(&info)?;
Ok(())
} else {
Err(EINVAL)
}
}
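/// Implements BINDER_FREEZE: looks up every process with the given pid in all
/// binder contexts and applies the freeze operation to each of them.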
fn ioctl_freeze(reader: &mut UserSliceReader) -> Result {
let info = reader.read::<BinderFreezeInfo>()?;
let mut procs = KVec::with_capacity(3, GFP_KERNEL)?;
let ctxs = crate::context::get_all_contexts()?;
for ctx in ctxs {
for proc in ctx.get_procs_with_pid(info.pid as i32)? {
procs.push(proc, GFP_KERNEL)?;
}
}
for proc in procs {
proc.ioctl_freeze(&info)?;
}
Ok(())
}
impl Process {
fn ioctl_write_only(
this: ArcBorrow<'_, Process>,
_file: &File,
cmd: u32,
reader: &mut UserSliceReader,
) -> Result {
let thread = this.get_current_thread()?;
match cmd {
uapi::BINDER_SET_MAX_THREADS => this.set_max_threads(reader.read()?),
uapi::BINDER_THREAD_EXIT => this.remove_thread(thread),
uapi::BINDER_SET_CONTEXT_MGR => this.set_as_manager(None, &thread)?,
uapi::BINDER_SET_CONTEXT_MGR_EXT => {
this.set_as_manager(Some(reader.read()?), &thread)?
}
uapi::BINDER_ENABLE_ONEWAY_SPAM_DETECTION => {
this.set_oneway_spam_detection_enabled(reader.read()?)
}
uapi::BINDER_FREEZE => ioctl_freeze(reader)?,
_ => return Err(EINVAL),
}
Ok(())
}
fn ioctl_write_read(
this: ArcBorrow<'_, Process>,
file: &File,
cmd: u32,
data: UserSlice,
) -> Result {
let thread = this.get_current_thread()?;
let blocking = (file.flags() & file::flags::O_NONBLOCK) == 0;
match cmd {
uapi::BINDER_WRITE_READ => thread.write_read(data, blocking)?,
uapi::BINDER_GET_NODE_DEBUG_INFO => this.get_node_debug_info(data)?,
uapi::BINDER_GET_NODE_INFO_FOR_REF => this.get_node_info_from_ref(data)?,
uapi::BINDER_VERSION => this.version(data)?,
uapi::BINDER_GET_FROZEN_INFO => get_frozen_status(data)?,
uapi::BINDER_GET_EXTENDED_ERROR => thread.get_extended_error(data)?,
_ => return Err(EINVAL),
}
Ok(())
}
}
impl Process {
pub(crate) fn open(ctx: ArcBorrow<'_, Context>, file: &File) -> Result<Arc<Process>> {
Self::new(ctx.into(), ARef::from(file.cred()))
}
pub(crate) fn release(this: Arc<Process>, _file: &File) {
let binderfs_file;
let should_schedule;
{
let mut inner = this.inner.lock();
should_schedule = inner.defer_work == 0;
inner.defer_work |= PROC_DEFER_RELEASE;
binderfs_file = inner.binderfs_file.take();
}
if should_schedule {
let _ = workqueue::system().enqueue(this);
}
drop(binderfs_file);
}
pub(crate) fn flush(this: ArcBorrow<'_, Process>) -> Result {
let should_schedule;
{
let mut inner = this.inner.lock();
should_schedule = inner.defer_work == 0;
inner.defer_work |= PROC_DEFER_FLUSH;
}
if should_schedule {
let _ = workqueue::system().enqueue(Arc::from(this));
}
Ok(())
}
pub(crate) fn ioctl(this: ArcBorrow<'_, Process>, file: &File, cmd: u32, arg: usize) -> Result {
use kernel::ioctl::{_IOC_DIR, _IOC_SIZE};
use kernel::uapi::{_IOC_READ, _IOC_WRITE};
crate::trace::trace_ioctl(cmd, arg);
let user_slice = UserSlice::new(UserPtr::from_addr(arg), _IOC_SIZE(cmd));
const _IOC_READ_WRITE: u32 = _IOC_READ | _IOC_WRITE;
match _IOC_DIR(cmd) {
_IOC_WRITE => Self::ioctl_write_only(this, file, cmd, &mut user_slice.reader()),
_IOC_READ_WRITE => Self::ioctl_write_read(this, file, cmd, user_slice),
_ => Err(EINVAL),
}
}
pub(crate) fn compat_ioctl(
this: ArcBorrow<'_, Process>,
file: &File,
cmd: u32,
arg: usize,
) -> Result {
Self::ioctl(this, file, cmd, arg)
}
pub(crate) fn mmap(
this: ArcBorrow<'_, Process>,
_file: &File,
vma: &mm::virt::VmaNew,
) -> Result {
if !core::ptr::eq(kernel::current!().group_leader(), &*this.task) {
return Err(EINVAL);
}
if vma.start() == 0 {
return Err(EINVAL);
}
vma.try_clear_maywrite().map_err(|_| EPERM)?;
vma.set_dontcopy();
vma.set_mixedmap();
this.create_mapping(vma)
}
pub(crate) fn poll(
this: ArcBorrow<'_, Process>,
file: &File,
table: PollTable<'_>,
) -> Result<u32> {
let thread = this.get_current_thread()?;
let (from_proc, mut mask) = thread.poll(file, table);
if mask == 0 && from_proc && !this.inner.lock().work.is_empty() {
mask |= bindings::POLLIN;
}
Ok(mask)
}
}
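/// Keeps the thread on the process's `ready_threads` list for as long as this
/// registration is alive; dropping it takes the thread off the list again.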
pub(crate) struct Registration<'a> {
thread: &'a Arc<Thread>,
}
impl<'a> Registration<'a> {
fn new(thread: &'a Arc<Thread>, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) -> Self {
assert!(core::ptr::eq(&thread.process.inner, guard.lock_ref()));
if let Ok(list_arc) = ListArc::try_from_arc(thread.clone()) {
guard.ready_threads.push_front(list_arc);
} else {
pr_err!("Same thread registered with `ready_threads` twice.");
}
Self { thread }
}
}
impl Drop for Registration<'_> {
fn drop(&mut self) {
let mut inner = self.thread.process.inner.lock();
unsafe { inner.ready_threads.remove(self.thread) };
}
}
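/// Guard returned by `lock_with_nodes`. It holds the inner lock together with
/// the process's node table and swaps the table back into place on drop,
/// warning if nodes were added in the meantime.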
pub(crate) struct WithNodes<'a> {
pub(crate) inner: Guard<'a, ProcessInner, SpinLockBackend>,
pub(crate) nodes: RBTree<u64, DArc<Node>>,
}
impl Drop for WithNodes<'_> {
fn drop(&mut self) {
core::mem::swap(&mut self.nodes, &mut self.inner.nodes);
if self.nodes.iter().next().is_some() {
pr_err!("nodes array was modified while using lock_with_nodes\n");
}
}
}
pub(crate) enum GetWorkOrRegister<'a> {
Work(DLArc<dyn DeliverToRead>),
Register(Registration<'a>),
}