Path: drivers/android/binder/range_alloc/array.rs
// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2025 Google LLC.

use kernel::{
    page::{PAGE_MASK, PAGE_SIZE},
    prelude::*,
    seq_file::SeqFile,
    seq_print,
    task::Pid,
};

use crate::range_alloc::{DescriptorState, FreedRange, Range};

/// Keeps track of allocations in a process' mmap.
///
/// Each process has an mmap where the data for incoming transactions will be placed. This struct
/// keeps track of allocations made in the mmap. For each allocation, we store a descriptor that
/// has metadata related to the allocation. We also keep track of available free space.
pub(super) struct ArrayRangeAllocator<T> {
    /// This stores all ranges that are allocated. Unlike the tree based allocator, we do *not*
    /// store the free ranges.
    ///
    /// Sorted by offset.
    pub(super) ranges: KVec<Range<T>>,
    size: usize,
    free_oneway_space: usize,
}

struct FindEmptyRes {
    /// Which index in `ranges` should we insert the new range at?
    ///
    /// Inserting the new range at this index keeps `ranges` sorted.
    insert_at_idx: usize,
    /// Which offset should we insert the new range at?
    insert_at_offset: usize,
}

impl<T> ArrayRangeAllocator<T> {
    pub(crate) fn new(size: usize, alloc: EmptyArrayAlloc<T>) -> Self {
        Self {
            ranges: alloc.ranges,
            size,
            free_oneway_space: size / 2,
        }
    }

    pub(crate) fn free_oneway_space(&self) -> usize {
        self.free_oneway_space
    }

    pub(crate) fn count_buffers(&self) -> usize {
        self.ranges.len()
    }

    pub(crate) fn total_size(&self) -> usize {
        self.size
    }

    pub(crate) fn is_full(&self) -> bool {
        self.ranges.len() == self.ranges.capacity()
    }

    pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
        for range in &self.ranges {
            seq_print!(
                m,
                " buffer {}: {} size {} pid {} oneway {}",
                0,
                range.offset,
                range.size,
                range.state.pid(),
                range.state.is_oneway(),
            );
            if let DescriptorState::Reserved(_) = range.state {
                seq_print!(m, " reserved\n");
            } else {
                seq_print!(m, " allocated\n");
            }
        }
        Ok(())
    }

    /// Find somewhere to put a new range.
    ///
    /// Unlike the tree implementation, we do not bother to find the smallest gap. The idea is that
    /// fragmentation isn't a big issue when we don't have many ranges.
    ///
    /// Returns the index that the new range should have in `self.ranges` after insertion.
    fn find_empty_range(&self, size: usize) -> Option<FindEmptyRes> {
        let after_last_range = self.ranges.last().map(Range::endpoint).unwrap_or(0);

        if size <= self.total_size() - after_last_range {
            // We can put the range at the end, so just do that.
            Some(FindEmptyRes {
                insert_at_idx: self.ranges.len(),
                insert_at_offset: after_last_range,
            })
        } else {
            let mut end_of_prev = 0;
            for (i, range) in self.ranges.iter().enumerate() {
                // Does it fit before the i'th range?
                if size <= range.offset - end_of_prev {
                    return Some(FindEmptyRes {
                        insert_at_idx: i,
                        insert_at_offset: end_of_prev,
                    });
                }
                end_of_prev = range.endpoint();
            }
            None
        }
    }
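
    // Worked example of the search above (illustrative sizes only): with `total_size()` of 1000
    // and existing ranges at offsets 0..100 and 300..400, a request for 500 bytes fits after the
    // last range and is placed at offset 400. A request for 700 bytes does not fit at the end, so
    // the gaps are scanned in order; the only interior gap (100..300) is too small as well, and
    // the search returns `None`, which `reserve_new` below turns into `ENOSPC`.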

    pub(crate) fn reserve_new(
        &mut self,
        debug_id: usize,
        size: usize,
        is_oneway: bool,
        pid: Pid,
    ) -> Result<usize> {
        // Compute new value of free_oneway_space, which is set only on success.
        let new_oneway_space = if is_oneway {
            match self.free_oneway_space.checked_sub(size) {
                Some(new_oneway_space) => new_oneway_space,
                None => return Err(ENOSPC),
            }
        } else {
            self.free_oneway_space
        };

        let FindEmptyRes {
            insert_at_idx,
            insert_at_offset,
        } = self.find_empty_range(size).ok_or(ENOSPC)?;
        self.free_oneway_space = new_oneway_space;

        let new_range = Range {
            offset: insert_at_offset,
            size,
            state: DescriptorState::new(is_oneway, debug_id, pid),
        };
        // Insert the value at the given index to keep the array sorted.
        self.ranges
            .insert_within_capacity(insert_at_idx, new_range)
            .ok()
            .unwrap();

        Ok(insert_at_offset)
    }

    pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
        // This could use a binary search, but linear scans are usually faster for small arrays.
        let i = self
            .ranges
            .iter()
            .position(|range| range.offset == offset)
            .ok_or(EINVAL)?;
        let range = &self.ranges[i];

        if let DescriptorState::Allocated(_) = range.state {
            return Err(EPERM);
        }

        let size = range.size;
        let offset = range.offset;

        if range.state.is_oneway() {
            self.free_oneway_space += size;
        }

        // This computes the range of pages that are no longer used by *any* allocated range. The
        // caller will mark them as unused, which means that they can be freed if the system comes
        // under memory pressure.
        let mut freed_range = FreedRange::interior_pages(offset, size);
        #[expect(clippy::collapsible_if)] // reads better like this
        if offset % PAGE_SIZE != 0 {
            if i == 0 || self.ranges[i - 1].endpoint() <= (offset & PAGE_MASK) {
                freed_range.start_page_idx -= 1;
            }
        }
        if range.endpoint() % PAGE_SIZE != 0 {
            let page_after = (range.endpoint() & PAGE_MASK) + PAGE_SIZE;
            if i + 1 == self.ranges.len() || page_after <= self.ranges[i + 1].offset {
                freed_range.end_page_idx += 1;
            }
        }

        self.ranges.remove(i)?;
        Ok(freed_range)
    }
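
    // Worked example of the page arithmetic above (illustrative, assuming PAGE_SIZE == 0x1000):
    // aborting a reservation covering 0x1800..0x3800 initially frees only page 2, the one page
    // fully contained in the range. Page 1 is freed as well if no earlier range ends past 0x1000,
    // and page 3 is freed as well if this is the last range or the next range starts at or after
    // 0x4000, since in those cases no other range touches the partial pages at either end.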

    pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
        // This could use a binary search, but linear scans are usually faster for small arrays.
        let range = self
            .ranges
            .iter_mut()
            .find(|range| range.offset == offset)
            .ok_or(ENOENT)?;

        let DescriptorState::Reserved(reservation) = &range.state else {
            return Err(ENOENT);
        };

        range.state = DescriptorState::Allocated(reservation.clone().allocate(data.take()));
        Ok(())
    }

    pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
        // This could use a binary search, but linear scans are usually faster for small arrays.
        let range = self
            .ranges
            .iter_mut()
            .find(|range| range.offset == offset)
            .ok_or(ENOENT)?;

        let DescriptorState::Allocated(allocation) = &mut range.state else {
            return Err(ENOENT);
        };

        let data = allocation.take();
        let debug_id = allocation.reservation.debug_id;
        range.state = DescriptorState::Reserved(allocation.reservation.clone());
        Ok((range.size, debug_id, data))
    }

    pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
        for range in self.ranges.iter_mut() {
            if let DescriptorState::Allocated(allocation) = &mut range.state {
                callback(
                    range.offset,
                    range.size,
                    allocation.reservation.debug_id,
                    allocation.data.take(),
                );
            }
        }
    }
}

pub(crate) struct EmptyArrayAlloc<T> {
    ranges: KVec<Range<T>>,
}

impl<T> EmptyArrayAlloc<T> {
    pub(crate) fn try_new(capacity: usize) -> Result<Self> {
        Ok(Self {
            ranges: KVec::with_capacity(capacity, GFP_KERNEL)?,
        })
    }
}
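
// A rough sketch of how these methods are intended to compose from a caller's point of view
// (hypothetical values and payload type; the real callers live elsewhere in the binder driver):
//
//     let alloc = EmptyArrayAlloc::<usize>::try_new(/* capacity */ 8)?;
//     let mut ranges = ArrayRangeAllocator::new(/* mmap size */ 0x4000, alloc);
//
//     // Reserve space for an incoming transaction, then commit it once the payload is in place.
//     let offset = ranges.reserve_new(/* debug_id */ 1, /* size */ 64, /* is_oneway */ false, pid)?;
//     ranges.reservation_commit(offset, &mut Some(123))?;
//
//     // Later, look the buffer up again (turning it back into a reservation) and release it.
//     let (_size, _debug_id, _data) = ranges.reserve_existing(offset)?;
//     let _freed = ranges.reservation_abort(offset)?;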