GitHub Repository: torvalds/linux
Path: blob/master/drivers/android/binder/range_alloc/array.rs
// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2025 Google LLC.

use kernel::{
    page::{PAGE_MASK, PAGE_SIZE},
    prelude::*,
    seq_file::SeqFile,
    seq_print,
    task::Pid,
};

use crate::range_alloc::{DescriptorState, FreedRange, Range};

/// Keeps track of allocations in a process' mmap.
///
/// Each process has an mmap where the data for incoming transactions will be placed. This struct
/// keeps track of allocations made in the mmap. For each allocation, we store a descriptor that
/// has metadata related to the allocation. We also keep track of available free space.
pub(super) struct ArrayRangeAllocator<T> {
    /// This stores all ranges that are allocated. Unlike the tree based allocator, we do *not*
    /// store the free ranges.
    ///
    /// Sorted by offset.
    pub(super) ranges: KVec<Range<T>>,
    size: usize,
    free_oneway_space: usize,
}

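/// Result of a successful call to [`ArrayRangeAllocator::find_empty_range`].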
struct FindEmptyRes {
    /// Which index in `ranges` should we insert the new range at?
    ///
    /// Inserting the new range at this index keeps `ranges` sorted.
    insert_at_idx: usize,
    /// Which offset should we insert the new range at?
    insert_at_offset: usize,
}

impl<T> ArrayRangeAllocator<T> {
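    /// Creates an allocator for a mmap of the given total size, using the pre-allocated vector
    /// from `alloc` as backing storage.
    ///
    /// The space available to oneway transactions starts at half of the total size.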
    pub(crate) fn new(size: usize, alloc: EmptyArrayAlloc<T>) -> Self {
        Self {
            ranges: alloc.ranges,
            size,
            free_oneway_space: size / 2,
        }
    }

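    /// Returns the amount of space currently available for oneway transactions.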
    pub(crate) fn free_oneway_space(&self) -> usize {
        self.free_oneway_space
    }

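    /// Returns the number of ranges (reserved or allocated) currently tracked.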
    pub(crate) fn count_buffers(&self) -> usize {
        self.ranges.len()
    }

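    /// Returns the total size of the mmap managed by this allocator.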
    pub(crate) fn total_size(&self) -> usize {
        self.size
    }

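    /// Returns whether the backing vector is at capacity, so that no further range can be
    /// inserted without reallocating.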
    pub(crate) fn is_full(&self) -> bool {
        self.ranges.len() == self.ranges.capacity()
    }

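    /// Writes a human-readable description of every range to the given [`SeqFile`].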
    pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
        for range in &self.ranges {
            seq_print!(
                m,
                " buffer {}: {} size {} pid {} oneway {}",
                0,
                range.offset,
                range.size,
                range.state.pid(),
                range.state.is_oneway(),
            );
            if let DescriptorState::Reserved(_) = range.state {
                seq_print!(m, " reserved\n");
            } else {
                seq_print!(m, " allocated\n");
            }
        }
        Ok(())
    }

    /// Find somewhere to put a new range.
    ///
    /// Unlike the tree implementation, we do not bother to find the smallest gap. The idea is that
    /// fragmentation isn't a big issue when we don't have many ranges.
    ///
    /// Returns the index that the new range should have in `self.ranges` after insertion.
    fn find_empty_range(&self, size: usize) -> Option<FindEmptyRes> {
        let after_last_range = self.ranges.last().map(Range::endpoint).unwrap_or(0);

        if size <= self.total_size() - after_last_range {
            // We can put the range at the end, so just do that.
            Some(FindEmptyRes {
                insert_at_idx: self.ranges.len(),
                insert_at_offset: after_last_range,
            })
        } else {
            let mut end_of_prev = 0;
            for (i, range) in self.ranges.iter().enumerate() {
                // Does it fit before the i'th range?
                if size <= range.offset - end_of_prev {
                    return Some(FindEmptyRes {
                        insert_at_idx: i,
                        insert_at_offset: end_of_prev,
                    });
                }
                end_of_prev = range.endpoint();
            }
            None
        }
    }

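    /// Reserves a new range of the given size and returns its offset in the mmap.
    ///
    /// For oneway transactions the size is also charged against `free_oneway_space`. Fails with
    /// `ENOSPC` if there is no suitable gap (or not enough oneway budget) left.
    ///
    /// The caller must ensure the allocator is not full (see [`Self::is_full`]); the insertion
    /// itself never allocates and panics if there is no spare capacity.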
    pub(crate) fn reserve_new(
        &mut self,
        debug_id: usize,
        size: usize,
        is_oneway: bool,
        pid: Pid,
    ) -> Result<usize> {
        // Compute new value of free_oneway_space, which is set only on success.
        let new_oneway_space = if is_oneway {
            match self.free_oneway_space.checked_sub(size) {
                Some(new_oneway_space) => new_oneway_space,
                None => return Err(ENOSPC),
            }
        } else {
            self.free_oneway_space
        };

        let FindEmptyRes {
            insert_at_idx,
            insert_at_offset,
        } = self.find_empty_range(size).ok_or(ENOSPC)?;
        self.free_oneway_space = new_oneway_space;

        let new_range = Range {
            offset: insert_at_offset,
            size,
            state: DescriptorState::new(is_oneway, debug_id, pid),
        };
        // Insert the value at the given index to keep the array sorted.
        self.ranges
            .insert_within_capacity(insert_at_idx, new_range)
            .ok()
            .unwrap();

        Ok(insert_at_offset)
    }

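    /// Aborts the reservation at the given offset and removes it from the allocator.
    ///
    /// Fails with `EINVAL` if no range starts at `offset` and with `EPERM` if the range has
    /// already been committed. Returns the range of pages that no longer back any range and can
    /// therefore be released by the caller.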
    pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
        // This could use a binary search, but linear scans are usually faster for small arrays.
        let i = self
            .ranges
            .iter()
            .position(|range| range.offset == offset)
            .ok_or(EINVAL)?;
        let range = &self.ranges[i];

        if let DescriptorState::Allocated(_) = range.state {
            return Err(EPERM);
        }

        let size = range.size;
        let offset = range.offset;

        if range.state.is_oneway() {
            self.free_oneway_space += size;
        }

        // This computes the range of pages that are no longer used by *any* allocated range. The
        // caller will mark them as unused, which means that they can be freed if the system comes
        // under memory pressure.
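        //
        // Illustrative example (assuming 4 KiB pages): for a freed range [0x1800, 0x3800), the
        // fully covered page [0x2000, 0x3000) is always included; the partial page at the start
        // is added only if the previous range ends at or before 0x1000, and the partial page at
        // the end only if the next range starts at or after 0x4000.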
        let mut freed_range = FreedRange::interior_pages(offset, size);
        #[expect(clippy::collapsible_if)] // reads better like this
        if offset % PAGE_SIZE != 0 {
            if i == 0 || self.ranges[i - 1].endpoint() <= (offset & PAGE_MASK) {
                freed_range.start_page_idx -= 1;
            }
        }
        if range.endpoint() % PAGE_SIZE != 0 {
            let page_after = (range.endpoint() & PAGE_MASK) + PAGE_SIZE;
            if i + 1 == self.ranges.len() || page_after <= self.ranges[i + 1].offset {
                freed_range.end_page_idx += 1;
            }
        }

        self.ranges.remove(i)?;
        Ok(freed_range)
    }

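    /// Commits the reservation at the given offset, turning it into an allocation that stores the
    /// provided data (which is taken out of `data`).
    ///
    /// Fails with `ENOENT` if no reserved range starts at `offset`.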
    pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
        // This could use a binary search, but linear scans are usually faster for small arrays.
        let range = self
            .ranges
            .iter_mut()
            .find(|range| range.offset == offset)
            .ok_or(ENOENT)?;

        let DescriptorState::Reserved(reservation) = &range.state else {
            return Err(ENOENT);
        };

        range.state = DescriptorState::Allocated(reservation.clone().allocate(data.take()));
        Ok(())
    }

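    /// Takes the data out of the allocation at the given offset and reverts it to a reservation.
    ///
    /// Returns the size of the range, its debug id, and the data that was stored with it. Fails
    /// with `ENOENT` if no allocated range starts at `offset`.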
    pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
        // This could use a binary search, but linear scans are usually faster for small arrays.
        let range = self
            .ranges
            .iter_mut()
            .find(|range| range.offset == offset)
            .ok_or(ENOENT)?;

        let DescriptorState::Allocated(allocation) = &mut range.state else {
            return Err(ENOENT);
        };

        let data = allocation.take();
        let debug_id = allocation.reservation.debug_id;
        range.state = DescriptorState::Reserved(allocation.reservation.clone());
        Ok((range.size, debug_id, data))
    }

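    /// Calls the callback for every allocated range with its offset, size, debug id, and the data
    /// stored in it (which is taken out of the range).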
    pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
        for range in self.ranges.iter_mut() {
            if let DescriptorState::Allocated(allocation) = &mut range.state {
                callback(
                    range.offset,
                    range.size,
                    allocation.reservation.debug_id,
                    allocation.data.take(),
                );
            }
        }
    }
}

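/// A pre-allocated, empty backing vector for an [`ArrayRangeAllocator`].
///
/// This allows the capacity to be reserved up front (e.g. before the allocator is switched to),
/// so that constructing the allocator itself never allocates.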
pub(crate) struct EmptyArrayAlloc<T> {
    ranges: KVec<Range<T>>,
}

impl<T> EmptyArrayAlloc<T> {
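    /// Attempts to allocate a backing vector with room for `capacity` ranges.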
    pub(crate) fn try_new(capacity: usize) -> Result<Self> {
        Ok(Self {
            ranges: KVec::with_capacity(capacity, GFP_KERNEL)?,
        })
    }
}