// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2025 Google LLC.

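//! Range allocator for binder buffers.
//!
//! The allocator uses one of three backing implementations depending on how
//! many ranges it tracks: it starts out `Empty`, switches to a fixed-capacity
//! `Array` on the first reservation, converts to a `Tree` once it would exceed
//! `TREE_THRESHOLD` ranges, and collapses back to `Empty` when the tree
//! becomes empty again (see `reservation_abort`).
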
use kernel::{page::PAGE_SIZE, prelude::*, seq_file::SeqFile, task::Pid};

mod tree;
use self::tree::{FromArrayAllocs, ReserveNewTreeAlloc, TreeRangeAllocator};

mod array;
use self::array::{ArrayRangeAllocator, EmptyArrayAlloc};

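/// The lifecycle state of a single descriptor: a descriptor starts out as a
/// `Reserved` range and becomes `Allocated` once data is committed to it (see
/// [`Reservation::allocate`] and [`Allocation::deallocate`]).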
enum DescriptorState<T> {
    Reserved(Reservation),
    Allocated(Allocation<T>),
}

impl<T> DescriptorState<T> {
    fn new(is_oneway: bool, debug_id: usize, pid: Pid) -> Self {
        DescriptorState::Reserved(Reservation {
            debug_id,
            is_oneway,
            pid,
        })
    }

    fn pid(&self) -> Pid {
        match self {
            DescriptorState::Reserved(inner) => inner.pid,
            DescriptorState::Allocated(inner) => inner.reservation.pid,
        }
    }

    fn is_oneway(&self) -> bool {
        match self {
            DescriptorState::Reserved(inner) => inner.is_oneway,
            DescriptorState::Allocated(inner) => inner.reservation.is_oneway,
        }
    }
}

#[derive(Clone)]
struct Reservation {
    debug_id: usize,
    is_oneway: bool,
    pid: Pid,
}

impl Reservation {
    fn allocate<T>(self, data: Option<T>) -> Allocation<T> {
        Allocation {
            data,
            reservation: self,
        }
    }
}

struct Allocation<T> {
    reservation: Reservation,
    data: Option<T>,
}

impl<T> Allocation<T> {
    fn deallocate(self) -> (Reservation, Option<T>) {
        (self.reservation, self.data)
    }

    fn debug_id(&self) -> usize {
        self.reservation.debug_id
    }

    fn take(&mut self) -> Option<T> {
        self.data.take()
    }
}

/// The array implementation must switch to the tree if it wants to go beyond this number of
/// ranges.
const TREE_THRESHOLD: usize = 8;

/// Represents a range of pages that have just become completely free.
#[derive(Copy, Clone)]
pub(crate) struct FreedRange {
    pub(crate) start_page_idx: usize,
    pub(crate) end_page_idx: usize,
}

impl FreedRange {
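    /// Returns the pages that lie entirely within the byte range
    /// `offset..offset + size`.
    ///
    /// Worked example, assuming 4 KiB pages: for `offset = 0x1800` and
    /// `size = 0x2000`, the byte range is `0x1800..0x3800`. Only page 2 (bytes
    /// `0x2000..0x3000`) is completely contained in it, so the result has
    /// `start_page_idx = 2` (`0x1800 / 0x1000` rounded up) and
    /// `end_page_idx = 3` (`0x3800 / 0x1000` rounded down).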
    fn interior_pages(offset: usize, size: usize) -> FreedRange {
        FreedRange {
            // Divide, rounding up.
            start_page_idx: offset.div_ceil(PAGE_SIZE),
            // Divide, rounding down.
            end_page_idx: (offset + size) / PAGE_SIZE,
        }
    }
}

struct Range<T> {
    offset: usize,
    size: usize,
    state: DescriptorState<T>,
}

impl<T> Range<T> {
    fn endpoint(&self) -> usize {
        self.offset + self.size
    }
}

pub(crate) struct RangeAllocator<T> {
    inner: Impl<T>,
}

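/// The backing implementation of a [`RangeAllocator`], chosen by the number of
/// ranges being tracked: `Empty` holds no ranges and remembers only the total
/// size, `Array` handles at most `TREE_THRESHOLD` ranges, and `Tree` handles
/// anything beyond that.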
enum Impl<T> {
    Empty(usize),
    Array(ArrayRangeAllocator<T>),
    Tree(TreeRangeAllocator<T>),
}

impl<T> RangeAllocator<T> {
    pub(crate) fn new(size: usize) -> Self {
        Self {
            inner: Impl::Empty(size),
        }
    }

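    /// Returns the number of bytes still available to one-way (async)
    /// transactions; the one-way budget is half of the total buffer size, so
    /// an empty allocator reports `size / 2`.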
    pub(crate) fn free_oneway_space(&self) -> usize {
        match &self.inner {
            Impl::Empty(size) => size / 2,
            Impl::Array(array) => array.free_oneway_space(),
            Impl::Tree(tree) => tree.free_oneway_space(),
        }
    }

    pub(crate) fn count_buffers(&self) -> usize {
        match &self.inner {
            Impl::Empty(_size) => 0,
            Impl::Array(array) => array.count_buffers(),
            Impl::Tree(tree) => tree.count_buffers(),
        }
    }

    pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
        match &self.inner {
            Impl::Empty(_size) => Ok(()),
            Impl::Array(array) => array.debug_print(m),
            Impl::Tree(tree) => tree.debug_print(m),
        }
    }

    /// Try to reserve a new buffer, using the provided allocation if necessary.
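    ///
    /// If a required allocation is missing, this returns
    /// [`ReserveNew::NeedAlloc`] rather than allocating while the caller holds
    /// the lock; the caller is then expected to call
    /// [`ReserveNewNeedAlloc::make_alloc`] outside the lock and retry. A sketch
    /// of the intended call pattern, with a hypothetical `lock` guarding the
    /// allocator:
    ///
    /// ```ignore
    /// let mut args = ReserveNewArgs { size, is_oneway, debug_id, pid, ..Default::default() };
    /// let offset = loop {
    ///     let mut alloc = lock.lock();
    ///     match alloc.reserve_new(args)? {
    ///         ReserveNew::Success(success) => break success.offset,
    ///         ReserveNew::NeedAlloc(request) => {
    ///             drop(alloc);
    ///             // Perform the allocations without the lock held.
    ///             args = request.make_alloc()?;
    ///         }
    ///     }
    /// };
    /// ```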
    pub(crate) fn reserve_new(&mut self, mut args: ReserveNewArgs<T>) -> Result<ReserveNew<T>> {
        match &mut self.inner {
            Impl::Empty(size) => {
                let empty_array = match args.empty_array_alloc.take() {
                    Some(empty_array) => ArrayRangeAllocator::new(*size, empty_array),
                    None => {
                        return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
                            args,
                            need_empty_array_alloc: true,
                            need_new_tree_alloc: false,
                            need_tree_alloc: false,
                        }))
                    }
                };

                self.inner = Impl::Array(empty_array);
                self.reserve_new(args)
            }
            Impl::Array(array) if array.is_full() => {
                let allocs = match args.new_tree_alloc {
                    Some(ref mut allocs) => allocs,
                    None => {
                        return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
                            args,
                            need_empty_array_alloc: false,
                            need_new_tree_alloc: true,
                            need_tree_alloc: true,
                        }))
                    }
                };

                let new_tree =
                    TreeRangeAllocator::from_array(array.total_size(), &mut array.ranges, allocs);

                self.inner = Impl::Tree(new_tree);
                self.reserve_new(args)
            }
            Impl::Array(array) => {
                let offset =
                    array.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid)?;
                Ok(ReserveNew::Success(ReserveNewSuccess {
                    offset,
                    oneway_spam_detected: false,
                    _empty_array_alloc: args.empty_array_alloc,
                    _new_tree_alloc: args.new_tree_alloc,
                    _tree_alloc: args.tree_alloc,
                }))
            }
            Impl::Tree(tree) => {
                let alloc = match args.tree_alloc {
                    Some(alloc) => alloc,
                    None => {
                        return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
                            args,
                            need_empty_array_alloc: false,
                            need_new_tree_alloc: false,
                            need_tree_alloc: true,
                        }));
                    }
                };
                let (offset, oneway_spam_detected) =
                    tree.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid, alloc)?;
                Ok(ReserveNew::Success(ReserveNewSuccess {
                    offset,
                    oneway_spam_detected,
                    _empty_array_alloc: args.empty_array_alloc,
                    _new_tree_alloc: args.new_tree_alloc,
                    _tree_alloc: None,
                }))
            }
        }
    }

    /// Deletes the allocation at `offset`.
    pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
        match &mut self.inner {
            Impl::Empty(_size) => Err(EINVAL),
            Impl::Array(array) => array.reservation_abort(offset),
            Impl::Tree(tree) => {
                let freed_range = tree.reservation_abort(offset)?;
                if tree.is_empty() {
                    self.inner = Impl::Empty(tree.total_size());
                }
                Ok(freed_range)
            }
        }
    }

    /// Called when an allocation is no longer in use by the kernel.
    ///
    /// The value in `data`, if any, will be stored with the allocation. A
    /// mutable reference is used to avoid dropping the `T` when an error is
    /// returned.
    pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
        match &mut self.inner {
            Impl::Empty(_size) => Err(EINVAL),
            Impl::Array(array) => array.reservation_commit(offset, data),
            Impl::Tree(tree) => tree.reservation_commit(offset, data),
        }
    }

    /// Called when the kernel starts using an allocation.
    ///
    /// Returns the size of the existing entry, its debug id, and the data
    /// associated with it.
    pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
        match &mut self.inner {
            Impl::Empty(_size) => Err(EINVAL),
            Impl::Array(array) => array.reserve_existing(offset),
            Impl::Tree(tree) => tree.reserve_existing(offset),
        }
    }

    /// Call the provided callback for every allocated region.
    ///
    /// This destroys the range allocator. Used only during shutdown.
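    ///
    /// The callback receives each region's offset, size, debug id, and any
    /// data stored for it.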
    pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
        match &mut self.inner {
            Impl::Empty(_size) => {}
            Impl::Array(array) => array.take_for_each(callback),
            Impl::Tree(tree) => tree.take_for_each(callback),
        }
    }
}

/// The arguments for `reserve_new`.
#[derive(Default)]
pub(crate) struct ReserveNewArgs<T> {
    pub(crate) size: usize,
    pub(crate) is_oneway: bool,
    pub(crate) debug_id: usize,
    pub(crate) pid: Pid,
    pub(crate) empty_array_alloc: Option<EmptyArrayAlloc<T>>,
    pub(crate) new_tree_alloc: Option<FromArrayAllocs<T>>,
    pub(crate) tree_alloc: Option<ReserveNewTreeAlloc<T>>,
}

/// The return type of `reserve_new`.
pub(crate) enum ReserveNew<T> {
    Success(ReserveNewSuccess<T>),
    NeedAlloc(ReserveNewNeedAlloc<T>),
}

/// Returned by `reserve_new` when the reservation was successful.
pub(crate) struct ReserveNewSuccess<T> {
    pub(crate) offset: usize,
    pub(crate) oneway_spam_detected: bool,

    // If the user supplied an allocation that we did not end up using, then we return it here.
    // The caller will kfree it outside of the lock.
    _empty_array_alloc: Option<EmptyArrayAlloc<T>>,
    _new_tree_alloc: Option<FromArrayAllocs<T>>,
    _tree_alloc: Option<ReserveNewTreeAlloc<T>>,
}

/// Returned by `reserve_new` to request that the caller make an allocation
/// before calling the method again.
pub(crate) struct ReserveNewNeedAlloc<T> {
    args: ReserveNewArgs<T>,
    need_empty_array_alloc: bool,
    need_new_tree_alloc: bool,
    need_tree_alloc: bool,
}

impl<T> ReserveNewNeedAlloc<T> {
    /// Make the necessary allocations for another call to `reserve_new`.
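    ///
    /// This is meant to be called without holding the lock that guards the
    /// allocator, so the allocations happen outside the critical section.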
    pub(crate) fn make_alloc(mut self) -> Result<ReserveNewArgs<T>> {
        if self.need_empty_array_alloc && self.args.empty_array_alloc.is_none() {
            self.args.empty_array_alloc = Some(EmptyArrayAlloc::try_new(TREE_THRESHOLD)?);
        }
        if self.need_new_tree_alloc && self.args.new_tree_alloc.is_none() {
            self.args.new_tree_alloc = Some(FromArrayAllocs::try_new(TREE_THRESHOLD)?);
        }
        if self.need_tree_alloc && self.args.tree_alloc.is_none() {
            self.args.tree_alloc = Some(ReserveNewTreeAlloc::try_new()?);
        }
        Ok(self.args)
    }
}