// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2025 Google LLC.

//! This module has utilities for managing a page range where unused pages may be reclaimed by a
//! vma shrinker.

// To avoid deadlocks, locks are taken in the order:
//
// 1. mmap lock
// 2. spinlock
// 3. lru spinlock
//
// The shrinker will use trylock methods because it locks them in a different order.

use core::{
    marker::PhantomPinned,
    mem::{size_of, size_of_val, MaybeUninit},
    ptr,
};

use kernel::{
    bindings,
    error::Result,
    ffi::{c_ulong, c_void},
    mm::{virt, Mm, MmWithUser},
    new_mutex, new_spinlock,
    page::{Page, PAGE_SHIFT, PAGE_SIZE},
    prelude::*,
    str::CStr,
    sync::{aref::ARef, Mutex, SpinLock},
    task::Pid,
    transmute::FromBytes,
    types::Opaque,
    uaccess::UserSliceReader,
};

/// Represents a shrinker that can be registered with the kernel.
///
/// Each shrinker can be used by many `ShrinkablePageRange` objects.
#[repr(C)]
pub(crate) struct Shrinker {
    inner: Opaque<*mut bindings::shrinker>,
    list_lru: Opaque<bindings::list_lru>,
}

// SAFETY: The shrinker and list_lru are thread safe.
unsafe impl Send for Shrinker {}
// SAFETY: The shrinker and list_lru are thread safe.
unsafe impl Sync for Shrinker {}

impl Shrinker {
    /// Create a new shrinker.
    ///
    /// # Safety
    ///
    /// Before using this shrinker with a `ShrinkablePageRange`, the `register` method must have
    /// been called exactly once, and it must not have returned an error.
    pub(crate) const unsafe fn new() -> Self {
        Self {
            inner: Opaque::uninit(),
            list_lru: Opaque::uninit(),
        }
    }

    /// Register this shrinker with the kernel.
    pub(crate) fn register(&'static self, name: &CStr) -> Result<()> {
        // SAFETY: These fields are not yet used, so it's okay to zero them.
        unsafe {
            self.inner.get().write(ptr::null_mut());
            self.list_lru.get().write_bytes(0, 1);
        }

        // SAFETY: The field is not yet used, so we can initialize it.
        let ret = unsafe { bindings::__list_lru_init(self.list_lru.get(), false, ptr::null_mut()) };
        if ret != 0 {
            return Err(Error::from_errno(ret));
        }

        // SAFETY: The `name` points at a valid C string.
        let shrinker = unsafe { bindings::shrinker_alloc(0, name.as_char_ptr()) };
        if shrinker.is_null() {
            // SAFETY: We initialized it, so it's okay to destroy it.
            unsafe { bindings::list_lru_destroy(self.list_lru.get()) };
            // Note: `ret` is zero here, so it cannot be used as the error code. Allocation
            // failure is reported as `ENOMEM`.
            return Err(ENOMEM);
        }

        // SAFETY: We're about to register the shrinker, and these are the fields we need to
        // initialize. (All other fields are already zeroed.)
        unsafe {
            (&raw mut (*shrinker).count_objects).write(Some(rust_shrink_count));
            (&raw mut (*shrinker).scan_objects).write(Some(rust_shrink_scan));
            (&raw mut (*shrinker).private_data).write(self.list_lru.get().cast());
        }

        // SAFETY: The new shrinker has been fully initialized, so we can register it.
        unsafe { bindings::shrinker_register(shrinker) };

        // SAFETY: This initializes the pointer to the shrinker so that we can use it.
        unsafe { self.inner.get().write(shrinker) };

        Ok(())
    }
}
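
// Example (illustrative sketch only): a driver would typically declare one static
// `Shrinker` and register it once during module initialization, before creating any
// `ShrinkablePageRange`. The names `MY_SHRINKER` and `my_module_init` are hypothetical.
//
//     static MY_SHRINKER: Shrinker = unsafe { Shrinker::new() };
//
//     fn my_module_init() -> Result<()> {
//         // Must happen exactly once, and before the shrinker is used.
//         MY_SHRINKER.register(kernel::c_str!("my-driver"))?;
//         Ok(())
//     }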

/// A container that manages a page range in a vma.
///
/// The pages can be thought of as an array of booleans of whether the pages are usable. The
/// methods `use_range` and `stop_using_range` set all booleans in a range to true or false
/// respectively. Initially, no pages are allocated. When a page is not used, it is not freed
/// immediately. Instead, it is made available to the memory shrinker to free it if the device is
/// under memory pressure.
///
/// It's okay for `use_range` and `stop_using_range` to race with each other, although there's no
/// way to know whether an index ends up with true or false if a call to `use_range` races with
/// another call to `stop_using_range` on a given index.
///
/// It's also okay for the two methods to race with themselves, e.g. if two threads call
/// `use_range` on the same index, then that's fine and neither call will return until the page is
/// allocated and mapped.
///
/// The methods that read or write to a range require that the page is marked as in use. So it is
/// _not_ okay to call `stop_using_range` on a page that is in use by the methods that read or
/// write to the page.
#[pin_data(PinnedDrop)]
pub(crate) struct ShrinkablePageRange {
    /// Shrinker object registered with the kernel.
    shrinker: &'static Shrinker,
    /// Pid using this page range. Only used as debugging information.
    pid: Pid,
    /// The mm for the relevant process.
    mm: ARef<Mm>,
    /// Used to synchronize calls to `vm_insert_page` and `zap_page_range_single`.
    #[pin]
    mm_lock: Mutex<()>,
    /// Spinlock protecting changes to pages.
    #[pin]
    lock: SpinLock<Inner>,

    /// Must not move, since page info has pointers back.
    #[pin]
    _pin: PhantomPinned,
}

struct Inner {
    /// Array of pages.
    ///
    /// Since this is also accessed by the shrinker, we can't use a `Box`, which asserts exclusive
    /// ownership. To deal with that, we manage it using raw pointers.
    pages: *mut PageInfo,
    /// Length of the `pages` array.
    size: usize,
    /// The address of the vma to insert the pages into.
    vma_addr: usize,
}

// SAFETY: Proper locking is in place for `Inner`.
unsafe impl Send for Inner {}

type StableMmGuard =
    kernel::sync::lock::Guard<'static, (), kernel::sync::lock::mutex::MutexBackend>;
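
// Example (illustrative sketch only): the intended lifecycle of a page index, assuming
// `range` is a pinned `ShrinkablePageRange` whose vma has already been registered.
//
//     range.use_range(0, 1)?;
//     // The page is now allocated and mapped, so reads and writes are allowed.
//     // SAFETY: Page 0 stays in use for the duration of the call.
//     unsafe { range.write(0, &42u32)? };
//     // Make the page available to the shrinker again.
//     range.stop_using_range(0, 1);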

/// An array element that describes the current state of a page.
///
/// There are three states:
///
/// * Free. The page is None. The `lru` element is not queued.
/// * Available. The page is Some. The `lru` element is queued to the shrinker's lru.
/// * Used. The page is Some. The `lru` element is not queued.
///
/// When an element is available, the shrinker is able to free the page.
#[repr(C)]
struct PageInfo {
    lru: bindings::list_head,
    page: Option<Page>,
    range: *const ShrinkablePageRange,
}

impl PageInfo {
    /// # Safety
    ///
    /// The caller ensures that writing to `me.page` is ok, and that the page is not currently set.
    unsafe fn set_page(me: *mut PageInfo, page: Page) {
        // SAFETY: This pointer offset is in bounds.
        let ptr = unsafe { &raw mut (*me).page };

        // SAFETY: The pointer is valid for writing, so also valid for reading.
        if unsafe { (*ptr).is_some() } {
            pr_err!("set_page called when there is already a page");
            // SAFETY: We will initialize the page again below.
            unsafe { ptr::drop_in_place(ptr) };
        }

        // SAFETY: The pointer is valid for writing.
        unsafe { ptr::write(ptr, Some(page)) };
    }

    /// # Safety
    ///
    /// The caller ensures that reading from `me.page` is ok for the duration of 'a.
    unsafe fn get_page<'a>(me: *const PageInfo) -> Option<&'a Page> {
        // SAFETY: This pointer offset is in bounds.
        let ptr = unsafe { &raw const (*me).page };

        // SAFETY: The pointer is valid for reading.
        unsafe { (*ptr).as_ref() }
    }

    /// # Safety
    ///
    /// The caller ensures that writing to `me.page` is ok.
    unsafe fn take_page(me: *mut PageInfo) -> Option<Page> {
        // SAFETY: This pointer offset is in bounds.
        let ptr = unsafe { &raw mut (*me).page };

        // SAFETY: The pointer is valid for reading and writing.
        unsafe { (*ptr).take() }
    }

    /// Add this page to the lru list, if not already in the list.
    ///
    /// # Safety
    ///
    /// The pointer must be valid, and it must be the right shrinker and nid.
    unsafe fn list_lru_add(me: *mut PageInfo, nid: i32, shrinker: &'static Shrinker) {
        // SAFETY: This pointer offset is in bounds.
        let lru_ptr = unsafe { &raw mut (*me).lru };
        // SAFETY: The lru pointer is valid, and we're not using it with any other lru list.
        unsafe { bindings::list_lru_add(shrinker.list_lru.get(), lru_ptr, nid, ptr::null_mut()) };
    }

    /// Remove this page from the lru list, if it is in the list.
    ///
    /// # Safety
    ///
    /// The pointer must be valid, and it must be the right shrinker and nid.
    unsafe fn list_lru_del(me: *mut PageInfo, nid: i32, shrinker: &'static Shrinker) {
        // SAFETY: This pointer offset is in bounds.
        let lru_ptr = unsafe { &raw mut (*me).lru };
        // SAFETY: The lru pointer is valid, and we're not using it with any other lru list.
        unsafe { bindings::list_lru_del(shrinker.list_lru.get(), lru_ptr, nid, ptr::null_mut()) };
    }
}

impl ShrinkablePageRange {
    /// Create a new `ShrinkablePageRange` using the given shrinker.
    pub(crate) fn new(shrinker: &'static Shrinker) -> impl PinInit<Self, Error> {
        try_pin_init!(Self {
            shrinker,
            pid: kernel::current!().pid(),
            mm: ARef::from(&**kernel::current!().mm().ok_or(ESRCH)?),
            mm_lock <- new_mutex!((), "ShrinkablePageRange::mm"),
            lock <- new_spinlock!(Inner {
                pages: ptr::null_mut(),
                size: 0,
                vma_addr: 0,
            }, "ShrinkablePageRange"),
            _pin: PhantomPinned,
        })
    }
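
    // Example (illustrative sketch only): constructing a pinned page range for the current
    // process, reusing the hypothetical `MY_SHRINKER` from the sketch above.
    //
    //     let range = KBox::pin_init(ShrinkablePageRange::new(&MY_SHRINKER), GFP_KERNEL)?;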

    pub(crate) fn stable_trylock_mm(&self) -> Option<StableMmGuard> {
        // SAFETY: This extends the duration of the reference. Since this call happens before
        // `mm_lock` is taken in the destructor of `ShrinkablePageRange`, the destructor will block
        // until the returned guard is dropped. This ensures that the guard is valid until dropped.
        let mm_lock = unsafe { &*ptr::from_ref(&self.mm_lock) };

        mm_lock.try_lock()
    }

    /// Register a vma with this page range. Returns the size of the region, in pages.
    pub(crate) fn register_with_vma(&self, vma: &virt::VmaNew) -> Result<usize> {
        let num_bytes = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
        let num_pages = num_bytes >> PAGE_SHIFT;

        if !ptr::eq::<Mm>(&*self.mm, &**vma.mm()) {
            pr_debug!("Failed to register with vma: invalid vma->vm_mm");
            return Err(EINVAL);
        }
        if num_pages == 0 {
            pr_debug!("Failed to register with vma: size zero");
            return Err(EINVAL);
        }

        let mut pages = KVVec::<PageInfo>::with_capacity(num_pages, GFP_KERNEL)?;

        // SAFETY: This just initializes the pages array.
        unsafe {
            let self_ptr = self as *const ShrinkablePageRange;
            for i in 0..num_pages {
                let info = pages.as_mut_ptr().add(i);
                (&raw mut (*info).range).write(self_ptr);
                (&raw mut (*info).page).write(None);
                let lru = &raw mut (*info).lru;
                (&raw mut (*lru).next).write(lru);
                (&raw mut (*lru).prev).write(lru);
            }
        }

        let mut inner = self.lock.lock();
        if inner.size > 0 {
            pr_debug!("Failed to register with vma: already registered");
            drop(inner);
            return Err(EBUSY);
        }

        inner.pages = pages.into_raw_parts().0;
        inner.size = num_pages;
        inner.vma_addr = vma.start();

        Ok(num_pages)
    }

    /// Make sure that the given pages are allocated and mapped.
    ///
    /// Must not be called from an atomic context.
    pub(crate) fn use_range(&self, start: usize, end: usize) -> Result<()> {
        if start >= end {
            return Ok(());
        }
        let mut inner = self.lock.lock();
        assert!(end <= inner.size);

        for i in start..end {
            // SAFETY: This pointer offset is in bounds.
            let page_info = unsafe { inner.pages.add(i) };

            // SAFETY: The pointer is valid, and we hold the lock so reading from the page is okay.
            if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
                // Since we're going to use the page, we should remove it from the lru list so that
                // the shrinker will not free it.
                //
                // SAFETY: The pointer is valid, and this is the right shrinker.
                //
                // The shrinker can't free the page between the check and this call to
                // `list_lru_del` because we hold the lock.
                unsafe { PageInfo::list_lru_del(page_info, page.nid(), self.shrinker) };
            } else {
                // We have to allocate a new page. Use the slow path.
                drop(inner);
                // SAFETY: `i < end <= inner.size` so `i` is in bounds.
                match unsafe { self.use_page_slow(i) } {
                    Ok(()) => {}
                    Err(err) => {
                        pr_warn!("Error in use_page_slow: {:?}", err);
                        return Err(err);
                    }
                }
                inner = self.lock.lock();
            }
        }
        Ok(())
    }
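
    // Example (illustrative sketch only): wiring `register_with_vma` into a driver's
    // hypothetical `mmap` handler. The driver must also configure the vma as a mixed map,
    // since `use_page_slow` later relies on `as_mixedmap_vma` succeeding.
    //
    //     fn mmap(range: &ShrinkablePageRange, vma: &virt::VmaNew) -> Result<()> {
    //         let _num_pages = range.register_with_vma(vma)?;
    //         Ok(())
    //     }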

    /// Mark the given page as in use, slow path.
    ///
    /// Must not be called from an atomic context.
    ///
    /// # Safety
    ///
    /// Assumes that `i` is in bounds.
    #[cold]
    unsafe fn use_page_slow(&self, i: usize) -> Result<()> {
        let new_page = Page::alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO)?;

        let mm_mutex = self.mm_lock.lock();
        let inner = self.lock.lock();

        // SAFETY: This pointer offset is in bounds.
        let page_info = unsafe { inner.pages.add(i) };

        // SAFETY: The pointer is valid, and we hold the lock so reading from the page is okay.
        if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
            // The page was already there, or someone else added the page while we didn't hold the
            // spinlock.
            //
            // SAFETY: The pointer is valid, and this is the right shrinker.
            //
            // The shrinker can't free the page between the check and this call to
            // `list_lru_del` because we hold the lock.
            unsafe { PageInfo::list_lru_del(page_info, page.nid(), self.shrinker) };
            return Ok(());
        }

        let vma_addr = inner.vma_addr;
        // Release the spinlock while we insert the page into the vma.
        drop(inner);

        // No overflow since we stay in bounds of the vma.
        let user_page_addr = vma_addr + (i << PAGE_SHIFT);

        // We use `mmput_async` when dropping the `mm` because `use_page_slow` is usually used from
        // a remote process. If the call to `mmput` races with the process shutting down, then the
        // caller of `use_page_slow` becomes responsible for cleaning up the `mm`, which doesn't
        // happen until it returns to userspace. However, the caller might instead go to sleep and
        // wait for the owner of the `mm` to wake it up, which doesn't happen because it's in the
        // middle of a shutdown process that won't complete until the `mm` is dropped. This can
        // amount to a deadlock.
        //
        // Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a
        // workqueue.
        MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
            .mmap_read_lock()
            .vma_lookup(vma_addr)
            .ok_or(ESRCH)?
            .as_mixedmap_vma()
            .ok_or(ESRCH)?
            .vm_insert_page(user_page_addr, &new_page)
            .inspect_err(|err| {
                pr_warn!(
                    "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
                    user_page_addr,
                    vma_addr,
                    i,
                    err
                )
            })?;

        let inner = self.lock.lock();

        // SAFETY: The `page_info` pointer is valid and currently does not have a page. The page
        // can be written to since we hold the lock.
        //
        // We released and reacquired the spinlock since we checked that the page is null, but we
        // always hold the mm_lock mutex when setting the page to a non-null value, so it's not
        // possible for someone else to have changed it since our check.
        unsafe { PageInfo::set_page(page_info, new_page) };

        drop(inner);
        drop(mm_mutex);

        Ok(())
    }

    /// If the given page is in use, then mark it as available so that the shrinker can free it.
    ///
    /// May be called from an atomic context.
    pub(crate) fn stop_using_range(&self, start: usize, end: usize) {
        if start >= end {
            return;
        }
        let inner = self.lock.lock();
        assert!(end <= inner.size);

        for i in (start..end).rev() {
            // SAFETY: The pointer is in bounds.
            let page_info = unsafe { inner.pages.add(i) };

            // SAFETY: Okay for reading since we have the lock.
            if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
                // SAFETY: The pointer is valid, and it's the right shrinker.
                unsafe { PageInfo::list_lru_add(page_info, page.nid(), self.shrinker) };
            }
        }
    }

    /// Helper for reading or writing to a range of bytes that may overlap with several pages.
    ///
    /// # Safety
    ///
    /// All pages touched by this operation must be in use for the duration of this call.
    unsafe fn iterate<T>(&self, mut offset: usize, mut size: usize, mut cb: T) -> Result
    where
        T: FnMut(&Page, usize, usize) -> Result,
    {
        if size == 0 {
            return Ok(());
        }

        let (pages, num_pages) = {
            let inner = self.lock.lock();
            (inner.pages, inner.size)
        };
        let num_bytes = num_pages << PAGE_SHIFT;

        // Check that the request is within the buffer.
        if offset.checked_add(size).ok_or(EFAULT)? > num_bytes {
            return Err(EFAULT);
        }

        let mut page_index = offset >> PAGE_SHIFT;
        offset &= PAGE_SIZE - 1;
        while size > 0 {
            let available = usize::min(size, PAGE_SIZE - offset);
            // SAFETY: The pointer is in bounds.
            let page_info = unsafe { pages.add(page_index) };
            // SAFETY: The caller guarantees that this page is in the "in use" state for the
            // duration of this call to `iterate`, so nobody will change the page.
            let page = unsafe { PageInfo::get_page(page_info) };
            if page.is_none() {
                pr_warn!("Page is null!");
            }
            let page = page.ok_or(EFAULT)?;
            cb(page, offset, available)?;
            size -= available;
            page_index += 1;
            offset = 0;
        }
        Ok(())
    }

    /// Copy from userspace into this page range.
    ///
    /// # Safety
    ///
    /// All pages touched by this operation must be in use for the duration of this call.
    pub(crate) unsafe fn copy_from_user_slice(
        &self,
        reader: &mut UserSliceReader,
        offset: usize,
        size: usize,
    ) -> Result {
        // SAFETY: `self.iterate` has the same safety requirements as `copy_from_user_slice`.
        unsafe {
            self.iterate(offset, size, |page, offset, to_copy| {
                page.copy_from_user_slice_raw(reader, offset, to_copy)
            })
        }
    }

    /// Copy from this page range into kernel space.
    ///
    /// # Safety
    ///
    /// All pages touched by this operation must be in use for the duration of this call.
    pub(crate) unsafe fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
        let mut out = MaybeUninit::<T>::uninit();
        let mut out_offset = 0;
        // SAFETY: `self.iterate` has the same safety requirements as `read`.
        unsafe {
            self.iterate(offset, size_of::<T>(), |page, offset, to_copy| {
                // SAFETY: The sum of `out_offset` and `to_copy` is bounded by the size of T.
                let obj_ptr = (out.as_mut_ptr() as *mut u8).add(out_offset);
                // SAFETY: The pointer is in-bounds of the `out` variable, so it is valid.
                page.read_raw(obj_ptr, offset, to_copy)?;
                out_offset += to_copy;
                Ok(())
            })?;
        }
        // SAFETY: We just initialized the data.
        Ok(unsafe { out.assume_init() })
    }
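
    // Example (illustrative sketch only): copying a fixed-size struct out of, and a byte
    // slice into, a range whose pages are known to be in use. `FooHeader` is a
    // hypothetical `FromBytes` type.
    //
    //     // SAFETY: All touched pages are in use for the duration of both calls.
    //     let hdr: FooHeader = unsafe { range.read(offset)? };
    //     unsafe { range.write(offset, &[0u8; 8][..])? };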

    /// Copy from kernel space into this page range.
    ///
    /// # Safety
    ///
    /// All pages touched by this operation must be in use for the duration of this call.
    pub(crate) unsafe fn write<T: ?Sized>(&self, offset: usize, obj: &T) -> Result {
        let mut obj_offset = 0;
        // SAFETY: `self.iterate` has the same safety requirements as `write`.
        unsafe {
            self.iterate(offset, size_of_val(obj), |page, offset, to_copy| {
                // SAFETY: The sum of `obj_offset` and `to_copy` is bounded by the size of T.
                let obj_ptr = (obj as *const T as *const u8).add(obj_offset);
                // SAFETY: We have a reference to the object, so the pointer is valid.
                page.write_raw(obj_ptr, offset, to_copy)?;
                obj_offset += to_copy;
                Ok(())
            })
        }
    }

    /// Write zeroes to the given range.
    ///
    /// # Safety
    ///
    /// All pages touched by this operation must be in use for the duration of this call.
    pub(crate) unsafe fn fill_zero(&self, offset: usize, size: usize) -> Result {
        // SAFETY: `self.iterate` has the same safety requirements as `fill_zero`.
        unsafe {
            self.iterate(offset, size, |page, offset, len| {
                page.fill_zero_raw(offset, len)
            })
        }
    }
}

#[pinned_drop]
impl PinnedDrop for ShrinkablePageRange {
    fn drop(self: Pin<&mut Self>) {
        let (pages, size) = {
            let lock = self.lock.lock();
            (lock.pages, lock.size)
        };

        if size == 0 {
            return;
        }

        // Note: This call is also necessary for the safety of `stable_trylock_mm`.
        let mm_lock = self.mm_lock.lock();

        // This is the destructor, so unlike the other methods, we only need to worry about races
        // with the shrinker here. Since we hold the `mm_lock`, we also can't race with the
        // shrinker, and after this loop, the shrinker will not access any of our pages since we
        // removed them from the lru list.
        for i in 0..size {
            // SAFETY: Loop is in-bounds of the size.
            let p_ptr = unsafe { pages.add(i) };
            // SAFETY: No other readers, so we can read.
            if let Some(p) = unsafe { PageInfo::get_page(p_ptr) } {
                // SAFETY: The pointer is valid and it's the right shrinker.
                unsafe { PageInfo::list_lru_del(p_ptr, p.nid(), self.shrinker) };
            }
        }

        drop(mm_lock);

        // SAFETY: `pages` was allocated as a `KVVec<PageInfo>` with capacity `size`. Furthermore,
        // all `size` elements are initialized. Also, the array is no longer shared with the
        // shrinker due to the above loop.
        drop(unsafe { KVVec::from_raw_parts(pages, size, size) });
    }
}

/// # Safety
///
/// Called by the shrinker.
#[no_mangle]
unsafe extern "C" fn rust_shrink_count(
    shrink: *mut bindings::shrinker,
    _sc: *mut bindings::shrink_control,
) -> c_ulong {
    // SAFETY: We can access our own private data.
    let list_lru = unsafe { (*shrink).private_data.cast::<bindings::list_lru>() };
    // SAFETY: Accessing the lru list is okay. Just an FFI call.
    unsafe { bindings::list_lru_count(list_lru) }
}

/// # Safety
///
/// Called by the shrinker.
#[no_mangle]
unsafe extern "C" fn rust_shrink_scan(
    shrink: *mut bindings::shrinker,
    sc: *mut bindings::shrink_control,
) -> c_ulong {
    // SAFETY: We can access our own private data.
    let list_lru = unsafe { (*shrink).private_data.cast::<bindings::list_lru>() };
    // SAFETY: Caller guarantees that it is safe to read this field.
    let nr_to_scan = unsafe { (*sc).nr_to_scan };
    // SAFETY: Accessing the lru list is okay. Just an FFI call.
    unsafe {
        bindings::list_lru_walk(
            list_lru,
            Some(bindings::rust_shrink_free_page_wrap),
            ptr::null_mut(),
            nr_to_scan,
        )
    }
}

const LRU_SKIP: bindings::lru_status = bindings::lru_status_LRU_SKIP;
const LRU_REMOVED_ENTRY: bindings::lru_status = bindings::lru_status_LRU_REMOVED_RETRY;

/// # Safety
///
/// Called by the shrinker.
#[no_mangle]
unsafe extern "C" fn rust_shrink_free_page(
    item: *mut bindings::list_head,
    lru: *mut bindings::list_lru_one,
    _cb_arg: *mut c_void,
) -> bindings::lru_status {
    // Fields that should survive after unlocking the lru lock.
    let page;
    let page_index;
    let mm;
    let mmap_read;
    let mm_mutex;
    let vma_addr;

    {
        // CAST: The `list_head` field is first in `PageInfo`.
        let info = item as *mut PageInfo;
        // SAFETY: The `range` field of `PageInfo` is immutable.
        let range = unsafe { &*((*info).range) };

        mm = match range.mm.mmget_not_zero() {
            Some(mm) => MmWithUser::into_mmput_async(mm),
            None => return LRU_SKIP,
        };

        mm_mutex = match range.stable_trylock_mm() {
            Some(guard) => guard,
            None => return LRU_SKIP,
        };

        mmap_read = match mm.mmap_read_trylock() {
            Some(guard) => guard,
            None => return LRU_SKIP,
        };

        // We can't lock it normally here, since we hold the lru lock.
        let inner = match range.lock.try_lock() {
            Some(inner) => inner,
            None => return LRU_SKIP,
        };

        // SAFETY: The item is in this lru list, so it's okay to remove it.
        unsafe { bindings::list_lru_isolate(lru, item) };

        // SAFETY: Both pointers are in bounds of the same allocation.
        page_index = unsafe { info.offset_from(inner.pages) } as usize;

        // SAFETY: We hold the spinlock, so we can take the page.
        //
        // This sets the page pointer to zero before we unmap it from the vma. However, we call
        // `zap_page_range_single` before we release the mmap lock, so `use_page_slow` will not be
        // able to insert a new page until after our call to `zap_page_range_single`.
        page = unsafe { PageInfo::take_page(info) };
        vma_addr = inner.vma_addr;

        // From this point on, we don't access this PageInfo or ShrinkablePageRange again, because
        // they can be freed at any point after we unlock `lru_lock`. The exception is `mm_mutex`,
        // which is kept alive because holding the guard blocks the destructor of the
        // `ShrinkablePageRange`.
    }

    // SAFETY: The lru lock is locked when this method is called.
    unsafe { bindings::spin_unlock(&raw mut (*lru).lock) };

    if let Some(vma) = mmap_read.vma_lookup(vma_addr) {
        let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
        vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
    }

    drop(mmap_read);
    drop(mm_mutex);
    drop(mm);
    drop(page);

    // SAFETY: We just unlocked the lru lock, but it should be locked when we return.
    unsafe { bindings::spin_lock(&raw mut (*lru).lock) };

    LRU_REMOVED_ENTRY
}