-rw-r--r--  queue-6.15/rust-completion-implement-initial-abstraction.patch                  | 206
-rw-r--r--  queue-6.15/rust-devres-do-not-dereference-to-the-internal-revocable.patch       |  79
-rw-r--r--  queue-6.15/rust-devres-fix-race-in-devres-drop.patch                            | 205
-rw-r--r--  queue-6.15/rust-revocable-indicate-whether-data-has-been-revoked-already.patch  |  78
-rw-r--r--  queue-6.15/series                                                               |   6
-rw-r--r--  queue-6.15/x86-fpu-refactor-xfeature-bitmask-update-code-for-sigframe-xsave.patch | 85
-rw-r--r--  queue-6.15/x86-pkeys-simplify-pkru-update-in-signal-frame.patch                 |  69
7 files changed, 728 insertions, 0 deletions
diff --git a/queue-6.15/rust-completion-implement-initial-abstraction.patch b/queue-6.15/rust-completion-implement-initial-abstraction.patch
new file mode 100644
index 0000000000..4fb2ee391a
--- /dev/null
+++ b/queue-6.15/rust-completion-implement-initial-abstraction.patch
@@ -0,0 +1,206 @@
+From 1b56e765bf8990f1f60e124926c11fc4ac63d752 Mon Sep 17 00:00:00 2001
+From: Danilo Krummrich <dakr@kernel.org>
+Date: Thu, 12 Jun 2025 14:17:13 +0200
+Subject: rust: completion: implement initial abstraction
+
+From: Danilo Krummrich <dakr@kernel.org>
+
+commit 1b56e765bf8990f1f60e124926c11fc4ac63d752 upstream.
+
+Implement a minimal abstraction for the completion synchronization
+primitive.
+
+This initial abstraction only adds complete_all() and
+wait_for_completion(), since that is what is required for the subsequent
+Devres patch.
+
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Juri Lelli <juri.lelli@redhat.com>
+Cc: Vincent Guittot <vincent.guittot@linaro.org>
+Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Ben Segall <bsegall@google.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Valentin Schneider <vschneid@redhat.com>
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Reviewed-by: Boqun Feng <boqun.feng@gmail.com>
+Reviewed-by: Benno Lossin <lossin@kernel.org>
+Acked-by: Miguel Ojeda <ojeda@kernel.org>
+Link: https://lore.kernel.org/r/20250612121817.1621-2-dakr@kernel.org
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/bindings/bindings_helper.h | 1
+ rust/helpers/completion.c | 8 ++
+ rust/helpers/helpers.c | 1
+ rust/kernel/sync.rs | 2
+ rust/kernel/sync/completion.rs | 112 ++++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 124 insertions(+)
+ create mode 100644 rust/helpers/completion.c
+ create mode 100644 rust/kernel/sync/completion.rs
+
+--- a/rust/bindings/bindings_helper.h
++++ b/rust/bindings/bindings_helper.h
+@@ -10,6 +10,7 @@
+ #include <linux/blk-mq.h>
+ #include <linux/blk_types.h>
+ #include <linux/blkdev.h>
++#include <linux/completion.h>
+ #include <linux/cpumask.h>
+ #include <linux/cred.h>
+ #include <linux/device/faux.h>
+--- /dev/null
++++ b/rust/helpers/completion.c
+@@ -0,0 +1,8 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/completion.h>
++
++void rust_helper_init_completion(struct completion *x)
++{
++ init_completion(x);
++}
+--- a/rust/helpers/helpers.c
++++ b/rust/helpers/helpers.c
+@@ -11,6 +11,7 @@
+ #include "bug.c"
+ #include "build_assert.c"
+ #include "build_bug.c"
++#include "completion.c"
+ #include "cpumask.c"
+ #include "cred.c"
+ #include "device.c"
+--- a/rust/kernel/sync.rs
++++ b/rust/kernel/sync.rs
+@@ -10,6 +10,7 @@ use crate::types::Opaque;
+ use pin_init;
+
+ mod arc;
++pub mod completion;
+ mod condvar;
+ pub mod lock;
+ mod locked_by;
+@@ -17,6 +18,7 @@ pub mod poll;
+ pub mod rcu;
+
+ pub use arc::{Arc, ArcBorrow, UniqueArc};
++pub use completion::Completion;
+ pub use condvar::{new_condvar, CondVar, CondVarTimeoutResult};
+ pub use lock::global::{global_lock, GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy};
+ pub use lock::mutex::{new_mutex, Mutex, MutexGuard};
+--- /dev/null
++++ b/rust/kernel/sync/completion.rs
+@@ -0,0 +1,112 @@
++// SPDX-License-Identifier: GPL-2.0
++
++//! Completion support.
++//!
++//! Reference: <https://docs.kernel.org/scheduler/completion.html>
++//!
++//! C header: [`include/linux/completion.h`](srctree/include/linux/completion.h)
++
++use crate::{bindings, prelude::*, types::Opaque};
++
++/// Synchronization primitive to signal when a certain task has been completed.
++///
++/// The [`Completion`] synchronization primitive signals when a certain task has been completed by
++/// waking up other tasks that have been queued up to wait for the [`Completion`] to be completed.
++///
++/// # Examples
++///
++/// ```
++/// use kernel::sync::{Arc, Completion};
++/// use kernel::workqueue::{self, impl_has_work, new_work, Work, WorkItem};
++///
++/// #[pin_data]
++/// struct MyTask {
++/// #[pin]
++/// work: Work<MyTask>,
++/// #[pin]
++/// done: Completion,
++/// }
++///
++/// impl_has_work! {
++/// impl HasWork<Self> for MyTask { self.work }
++/// }
++///
++/// impl MyTask {
++/// fn new() -> Result<Arc<Self>> {
++/// let this = Arc::pin_init(pin_init!(MyTask {
++/// work <- new_work!("MyTask::work"),
++/// done <- Completion::new(),
++/// }), GFP_KERNEL)?;
++///
++/// let _ = workqueue::system().enqueue(this.clone());
++///
++/// Ok(this)
++/// }
++///
++/// fn wait_for_completion(&self) {
++/// self.done.wait_for_completion();
++///
++/// pr_info!("Completion: task complete\n");
++/// }
++/// }
++///
++/// impl WorkItem for MyTask {
++/// type Pointer = Arc<MyTask>;
++///
++/// fn run(this: Arc<MyTask>) {
++/// // process this task
++/// this.done.complete_all();
++/// }
++/// }
++///
++/// let task = MyTask::new()?;
++/// task.wait_for_completion();
++/// # Ok::<(), Error>(())
++/// ```
++#[pin_data]
++pub struct Completion {
++ #[pin]
++ inner: Opaque<bindings::completion>,
++}
++
++// SAFETY: `Completion` is safe to be send to any task.
++unsafe impl Send for Completion {}
++
++// SAFETY: `Completion` is safe to be accessed concurrently.
++unsafe impl Sync for Completion {}
++
++impl Completion {
++ /// Create an initializer for a new [`Completion`].
++ pub fn new() -> impl PinInit<Self> {
++ pin_init!(Self {
++ inner <- Opaque::ffi_init(|slot: *mut bindings::completion| {
++ // SAFETY: `slot` is a valid pointer to an uninitialized `struct completion`.
++ unsafe { bindings::init_completion(slot) };
++ }),
++ })
++ }
++
++ fn as_raw(&self) -> *mut bindings::completion {
++ self.inner.get()
++ }
++
++ /// Signal all tasks waiting on this completion.
++ ///
++ /// This method wakes up all tasks waiting on this completion; after this operation the
++ /// completion is permanently done, i.e. signals all current and future waiters.
++ pub fn complete_all(&self) {
++ // SAFETY: `self.as_raw()` is a pointer to a valid `struct completion`.
++ unsafe { bindings::complete_all(self.as_raw()) };
++ }
++
++ /// Wait for completion of a task.
++ ///
++ /// This method waits for the completion of a task; it is not interruptible and there is no
++ /// timeout.
++ ///
++ /// See also [`Completion::complete_all`].
++ pub fn wait_for_completion(&self) {
++ // SAFETY: `self.as_raw()` is a pointer to a valid `struct completion`.
++ unsafe { bindings::wait_for_completion(self.as_raw()) };
++ }
++}
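
For orientation, the two methods this abstraction exposes pair up as follows. This is a minimal sketch, not part of the patch, assuming only the `Arc::pin_init`/`GFP_KERNEL` APIs that the doc-comment example above already relies on:

    use kernel::prelude::*;
    use kernel::sync::{Arc, Completion};

    /// Minimal sketch: pin-initialize a `Completion`, signal it, then wait on it.
    fn completion_roundtrip() -> Result {
        // Heap-pin the completion behind an `Arc`, as in the patch's own example.
        let done = Arc::pin_init(Completion::new(), GFP_KERNEL)?;

        // `complete_all()` permanently marks the completion as done, releasing all
        // current and future waiters.
        done.complete_all();

        // Returns immediately here, since the completion has already been signalled;
        // otherwise it would block uninterruptibly, with no timeout.
        done.wait_for_completion();

        Ok(())
    }

In the Devres fix later in this queue, the waiting side runs in the devres callback and the signalling side in Devres::drop().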
diff --git a/queue-6.15/rust-devres-do-not-dereference-to-the-internal-revocable.patch b/queue-6.15/rust-devres-do-not-dereference-to-the-internal-revocable.patch
new file mode 100644
index 0000000000..2a437dd337
--- /dev/null
+++ b/queue-6.15/rust-devres-do-not-dereference-to-the-internal-revocable.patch
@@ -0,0 +1,79 @@
+From 20c96ed278e362ae4e324ed7d8c69fb48c508d3c Mon Sep 17 00:00:00 2001
+From: Danilo Krummrich <dakr@kernel.org>
+Date: Wed, 11 Jun 2025 19:48:25 +0200
+Subject: rust: devres: do not dereference to the internal Revocable
+
+From: Danilo Krummrich <dakr@kernel.org>
+
+commit 20c96ed278e362ae4e324ed7d8c69fb48c508d3c upstream.
+
+We can't expose direct access to the internal Revocable, since this
+allows users to directly revoke the internal Revocable without Devres
+having the chance to synchronize with the devres callback -- we have to
+guarantee that the internal Revocable has been fully revoked before
+the device is fully unbound.
+
+Hence, remove the corresponding Deref implementation and, instead,
+provide indirect accessors for the internal Revocable.
+
+Note that we can still support Devres::revoke() by implementing the
+required synchronization (which would be almost identical to the
+synchronization in Devres::drop()).
+
+Fixes: 76c01ded724b ("rust: add devres abstraction")
+Reviewed-by: Benno Lossin <lossin@kernel.org>
+Link: https://lore.kernel.org/r/20250611174827.380555-1-dakr@kernel.org
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/kernel/devres.rs | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/rust/kernel/devres.rs
++++ b/rust/kernel/devres.rs
+@@ -12,13 +12,11 @@ use crate::{
+ error::{Error, Result},
+ ffi::c_void,
+ prelude::*,
+- revocable::Revocable,
+- sync::{Arc, Completion},
++ revocable::{Revocable, RevocableGuard},
++ sync::{rcu, Arc, Completion},
+ types::ARef,
+ };
+
+-use core::ops::Deref;
+-
+ #[pin_data]
+ struct DevresInner<T> {
+ dev: ARef<Device>,
+@@ -196,13 +194,15 @@ impl<T> Devres<T> {
+
+ Ok(())
+ }
+-}
+
+-impl<T> Deref for Devres<T> {
+- type Target = Revocable<T>;
++ /// [`Devres`] accessor for [`Revocable::try_access`].
++ pub fn try_access(&self) -> Option<RevocableGuard<'_, T>> {
++ self.0.data.try_access()
++ }
+
+- fn deref(&self) -> &Self::Target {
+- &self.0.data
++ /// [`Devres`] accessor for [`Revocable::try_access_with_guard`].
++ pub fn try_access_with_guard<'a>(&'a self, guard: &'a rcu::Guard) -> Option<&'a T> {
++ self.0.data.try_access_with_guard(guard)
+ }
+ }
+
+@@ -210,7 +210,7 @@ impl<T> Drop for Devres<T> {
+ fn drop(&mut self) {
+ // SAFETY: When `drop` runs, it is guaranteed that nobody is accessing the revocable data
+ // anymore, hence it is safe not to wait for the grace period to finish.
+- if unsafe { self.revoke_nosync() } {
++ if unsafe { self.0.data.revoke_nosync() } {
+ // We revoked `self.0.data` before the devres action did, hence try to remove it.
+ if !DevresInner::remove_action(&self.0) {
+ // We could not remove the devres action, which means that it now runs concurrently,
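
With the Deref impl removed, callers reach the wrapped resource only through the accessors above. A hypothetical caller (the helper names and the generic T are illustrative, not from the patch) would look roughly like:

    use kernel::devres::Devres;
    use kernel::sync::rcu;

    /// Hypothetical helpers showing the two access paths for a `Devres<T>`-wrapped resource.
    fn with_resource<T, R>(res: &Devres<T>, f: impl FnOnce(&T) -> R) -> Option<R> {
        // `try_access()` returns `None` once the device is unbound and the data revoked.
        let guard = res.try_access()?;
        Some(f(&*guard))
    }

    fn with_resource_rcu<T, R>(res: &Devres<T>, f: impl FnOnce(&T) -> R) -> Option<R> {
        // Alternatively, take an explicit RCU read lock and use the guard-based accessor,
        // so several accesses can share a single RCU read-side critical section.
        let rcu_guard = rcu::read_lock();
        res.try_access_with_guard(&rcu_guard).map(f)
    }

Either way, Devres keeps the ability to synchronize revocation with the devres callback, which the removed Deref bypassed.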
diff --git a/queue-6.15/rust-devres-fix-race-in-devres-drop.patch b/queue-6.15/rust-devres-fix-race-in-devres-drop.patch
new file mode 100644
index 0000000000..0fd251ca4a
--- /dev/null
+++ b/queue-6.15/rust-devres-fix-race-in-devres-drop.patch
@@ -0,0 +1,205 @@
+From f744201c6159fc7323c40936fd079525f7063598 Mon Sep 17 00:00:00 2001
+From: Danilo Krummrich <dakr@kernel.org>
+Date: Thu, 12 Jun 2025 14:17:15 +0200
+Subject: rust: devres: fix race in Devres::drop()
+
+From: Danilo Krummrich <dakr@kernel.org>
+
+commit f744201c6159fc7323c40936fd079525f7063598 upstream.
+
+In Devres::drop() we first remove the devres action and then drop the
+wrapped device resource.
+
+The design goal is to give the owner of a Devres object control over when
+the device resource is dropped, but limit the overall scope to the
+corresponding device being bound to a driver.
+
+However, there's a race that was introduced with commit 8ff656643d30
+("rust: devres: remove action in `Devres::drop`"), but also has been
+(partially) present from the initial version on.
+
+In Devres::drop(), the devres action is removed successfully and
+subsequently the destructor of the wrapped device resource runs.
+However, there is no guarantee that the destructor of the wrapped device
+resource completes before the driver core is done unbinding the
+corresponding device.
+
+If in Devres::drop(), the devres action can't be removed, it means that
+the devres callback has been executed already, or is still running
+concurrently. In case of the latter, either Devres::drop() wins revoking
+the Revocable or the devres callback wins revoking the Revocable. If
+Devres::drop() wins, we (again) have no guarantee that the destructor of
+the wrapped device resource completes before the driver core is done
+unbinding the corresponding device.
+
+CPU0                                    CPU1
+------------------------------------------------------------------------
+Devres::drop() {                        Devres::devres_callback() {
+   self.data.revoke() {                    this.data.revoke() {
+      is_available.swap() == true
+                                              is_available.swap() == false
+                                           }
+                                        }
+
+                                        // [...]
+                                        // device fully unbound
+      drop_in_place() {
+         // release device resource
+      }
+   }
+}
+
+Depending on the specific device resource, this can potentially lead to
+use-after-free bugs.
+
+In order to fix this, implement the following logic.
+
+In the devres callback, we're always good when we get to revoke the
+device resource ourselves, i.e. Revocable::revoke() returns true.
+
+If Revocable::revoke() returns false, it means that Devres::drop(),
+concurrently, already drops the device resource and we have to wait for
+Devres::drop() to signal that it finished dropping the device resource.
+
+Note that if we hit the case where we need to wait for the completion of
+Devres::drop() in the devres callback, it means that we're actually
+racing with a concurrent Devres::drop() call, which already started
+revoking the device resource for us. This is rather unlikely and means
+that the concurrent Devres::drop() already started doing our work and we
+just need to wait for it to complete it for us. Hence, there should not
+be any additional overhead from that.
+
+(Actually, for now it's even better if Devres::drop() does the work for
+us, since it can bypass the synchronize_rcu() call implied by
+Revocable::revoke(), but this goes away anyways once I get to implement
+the split devres callback approach, which allows us to first flip the
+atomics of all registered Devres objects of a certain device, execute a
+single synchronize_rcu() and then drop all revocable objects.)
+
+In Devres::drop() we try to revoke the device resource. If that is *not*
+successful, it means that the devres callback already did and we're good.
+
+Otherwise, we try to remove the devres action, which, if successful,
+means that we're good, since the device resource has just been revoked
+by us *before* we removed the devres action successfully.
+
+If the devres action could not be removed, it means that the devres
+callback must be running concurrently, hence we signal that the device
+resource has been revoked by us, using the completion.
+
+This makes it safe to drop a Devres object from any task and at any point
+of time, which is one of the design goals.
+
+Fixes: 76c01ded724b ("rust: add devres abstraction")
+Reported-by: Alice Ryhl <aliceryhl@google.com>
+Closes: https://lore.kernel.org/lkml/aD64YNuqbPPZHAa5@google.com/
+Reviewed-by: Benno Lossin <lossin@kernel.org>
+Link: https://lore.kernel.org/r/20250612121817.1621-4-dakr@kernel.org
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/kernel/devres.rs | 37 +++++++++++++++++++++++++++++--------
+ 1 file changed, 29 insertions(+), 8 deletions(-)
+
+--- a/rust/kernel/devres.rs
++++ b/rust/kernel/devres.rs
+@@ -13,7 +13,7 @@ use crate::{
+ ffi::c_void,
+ prelude::*,
+ revocable::Revocable,
+- sync::Arc,
++ sync::{Arc, Completion},
+ types::ARef,
+ };
+
+@@ -25,13 +25,17 @@ struct DevresInner<T> {
+ callback: unsafe extern "C" fn(*mut c_void),
+ #[pin]
+ data: Revocable<T>,
++ #[pin]
++ revoke: Completion,
+ }
+
+ /// This abstraction is meant to be used by subsystems to containerize [`Device`] bound resources to
+ /// manage their lifetime.
+ ///
+ /// [`Device`] bound resources should be freed when either the resource goes out of scope or the
+-/// [`Device`] is unbound respectively, depending on what happens first.
++/// [`Device`] is unbound respectively, depending on what happens first. In any case, it is always
++/// guaranteed that revoking the device resource is completed before the corresponding [`Device`]
++/// is unbound.
+ ///
+ /// To achieve that [`Devres`] registers a devres callback on creation, which is called once the
+ /// [`Device`] is unbound, revoking access to the encapsulated resource (see also [`Revocable`]).
+@@ -105,6 +109,7 @@ impl<T> DevresInner<T> {
+ dev: dev.into(),
+ callback: Self::devres_callback,
+ data <- Revocable::new(data),
++ revoke <- Completion::new(),
+ }),
+ flags,
+ )?;
+@@ -133,26 +138,28 @@ impl<T> DevresInner<T> {
+ self as _
+ }
+
+- fn remove_action(this: &Arc<Self>) {
++ fn remove_action(this: &Arc<Self>) -> bool {
+ // SAFETY:
+ // - `self.inner.dev` is a valid `Device`,
+ // - the `action` and `data` pointers are the exact same ones as given to devm_add_action()
+ // previously,
+ // - `self` is always valid, even if the action has been released already.
+- let ret = unsafe {
++ let success = unsafe {
+ bindings::devm_remove_action_nowarn(
+ this.dev.as_raw(),
+ Some(this.callback),
+ this.as_ptr() as _,
+ )
+- };
++ } == 0;
+
+- if ret == 0 {
++ if success {
+ // SAFETY: We leaked an `Arc` reference to devm_add_action() in `DevresInner::new`; if
+ // devm_remove_action_nowarn() was successful we can (and have to) claim back ownership
+ // of this reference.
+ let _ = unsafe { Arc::from_raw(this.as_ptr()) };
+ }
++
++ success
+ }
+
+ #[allow(clippy::missing_safety_doc)]
+@@ -164,7 +171,12 @@ impl<T> DevresInner<T> {
+ // `DevresInner::new`.
+ let inner = unsafe { Arc::from_raw(ptr) };
+
+- inner.data.revoke();
++ if !inner.data.revoke() {
++ // If `revoke()` returns false, it means that `Devres::drop` already started revoking
++ // `inner.data` for us. Hence we have to wait until `Devres::drop()` signals that it
++ // completed revoking `inner.data`.
++ inner.revoke.wait_for_completion();
++ }
+ }
+ }
+
+@@ -196,6 +208,15 @@ impl<T> Deref for Devres<T> {
+
+ impl<T> Drop for Devres<T> {
+ fn drop(&mut self) {
+- DevresInner::remove_action(&self.0);
++ // SAFETY: When `drop` runs, it is guaranteed that nobody is accessing the revocable data
++ // anymore, hence it is safe not to wait for the grace period to finish.
++ if unsafe { self.revoke_nosync() } {
++ // We revoked `self.0.data` before the devres action did, hence try to remove it.
++ if !DevresInner::remove_action(&self.0) {
++ // We could not remove the devres action, which means that it now runs concurrently,
++ // hence signal that `self.0.data` has been revoked successfully.
++ self.0.revoke.complete_all();
++ }
++ }
+ }
+ }
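
Distilled, the handshake is: both sides race on the atomic swap inside Revocable, and whichever side loses must not assume the resource is gone until the winner says so. Below is a user-space model of that protocol (plain std Rust, illustration only; it is simplified in that the real Devres::drop() signals the completion only when it failed to remove the devres action):

    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;

    struct Handshake {
        // Plays the role of `Revocable::is_available`.
        available: AtomicBool,
        // Plays the role of the new `revoke` Completion.
        dropped: (Mutex<bool>, Condvar),
    }

    impl Handshake {
        fn revoke(&self) -> bool {
            // Whoever flips `available` from true to false "wins" and drops the resource.
            self.available.swap(false, Ordering::Relaxed)
        }

        fn drop_side(&self) {
            if self.revoke() {
                // Winner: drop the wrapped resource here, then signal the callback side.
                *self.dropped.0.lock().unwrap() = true;
                self.dropped.1.notify_all();
            }
            // Loser: the devres callback is (or was) dropping the resource instead.
        }

        fn callback_side(&self) {
            if !self.revoke() {
                // Loser: `Devres::drop()` is dropping the resource concurrently; block
                // until it signals, so the drop finishes before the device is fully unbound.
                let mut done = self.dropped.0.lock().unwrap();
                while !*done {
                    done = self.dropped.1.wait(done).unwrap();
                }
            }
        }
    }

    fn main() {
        let h = Arc::new(Handshake {
            available: AtomicBool::new(true),
            dropped: (Mutex::new(false), Condvar::new()),
        });
        let h2 = Arc::clone(&h);
        let t = thread::spawn(move || h2.callback_side());
        h.drop_side();
        t.join().unwrap();
    }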
diff --git a/queue-6.15/rust-revocable-indicate-whether-data-has-been-revoked-already.patch b/queue-6.15/rust-revocable-indicate-whether-data-has-been-revoked-already.patch
new file mode 100644
index 0000000000..52526cd94d
--- /dev/null
+++ b/queue-6.15/rust-revocable-indicate-whether-data-has-been-revoked-already.patch
@@ -0,0 +1,78 @@
+From 4b76fafb20dd4a2becb94949d78e86bc88006509 Mon Sep 17 00:00:00 2001
+From: Danilo Krummrich <dakr@kernel.org>
+Date: Thu, 12 Jun 2025 14:17:14 +0200
+Subject: rust: revocable: indicate whether `data` has been revoked already
+
+From: Danilo Krummrich <dakr@kernel.org>
+
+commit 4b76fafb20dd4a2becb94949d78e86bc88006509 upstream.
+
+Return a boolean from Revocable::revoke() and Revocable::revoke_nosync()
+to indicate whether the data has been revoked already.
+
+Return true if the data hasn't been revoked yet (i.e. this call revoked
+the data), false otherwise.
+
+This is required by Devres in order to synchronize the completion of the
+revoke process.
+
+Reviewed-by: Benno Lossin <lossin@kernel.org>
+Acked-by: Miguel Ojeda <ojeda@kernel.org>
+Link: https://lore.kernel.org/r/20250612121817.1621-3-dakr@kernel.org
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/kernel/revocable.rs | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+--- a/rust/kernel/revocable.rs
++++ b/rust/kernel/revocable.rs
+@@ -126,8 +126,10 @@ impl<T> Revocable<T> {
+ /// # Safety
+ ///
+ /// Callers must ensure that there are no more concurrent users of the revocable object.
+- unsafe fn revoke_internal<const SYNC: bool>(&self) {
+- if self.is_available.swap(false, Ordering::Relaxed) {
++ unsafe fn revoke_internal<const SYNC: bool>(&self) -> bool {
++ let revoke = self.is_available.swap(false, Ordering::Relaxed);
++
++ if revoke {
+ if SYNC {
+ // SAFETY: Just an FFI call, there are no further requirements.
+ unsafe { bindings::synchronize_rcu() };
+@@ -137,6 +139,8 @@ impl<T> Revocable<T> {
+ // `compare_exchange` above that takes `is_available` from `true` to `false`.
+ unsafe { drop_in_place(self.data.get()) };
+ }
++
++ revoke
+ }
+
+ /// Revokes access to and drops the wrapped object.
+@@ -144,10 +148,13 @@ impl<T> Revocable<T> {
+ /// Access to the object is revoked immediately to new callers of [`Revocable::try_access`],
+ /// expecting that there are no concurrent users of the object.
+ ///
++ /// Returns `true` if `&self` has been revoked with this call, `false` if it was revoked
++ /// already.
++ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that there are no more concurrent users of the revocable object.
+- pub unsafe fn revoke_nosync(&self) {
++ pub unsafe fn revoke_nosync(&self) -> bool {
+ // SAFETY: By the safety requirement of this function, the caller ensures that nobody is
+ // accessing the data anymore and hence we don't have to wait for the grace period to
+ // finish.
+@@ -161,7 +168,10 @@ impl<T> Revocable<T> {
+ /// If there are concurrent users of the object (i.e., ones that called
+ /// [`Revocable::try_access`] beforehand and still haven't dropped the returned guard), this
+ /// function waits for the concurrent access to complete before dropping the wrapped object.
+- pub fn revoke(&self) {
++ ///
++ /// Returns `true` if `&self` has been revoked with this call, `false` if it was revoked
++ /// already.
++ pub fn revoke(&self) -> bool {
+ // SAFETY: By passing `true` we ask `revoke_internal` to wait for the grace period to
+ // finish.
+ unsafe { self.revoke_internal::<true>() }
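
The new return value lets callers tell "this call revoked the data" apart from "someone else already did". A hypothetical tear-down helper (names are illustrative, not from the patch) might use it like this:

    use kernel::prelude::*;
    use kernel::revocable::Revocable;

    /// Hypothetical helper for some `Revocable`-wrapped driver data.
    fn tear_down<T>(data: &Revocable<T>) {
        if data.revoke() {
            // This call flipped `is_available` and dropped the wrapped object after
            // waiting for the RCU grace period.
            pr_debug!("revoked by this call\n");
        } else {
            // Already revoked elsewhere (e.g. by a concurrent devres callback); that
            // path owns (or already finished) dropping the object.
            pr_debug!("was already revoked\n");
        }
    }

This is exactly the distinction the Devres race fix in this queue relies on.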
diff --git a/queue-6.15/series b/queue-6.15/series
index 7a160cb126..b0a0236acb 100644
--- a/queue-6.15/series
+++ b/queue-6.15/series
@@ -252,3 +252,9 @@ drm-amd-display-fix-default-dc-and-ac-levels.patch
drm-amd-display-only-read-acpi-backlight-caps-once.patch
drm-amd-display-optimize-custom-brightness-curve.patch
drm-amd-display-export-full-brightness-range-to-user.patch
+rust-completion-implement-initial-abstraction.patch
+rust-revocable-indicate-whether-data-has-been-revoked-already.patch
+rust-devres-fix-race-in-devres-drop.patch
+rust-devres-do-not-dereference-to-the-internal-revocable.patch
+x86-fpu-refactor-xfeature-bitmask-update-code-for-sigframe-xsave.patch
+x86-pkeys-simplify-pkru-update-in-signal-frame.patch
diff --git a/queue-6.15/x86-fpu-refactor-xfeature-bitmask-update-code-for-sigframe-xsave.patch b/queue-6.15/x86-fpu-refactor-xfeature-bitmask-update-code-for-sigframe-xsave.patch
new file mode 100644
index 0000000000..b004818a97
--- /dev/null
+++ b/queue-6.15/x86-fpu-refactor-xfeature-bitmask-update-code-for-sigframe-xsave.patch
@@ -0,0 +1,85 @@
+From 64e54461ab6e8524a8de4e63b7d1a3e4481b5cf3 Mon Sep 17 00:00:00 2001
+From: "Chang S. Bae" <chang.seok.bae@intel.com>
+Date: Tue, 15 Apr 2025 19:16:57 -0700
+Subject: x86/fpu: Refactor xfeature bitmask update code for sigframe XSAVE
+
+From: Chang S. Bae <chang.seok.bae@intel.com>
+
+commit 64e54461ab6e8524a8de4e63b7d1a3e4481b5cf3 upstream.
+
+Currently, when saving register states in the signal frame, the legacy
+feature bits are always set in xregs_state->header->xfeatures. This code
+sequence can be generalized for reuse in similar cases.
+
+Refactor the logic to ensure a consistent approach across similar usages.
+
+Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Link: https://lore.kernel.org/r/20250416021720.12305-8-chang.seok.bae@intel.com
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/fpu/signal.c | 11 +----------
+ arch/x86/kernel/fpu/xstate.h | 13 +++++++++++++
+ 2 files changed, 14 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -114,7 +114,6 @@ static inline bool save_xstate_epilog(vo
+ {
+ struct xregs_state __user *x = buf;
+ struct _fpx_sw_bytes sw_bytes = {};
+- u32 xfeatures;
+ int err;
+
+ /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
+@@ -128,12 +127,6 @@ static inline bool save_xstate_epilog(vo
+ (__u32 __user *)(buf + fpstate->user_size));
+
+ /*
+- * Read the xfeatures which we copied (directly from the cpu or
+- * from the state in task struct) to the user buffers.
+- */
+- err |= __get_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
+-
+- /*
+ * For legacy compatible, we always set FP/SSE bits in the bit
+ * vector while saving the state to the user context. This will
+ * enable us capturing any changes(during sigreturn) to
+@@ -144,9 +137,7 @@ static inline bool save_xstate_epilog(vo
+ * header as well as change any contents in the memory layout.
+ * xrestore as part of sigreturn will capture all the changes.
+ */
+- xfeatures |= XFEATURE_MASK_FPSSE;
+-
+- err |= __put_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
++ err |= set_xfeature_in_sigframe(x, XFEATURE_MASK_FPSSE);
+
+ return !err;
+ }
+--- a/arch/x86/kernel/fpu/xstate.h
++++ b/arch/x86/kernel/fpu/xstate.h
+@@ -69,6 +69,19 @@ static inline u64 xfeatures_mask_indepen
+ return fpu_kernel_cfg.independent_features;
+ }
+
++static inline int set_xfeature_in_sigframe(struct xregs_state __user *xbuf, u64 mask)
++{
++ u64 xfeatures;
++ int err;
++
++ /* Read the xfeatures value already saved in the user buffer */
++ err = __get_user(xfeatures, &xbuf->header.xfeatures);
++ xfeatures |= mask;
++ err |= __put_user(xfeatures, &xbuf->header.xfeatures);
++
++ return err;
++}
++
+ /*
+ * Update the value of PKRU register that was already pushed onto the signal frame.
+ */
diff --git a/queue-6.15/x86-pkeys-simplify-pkru-update-in-signal-frame.patch b/queue-6.15/x86-pkeys-simplify-pkru-update-in-signal-frame.patch
new file mode 100644
index 0000000000..5b1c49ac70
--- /dev/null
+++ b/queue-6.15/x86-pkeys-simplify-pkru-update-in-signal-frame.patch
@@ -0,0 +1,69 @@
+From d1e420772cd1eb0afe5858619c73ce36f3e781a1 Mon Sep 17 00:00:00 2001
+From: "Chang S. Bae" <chang.seok.bae@intel.com>
+Date: Tue, 15 Apr 2025 19:16:58 -0700
+Subject: x86/pkeys: Simplify PKRU update in signal frame
+
+From: Chang S. Bae <chang.seok.bae@intel.com>
+
+commit d1e420772cd1eb0afe5858619c73ce36f3e781a1 upstream.
+
+The signal delivery logic was modified to always set the PKRU bit in
+xregs_state->header->xfeatures by this commit:
+
+ ae6012d72fa6 ("x86/pkeys: Ensure updated PKRU value is XRSTOR'd")
+
+However, the change derives the bitmask value using XGETBV(1), rather
+than simply updating the buffer that already holds the value. Thus, this
+approach induces an unnecessary dependency on XGETBV1 for PKRU handling.
+
+Eliminate the dependency by using the established helper function.
+Subsequently, remove the now-unused 'mask' argument.
+
+Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Tony W Wang-oc <TonyWWang-oc@zhaoxin.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lore.kernel.org/r/20250416021720.12305-9-chang.seok.bae@intel.com
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/fpu/xstate.h | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/fpu/xstate.h
++++ b/arch/x86/kernel/fpu/xstate.h
+@@ -85,18 +85,15 @@ static inline int set_xfeature_in_sigfra
+ /*
+ * Update the value of PKRU register that was already pushed onto the signal frame.
+ */
+-static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u64 mask, u32 pkru)
++static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru)
+ {
+- u64 xstate_bv;
+ int err;
+
+ if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
+ return 0;
+
+ /* Mark PKRU as in-use so that it is restored correctly. */
+- xstate_bv = (mask & xfeatures_in_use()) | XFEATURE_MASK_PKRU;
+-
+- err = __put_user(xstate_bv, &buf->header.xfeatures);
++ err = set_xfeature_in_sigframe(buf, XFEATURE_MASK_PKRU);
+ if (err)
+ return err;
+
+@@ -320,7 +317,7 @@ static inline int xsave_to_user_sigframe
+ clac();
+
+ if (!err)
+- err = update_pkru_in_sigframe(buf, mask, pkru);
++ err = update_pkru_in_sigframe(buf, pkru);
+
+ return err;
+ }