@@ -10,6 +10,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gpuvm.h>
#include <drm/drm_ioctl.h>
#include <kunit/test.h>
#include <linux/auxiliary_bus.h>
new file mode 100644
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+#include <drm/drm_gpuvm.h>
+
+#ifdef CONFIG_DRM
+#ifdef CONFIG_DRM_GPUVM
+
+struct drm_gpuvm *rust_helper_drm_gpuvm_get(struct drm_gpuvm *obj)
+{
+ return drm_gpuvm_get(obj);
+}
+
+void rust_helper_drm_gpuva_init_from_op(struct drm_gpuva *va, struct drm_gpuva_op_map *op)
+{
+ drm_gpuva_init_from_op(va, op);
+}
+
+struct drm_gpuvm_bo *rust_helper_drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
+{
+ return drm_gpuvm_bo_get(vm_bo);
+}
+
+bool rust_helper_drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj)
+{
+ return drm_gpuvm_is_extobj(gpuvm, obj);
+}
+
+#endif /* CONFIG_DRM_GPUVM */
+#endif /* CONFIG_DRM */
@@ -16,6 +16,7 @@
#include "device.c"
#include "dma-resv.c"
#include "drm.c"
+#include "drm_gpuvm.c"
#include "err.c"
#include "fs.c"
#include "io.c"
new file mode 100644
@@ -0,0 +1,790 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! GPUVM abstractions.
+//!
+//! Only synchronous operations are supported, and the GEM object's `dma_resv`
+//! is used as the gpuva lock.
+//!
+//! C header: [`include/drm/drm_gpuvm.h`](srctree/include/drm/drm_gpuvm.h)
+
+use core::cell::UnsafeCell;
+use core::marker::{PhantomData, PhantomPinned};
+use core::ops::{Deref, DerefMut, Range};
+use core::ptr::NonNull;
+
+use crate::bindings;
+use crate::drm::device;
+use crate::drm::drv;
+use crate::drm::gem::IntoGEMObject;
+use crate::error::code::ENOMEM;
+use crate::error::from_result;
+use crate::error::to_result;
+use crate::error::Result;
+use crate::init;
+use crate::init::pin_init_from_closure;
+use crate::prelude::*;
+use crate::sync::lock::Backend;
+use crate::sync::lock::Guard;
+use crate::types::ARef;
+use crate::types::AlwaysRefCounted;
+use crate::types::Opaque;
+
+// SAFETY: This type is safe to zero-initialize.
+unsafe impl init::Zeroable for bindings::drm_gpuvm_bo {}
+
+// A convenience type for the driver's GEM object.
+#[allow(type_alias_bounds)]
+type DriverObject<T: DriverGpuVm> = <T::Driver as drv::Driver>::Object;
+
+/// Trait that must be implemented by DRM drivers to represent a DRM GpuVm (a GPU address space).
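+///
+/// # Examples
+///
+/// A minimal sketch of a driver-side implementation; `MyDriver`, `MyGpuVa`,
+/// `MyVmBo` and `MyCtx` are illustrative driver types, not part of this
+/// abstraction:
+///
+/// ```ignore
+/// struct MyGpuVm;
+///
+/// impl DriverGpuVm for MyGpuVm {
+///     type Driver = MyDriver;
+///     type GpuVa = MyGpuVa;
+///     type GpuVmBo = MyVmBo;
+///     type StepContext = MyCtx;
+///
+///     fn step_map(
+///         self: &mut UpdatingGpuVm<'_, Self>,
+///         op: &mut OpMap<Self>,
+///         _ctx: &mut Self::StepContext,
+///     ) -> Result {
+///         // Program the page tables for [op.addr(), op.addr() + op.range()),
+///         // then insert and link the new GpuVa via `op.map_and_link_va()`.
+///         Ok(())
+///     }
+///
+///     fn step_unmap(
+///         self: &mut UpdatingGpuVm<'_, Self>,
+///         op: &mut OpUnMap<Self>,
+///         _ctx: &mut Self::StepContext,
+///     ) -> Result {
+///         // Tear down the page tables, then reclaim the GpuVa with
+///         // `op.unmap_and_unlink_va()`.
+///         Ok(())
+///     }
+///
+///     fn step_remap(
+///         self: &mut UpdatingGpuVm<'_, Self>,
+///         op: &mut OpReMap<Self>,
+///         _vm_bo: &GpuVmBo<Self>,
+///         _ctx: &mut Self::StepContext,
+///     ) -> Result {
+///         // Unmap the original mapping, then re-create the surviving
+///         // `prev_map()`/`next_map()` parts.
+///         Ok(())
+///     }
+/// }
+/// ```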
+pub trait DriverGpuVm: Sized {
+ /// The parent `Driver` implementation for this `DriverGpuVm`.
+ type Driver: drv::Driver;
+
+ /// The driver-specific GpuVa type.
+ type GpuVa: DriverGpuVa;
+
+ /// The driver-specific GpuVmBo type.
+ type GpuVmBo: DriverGpuVmBo;
+
+ /// The driver-specific context that is kept through the map, unmap and
+ /// remap steps.
+ type StepContext;
+
+ /// Implements the map step for the driver.
+ fn step_map(
+ self: &mut UpdatingGpuVm<'_, Self>,
+ op: &mut OpMap<Self>,
+ ctx: &mut Self::StepContext,
+ ) -> Result;
+
+ /// Implements the unmap step for the driver.
+ fn step_unmap(
+ self: &mut UpdatingGpuVm<'_, Self>,
+ op: &mut OpUnMap<Self>,
+ ctx: &mut Self::StepContext,
+ ) -> Result;
+
+ /// Implements the remap step for the driver.
+ fn step_remap(
+ self: &mut UpdatingGpuVm<'_, Self>,
+ op: &mut OpReMap<Self>,
+ vm_bo: &GpuVmBo<Self>,
+ ctx: &mut Self::StepContext,
+ ) -> Result;
+}
+
+/// The driver-specific context that is passed through the map, unmap and remap
+/// steps.
+struct StepContext<'a, T: DriverGpuVm> {
+ gpuvm: &'a GpuVm<T>,
+ ctx: &'a mut T::StepContext,
+}
+
+/// Trait that must be implemented by DRM drivers to represent a DRM GpuVa (a mapping in GPU address space).
+pub trait DriverGpuVa: Sized {}
+
+/// Trait that must be implemented by DRM drivers to represent a DRM GpuVmBo (a connection between a BO and a VM).
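+///
+/// # Examples
+///
+/// A minimal sketch (the driver type is illustrative); types implementing
+/// [`Default`] get an implementation for free, see below:
+///
+/// ```ignore
+/// struct MyVmBo {
+///     mapped: bool,
+/// }
+///
+/// impl DriverGpuVmBo for MyVmBo {
+///     fn new() -> impl PinInit<Self> {
+///         init!(Self { mapped: false })
+///     }
+/// }
+/// ```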
+pub trait DriverGpuVmBo: Sized {
+ /// Initializes the GpuVmBo object.
+ fn new() -> impl PinInit<Self>;
+}
+
+/// Provides a default implementation of [`DriverGpuVmBo`] for all types
+/// implementing [`Default`].
+impl<T: Default> DriverGpuVmBo for T {
+ fn new() -> impl PinInit<Self> {
+ // SAFETY:
+ // - Ok(()) is always returned.
+ // - The slot is never moved.
+ unsafe {
+ pin_init_from_closure(|slot| {
+ *slot = Self::default();
+ Ok(())
+ })
+ }
+ }
+}
+
+#[repr(transparent)]
+/// A transparent wrapper over `drm_gpuva_op_map`.
+///
+/// This encodes the map operation to be carried out by the driver.
+pub struct OpMap<T: DriverGpuVm>(bindings::drm_gpuva_op_map, PhantomData<T>);
+
+#[repr(transparent)]
+/// Represents a single remap operation generated by [`GpuVm`].
+///
+/// A remap operation is generated when an existing GPU VA mapping is split by
+/// inserting a new one or by partially unmapping existing mappings. Hence, it
+/// consists of a maximum of two map operations and one unmap operation.
+///
+/// The unmap operation takes care of removing the original existing mapping.
+/// The `prev` field is used to remap the preceding part and `next` is used to
+/// remap the subsequent part.
+///
+/// If the start address of the new mapping aligns with the start address of the
+/// old mapping, `prev` will be `None`. Similarly, if the end address of the new
+/// mapping aligns with the end address of the old mapping, `next` will be
+/// `None`.
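+///
+/// For example, mapping a new GPU VA into the middle of an existing mapping
+/// unmaps the whole original mapping and produces both a `prev` and a `next`
+/// map operation for its surviving head and tail.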
+///
+/// Note: the reason for a dedicated remap operation, rather than arbitrary
+/// unmap and map operations, is to give drivers the chance of extracting
+/// driver-specific data for creating the new mappings from the unmap
+/// operation's [`GpuVa`] structure, which typically is embedded in larger
+/// driver-specific structures.
+pub struct OpReMap<T: DriverGpuVm>(bindings::drm_gpuva_op_remap, PhantomData<T>);
+
+impl<T: DriverGpuVm> OpMap<T> {
+ #[inline]
+ /// Returns the base address of the new mapping.
+ pub fn addr(&self) -> u64 {
+ self.0.va.addr
+ }
+
+ #[inline]
+ /// Returns the range of the new mapping.
+ pub fn range(&self) -> u64 {
+ self.0.va.range
+ }
+
+ #[inline]
+ /// Returns the offset within the GEM object.
+ pub fn offset(&self) -> u64 {
+ self.0.gem.offset
+ }
+
+ #[inline]
+ /// Returns the GEM object to map.
+ pub fn object(&self) -> &<T::Driver as drv::Driver>::Object {
+ let p = <<T::Driver as drv::Driver>::Object as IntoGEMObject>::from_gem_obj(self.0.gem.obj);
+ // SAFETY: The GEM object has an active reference for the lifetime of this op
+ unsafe { &*p }
+ }
+
+ /// A helper function to map and link a GpuVa to a GpuVmBo.
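+    ///
+    /// # Examples
+    ///
+    /// A sketch of typical use from a driver's `step_map`; `ctx.vm_bo` is an
+    /// assumed field holding an `ARef<GpuVmBo<Self>>` obtained earlier via
+    /// [`LockedGpuVm::obtain_bo`], and the driver's `GpuVa` type is assumed
+    /// to be zero-initializable:
+    ///
+    /// ```ignore
+    /// fn step_map(
+    ///     self: &mut UpdatingGpuVm<'_, Self>,
+    ///     op: &mut OpMap<Self>,
+    ///     ctx: &mut Self::StepContext,
+    /// ) -> Result {
+    ///     let va = GpuVa::<Self>::new(init::zeroed())?;
+    ///     // On collision the GpuVa is handed back in the `Err` variant.
+    ///     op.map_and_link_va(self, va, &ctx.vm_bo)
+    ///         .map_err(|_va| EEXIST)?;
+    ///     Ok(())
+    /// }
+    /// ```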
+ pub fn map_and_link_va(
+ &mut self,
+ gpuvm: &mut UpdatingGpuVm<'_, T>,
+ gpuva: Pin<KBox<GpuVa<T>>>,
+ gpuvmbo: &ARef<GpuVmBo<T>>,
+ ) -> Result<(), Pin<KBox<GpuVa<T>>>> {
+ // SAFETY: We are handing off the GpuVa ownership and it will not be moved.
+ let p = KBox::leak(unsafe { Pin::into_inner_unchecked(gpuva) });
+ // SAFETY: These C functions are called with the correct invariants
+ unsafe {
+ bindings::drm_gpuva_init_from_op(&mut p.gpuva, &mut self.0);
+ if bindings::drm_gpuva_insert(gpuvm.0.gpuvm() as *mut _, &mut p.gpuva) != 0 {
+ // EEXIST, return the GpuVa to the caller as an error
+ return Err(Pin::new_unchecked(KBox::from_raw(p)));
+ };
+ // SAFETY: This takes a new reference to the gpuvmbo.
+ bindings::drm_gpuva_link(&mut p.gpuva, &gpuvmbo.bo as *const _ as *mut _);
+ }
+ Ok(())
+ }
+}
+
+#[repr(transparent)]
+/// Represents a single unmap operation generated by [`GpuVm`].
+pub struct OpUnMap<T: DriverGpuVm>(bindings::drm_gpuva_op_unmap, PhantomData<T>);
+
+impl<T: DriverGpuVm> OpUnMap<T> {
+ #[inline]
+ /// Returns the GPU VA to unmap.
+ pub fn va(&self) -> Option<&GpuVa<T>> {
+ if self.0.va.is_null() {
+ return None;
+ }
+ // SAFETY: Container invariant is guaranteed for ops structs created for our types.
+ let p = unsafe { crate::container_of!(self.0.va, GpuVa<T>, gpuva) as *mut GpuVa<T> };
+ // SAFETY: The GpuVa object reference is valid per the op_unmap contract
+ Some(unsafe { &*p })
+ }
+
+ #[inline]
+ /// Indicates whether this GPU VA is physically contiguous with the original
+ /// mapping request.
+ ///
+ /// Optionally, if `keep` is set, drivers may keep the actual page table
+ /// mappings for this GPU VA, adding the missing page table entries only and
+ /// subsequently updating the VM accordingly.
+ pub fn keep(&self) -> bool {
+ self.0.keep
+ }
+
+ /// A helper to unmap and unlink a GpuVa from a GpuVmBo.
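+    ///
+    /// # Examples
+    ///
+    /// A sketch of typical use from a driver's `step_unmap`:
+    ///
+    /// ```ignore
+    /// fn step_unmap(
+    ///     self: &mut UpdatingGpuVm<'_, Self>,
+    ///     op: &mut OpUnMap<Self>,
+    ///     _ctx: &mut Self::StepContext,
+    /// ) -> Result {
+    ///     // Tear down the page table entries first; the returned box gives
+    ///     // ownership of the GpuVa back to the driver, which may drop it.
+    ///     if let Some(va) = op.unmap_and_unlink_va() {
+    ///         drop(va);
+    ///     }
+    ///     Ok(())
+    /// }
+    /// ```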
+ pub fn unmap_and_unlink_va(&mut self) -> Option<Pin<KBox<GpuVa<T>>>> {
+ if self.0.va.is_null() {
+ return None;
+ }
+ // SAFETY: Container invariant is guaranteed for ops structs created for our types.
+ let p = unsafe { crate::container_of!(self.0.va, GpuVa<T>, gpuva) as *mut GpuVa<T> };
+
+ // SAFETY: The GpuVa object reference is valid per the op_unmap contract
+ unsafe {
+ bindings::drm_gpuva_unmap(&mut self.0);
+ bindings::drm_gpuva_unlink(self.0.va);
+ }
+
+ // Unlinking/unmapping relinquishes ownership of the GpuVa object,
+ // so clear the pointer
+ self.0.va = core::ptr::null_mut();
+ // SAFETY: The GpuVa object reference is valid per the op_unmap contract
+ Some(unsafe { Pin::new_unchecked(KBox::from_raw(p)) })
+ }
+}
+
+impl<T: DriverGpuVm> OpReMap<T> {
+ #[inline]
+ /// Returns the preceding part of a split mapping, if any.
+ pub fn prev_map(&mut self) -> Option<&mut OpMap<T>> {
+        // SAFETY: The prev pointer must be valid if non-NULL per the op_remap contract
+ unsafe { (self.0.prev as *mut OpMap<T>).as_mut() }
+ }
+
+ #[inline]
+ /// Returns the subsequent part of a split mapping, if any.
+ pub fn next_map(&mut self) -> Option<&mut OpMap<T>> {
+        // SAFETY: The next pointer must be valid if non-NULL per the op_remap contract
+ unsafe { (self.0.next as *mut OpMap<T>).as_mut() }
+ }
+
+ #[inline]
+ /// Returns the unmap operation for the original existing mapping.
+ pub fn unmap(&mut self) -> &mut OpUnMap<T> {
+ // SAFETY: The unmap pointer is always valid per the op_remap contract
+ unsafe { (self.0.unmap as *mut OpUnMap<T>).as_mut().unwrap() }
+ }
+}
+
+#[repr(C)]
+#[pin_data]
+/// A GPU VA range.
+///
+/// Drivers can use `inner` to store additional data.
+pub struct GpuVa<T: DriverGpuVm> {
+ #[pin]
+ gpuva: bindings::drm_gpuva,
+ #[pin]
+ inner: T::GpuVa,
+ #[pin]
+ _p: PhantomPinned,
+}
+
+// SAFETY: This type is safe to zero-init (as far as C is concerned).
+unsafe impl init::Zeroable for bindings::drm_gpuva {}
+
+impl<T: DriverGpuVm> GpuVa<T> {
+ /// Creates a new GPU VA.
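+    ///
+    /// # Examples
+    ///
+    /// A sketch, assuming the driver's `GpuVa` type is zero-initializable:
+    ///
+    /// ```ignore
+    /// // The new GPU VA is inert until it is inserted into a VM via
+    /// // `OpMap::map_and_link_va`.
+    /// let va = GpuVa::<MyGpuVm>::new(init::zeroed())?;
+    /// ```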
+ pub fn new<E>(inner: impl PinInit<T::GpuVa, E>) -> Result<Pin<KBox<GpuVa<T>>>>
+ where
+ Error: From<E>,
+ {
+ KBox::try_pin_init(
+ try_pin_init!(Self {
+ gpuva <- init::zeroed(),
+ inner <- inner,
+ _p: PhantomPinned
+ }),
+ GFP_KERNEL,
+ )
+ }
+
+ #[inline]
+ /// Returns the start address of the GPU VA range.
+ pub fn addr(&self) -> u64 {
+ self.gpuva.va.addr
+ }
+
+ #[inline]
+ /// Returns the range of the GPU VA.
+ pub fn range(&self) -> u64 {
+ self.gpuva.va.range
+ }
+
+ #[inline]
+ /// Returns the offset within the GEM object.
+ pub fn offset(&self) -> u64 {
+ self.gpuva.gem.offset
+ }
+}
+
+#[repr(C)]
+#[pin_data]
+/// The connection between a GEM object and a VM.
+pub struct GpuVmBo<T: DriverGpuVm> {
+ #[pin]
+ bo: bindings::drm_gpuvm_bo,
+ #[pin]
+ inner: T::GpuVmBo,
+ #[pin]
+ _p: PhantomPinned,
+}
+
+impl<T: DriverGpuVm> GpuVmBo<T> {
+ /// Return a reference to the inner driver data for this GpuVmBo
+ pub fn inner(&self) -> &T::GpuVmBo {
+ &self.inner
+ }
+}
+
+// SAFETY: DRM GpuVmBo objects are always reference counted and the get/put functions
+// satisfy the requirements.
+unsafe impl<T: DriverGpuVm> AlwaysRefCounted for GpuVmBo<T> {
+ fn inc_ref(&self) {
+        // SAFETY: The drm_gpuvm_bo_get function satisfies the requirements for inc_ref().
+ unsafe { bindings::drm_gpuvm_bo_get(&self.bo as *const _ as *mut _) };
+ }
+
+ unsafe fn dec_ref(mut obj: NonNull<Self>) {
+        // SAFETY: drm_gpuvm_bo_put() requires holding the gpuva lock, which is
+        // the dma_resv lock by default (we do not support custom locks yet).
+        // The drm_gpuvm_bo_put function satisfies the requirements for dec_ref().
+ unsafe {
+ let resv = (*obj.as_mut().bo.obj).resv;
+ bindings::dma_resv_lock(resv, core::ptr::null_mut());
+ bindings::drm_gpuvm_bo_put(&mut obj.as_mut().bo);
+ bindings::dma_resv_unlock(resv);
+ }
+ }
+}
+
+/// The DRM GPU VA Manager.
+///
+/// It keeps track of a GPU's virtual address space by using an interval tree
+/// structure.
+///
+/// Typically, an instance of [`GpuVm`] is embedded in bigger, driver-specific
+/// structures.
+///
+/// Drivers can pass addresses and ranges in arbitrary units, e.g. bytes or
+/// pages.
+///
+/// There should be one manager instance per GPU virtual address space.
+#[repr(C)]
+#[pin_data]
+pub struct GpuVm<T: DriverGpuVm> {
+ #[pin]
+ gpuvm: Opaque<bindings::drm_gpuvm>,
+ #[pin]
+ inner: UnsafeCell<T>,
+ #[pin]
+ _p: PhantomPinned,
+}
+
+/// # Safety
+///
+/// This function is only safe to be called from the GPUVM C code.
+unsafe extern "C" fn vm_free_callback<T: DriverGpuVm>(raw_gpuvm: *mut bindings::drm_gpuvm) {
+ // SAFETY: Container invariant is guaranteed for objects using our callback.
+ let p = unsafe {
+ crate::container_of!(
+ raw_gpuvm as *mut Opaque<bindings::drm_gpuvm>,
+ GpuVm<T>,
+ gpuvm
+ ) as *mut GpuVm<T>
+ };
+
+ // SAFETY: p is guaranteed to be valid for drm_gpuvm objects using this callback.
+ unsafe { drop(KBox::from_raw(p)) };
+}
+
+/// # Safety
+///
+/// This function is only safe to be called from the GPUVM C code.
+unsafe extern "C" fn vm_bo_alloc_callback<T: DriverGpuVm>() -> *mut bindings::drm_gpuvm_bo {
+ let obj: Result<Pin<KBox<GpuVmBo<T>>>> = KBox::try_pin_init(
+ try_pin_init!(GpuVmBo::<T> {
+ bo <- init::zeroed(),
+ inner <- T::GpuVmBo::new(),
+ _p: PhantomPinned
+ }),
+ GFP_KERNEL,
+ );
+
+ match obj {
+ Ok(obj) =>
+ // SAFETY: The DRM core will keep this object pinned
+ unsafe {
+ let p = KBox::leak(Pin::into_inner_unchecked(obj));
+ &mut p.bo
+ },
+ Err(_) => core::ptr::null_mut(),
+ }
+}
+
+/// # Safety
+///
+/// This function is only safe to be called from the GPUVM C code.
+unsafe extern "C" fn vm_bo_free_callback<T: DriverGpuVm>(raw_vm_bo: *mut bindings::drm_gpuvm_bo) {
+ // SAFETY: Container invariant is guaranteed for objects using this callback.
+ let p = unsafe { crate::container_of!(raw_vm_bo, GpuVmBo<T>, bo) as *mut GpuVmBo<T> };
+
+ // SAFETY: p is guaranteed to be valid for drm_gpuvm_bo objects using this callback.
+ unsafe { drop(KBox::from_raw(p)) };
+}
+
+/// # Safety
+///
+/// This function is only safe to be called from the GPUVM C code.
+unsafe extern "C" fn step_map_callback<T: DriverGpuVm>(
+ op: *mut bindings::drm_gpuva_op,
+ _priv: *mut core::ffi::c_void,
+) -> core::ffi::c_int {
+ // SAFETY: We know this is a map op, and OpMap is a transparent wrapper.
+ let map = unsafe { &mut *((&mut (*op).__bindgen_anon_1.map) as *mut _ as *mut OpMap<T>) };
+ // SAFETY: This is a pointer to a StepContext created inline in sm_map(), which is
+ // guaranteed to outlive this function.
+ let ctx = unsafe { &mut *(_priv as *mut StepContext<'_, T>) };
+
+ from_result(|| {
+ UpdatingGpuVm(ctx.gpuvm).step_map(map, ctx.ctx)?;
+ Ok(0)
+ })
+}
+
+/// # Safety
+///
+/// This function is only safe to be called from the GPUVM C code.
+unsafe extern "C" fn step_remap_callback<T: DriverGpuVm>(
+ op: *mut bindings::drm_gpuva_op,
+ _priv: *mut core::ffi::c_void,
+) -> core::ffi::c_int {
+    // SAFETY: We know this is a remap op, and OpReMap is a transparent wrapper.
+ let remap = unsafe { &mut *((&mut (*op).__bindgen_anon_1.remap) as *mut _ as *mut OpReMap<T>) };
+    // SAFETY: This is a pointer to a StepContext created inline in sm_map() or
+    // sm_unmap(), which is guaranteed to outlive this function.
+ let ctx = unsafe { &mut *(_priv as *mut StepContext<'_, T>) };
+
+ let p_vm_bo = remap.unmap().va().unwrap().gpuva.vm_bo;
+
+ let res = {
+ // SAFETY: vm_bo pointer must be valid and non-null by the step_remap invariants.
+ // Since we grab a ref, this reference's lifetime is until the decref.
+ let vm_bo_ref = unsafe {
+ bindings::drm_gpuvm_bo_get(p_vm_bo);
+ &*(crate::container_of!(p_vm_bo, GpuVmBo<T>, bo) as *mut GpuVmBo<T>)
+ };
+
+ from_result(|| {
+ UpdatingGpuVm(ctx.gpuvm).step_remap(remap, vm_bo_ref, ctx.ctx)?;
+ Ok(0)
+ })
+ };
+
+ // SAFETY: We incremented the refcount above, and the Rust reference we took is
+ // no longer in scope.
+ unsafe { bindings::drm_gpuvm_bo_put(p_vm_bo) };
+
+ res
+}
+
+/// # Safety
+///
+/// This function is only safe to be called from the GPUVM C code.
+unsafe extern "C" fn step_unmap_callback<T: DriverGpuVm>(
+ op: *mut bindings::drm_gpuva_op,
+ _priv: *mut core::ffi::c_void,
+) -> core::ffi::c_int {
+    // SAFETY: We know this is an unmap op, and OpUnMap is a transparent wrapper.
+ let unmap = unsafe { &mut *((&mut (*op).__bindgen_anon_1.unmap) as *mut _ as *mut OpUnMap<T>) };
+    // SAFETY: This is a pointer to a StepContext created inline in sm_map() or
+    // sm_unmap(), which is guaranteed to outlive this function.
+ let ctx = unsafe { &mut *(_priv as *mut StepContext<'_, T>) };
+
+ from_result(|| {
+ UpdatingGpuVm(ctx.gpuvm).step_unmap(unmap, ctx.ctx)?;
+ Ok(0)
+ })
+}
+
+impl<T: DriverGpuVm> GpuVm<T> {
+ const OPS: bindings::drm_gpuvm_ops = bindings::drm_gpuvm_ops {
+ vm_free: Some(vm_free_callback::<T>),
+ op_alloc: None,
+ op_free: None,
+ vm_bo_alloc: Some(vm_bo_alloc_callback::<T>),
+ vm_bo_free: Some(vm_bo_free_callback::<T>),
+ vm_bo_validate: None,
+ sm_step_map: Some(step_map_callback::<T>),
+ sm_step_remap: Some(step_remap_callback::<T>),
+ sm_step_unmap: Some(step_unmap_callback::<T>),
+ };
+
+ fn gpuvm(&self) -> *const bindings::drm_gpuvm {
+ self.gpuvm.get()
+ }
+
+ /// Creates a GPUVM instance.
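+    ///
+    /// # Examples
+    ///
+    /// A sketch of VM creation at device-open time; `MyGpuVm`, `dev` and
+    /// `root_obj` are illustrative, and `MyGpuVm` is assumed to be
+    /// zero-initializable:
+    ///
+    /// ```ignore
+    /// // Manage a 1 TiB VA space, keeping the first 4 KiB reserved for the
+    /// // kernel.
+    /// let vm = GpuVm::<MyGpuVm>::new(
+    ///     c_str!("my-gpuvm"),
+    ///     &dev,
+    ///     &root_obj,
+    ///     0..(1u64 << 40),
+    ///     0..0x1000,
+    ///     init::zeroed(),
+    /// )?;
+    /// ```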
+ pub fn new<E>(
+ name: &'static CStr,
+ dev: &device::Device<T::Driver>,
+ r_obj: &<T::Driver as drv::Driver>::Object,
+ range: Range<u64>,
+ reserve_range: Range<u64>,
+ inner: impl PinInit<T, E>,
+ ) -> Result<ARef<GpuVm<T>>>
+ where
+ Error: From<E>,
+ {
+ let obj: Pin<KBox<Self>> = KBox::try_pin_init(
+ try_pin_init!(Self {
+ // SAFETY: drm_gpuvm_init cannot fail and always initializes the member
+ gpuvm <- unsafe {
+ init::pin_init_from_closure(move |slot: *mut Opaque<bindings::drm_gpuvm> | {
+ // Zero-init required by drm_gpuvm_init
+ *slot = core::mem::zeroed();
+ bindings::drm_gpuvm_init(
+ Opaque::raw_get(slot),
+ name.as_char_ptr(),
+ 0,
+ dev.as_raw(),
+ r_obj.gem_obj() as *const _ as *mut _,
+ range.start,
+ range.end - range.start,
+ reserve_range.start,
+ reserve_range.end - reserve_range.start,
+ &Self::OPS
+ );
+ Ok(())
+ })
+ },
+ // SAFETY: Just passing through to the initializer argument
+ inner <- unsafe {
+ init::pin_init_from_closure(move |slot: *mut UnsafeCell<T> | {
+ inner.__pinned_init(slot as *mut _)
+ })
+ },
+ _p: PhantomPinned
+ }),
+ GFP_KERNEL,
+ )?;
+
+ // SAFETY: We never move out of the object
+ let vm_ref = unsafe {
+ ARef::from_raw(NonNull::new_unchecked(KBox::leak(
+ Pin::into_inner_unchecked(obj),
+ )))
+ };
+
+ Ok(vm_ref)
+ }
+
+ /// Locks the VM, protecting its interval tree against concurrent accesses.
+ ///
+ /// Callers must prove that they have exclusive access to the VM by holding
+ /// some guard type. This encodes the driver-specific locking requirements.
+ ///
+ /// It is up to the caller to ensure that the guard indeed provides the
+ /// required locking.
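+    ///
+    /// # Examples
+    ///
+    /// A sketch assuming the driver serializes updates to this VM with its
+    /// own [`kernel::sync::Mutex`] (names are illustrative):
+    ///
+    /// ```ignore
+    /// let mut guard = my_vm_state.lock();
+    /// let mut locked = vm.lock(&mut guard);
+    /// locked.sm_map(&mut ctx, &bo, addr, size, 0)?;
+    /// drop(locked); // the interval tree may change again after this
+    /// ```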
+ pub fn lock<U: ?Sized, B: Backend>(&self, _guard: &mut Guard<'_, U, B>) -> LockedGpuVm<'_, T> {
+ LockedGpuVm { gpuvm: self }
+ }
+
+    /// Returns true if the given object is external to the GPUVM, i.e., if it
+    /// does not share the DMA reservation object of the GPUVM.
+ pub fn is_extobj(&self, obj: &impl IntoGEMObject) -> bool {
+ let gem = obj.gem_obj() as *const _ as *mut _;
+ // SAFETY: This is safe to call as long as the arguments are valid pointers.
+ unsafe { bindings::drm_gpuvm_is_extobj(self.gpuvm() as *mut _, gem) }
+ }
+}
+
+// SAFETY: DRM GpuVm objects are always reference counted and the get/put functions
+// satisfy the requirements.
+unsafe impl<T: DriverGpuVm> AlwaysRefCounted for GpuVm<T> {
+ fn inc_ref(&self) {
+ // SAFETY: The drm_gpuvm_get function satisfies the requirements for inc_ref().
+ unsafe { bindings::drm_gpuvm_get(&self.gpuvm as *const _ as *mut _) };
+ }
+
+ unsafe fn dec_ref(obj: NonNull<Self>) {
+ // SAFETY: The drm_gpuvm_put function satisfies the requirements for dec_ref().
+ unsafe { bindings::drm_gpuvm_put(Opaque::raw_get(&(*obj.as_ptr()).gpuvm)) };
+ }
+}
+
+/// The object returned after a call to [`GpuVm::lock`].
+///
+/// This object has access to operations that modify the VM's interval tree.
+pub struct LockedGpuVm<'a, T: DriverGpuVm> {
+ gpuvm: &'a GpuVm<T>,
+}
+
+impl<T: DriverGpuVm> LockedGpuVm<'_, T> {
+ /// Finds the [`GpuVmBo`] object that connects `obj` to this VM.
+ ///
+ /// If found, increases the reference count of the GpuVmBo object
+ /// accordingly.
+ pub fn find_bo(&mut self, obj: &DriverObject<T>) -> Option<ARef<GpuVmBo<T>>> {
+ // SAFETY: LockedGpuVm implies the right locks are held.
+ let p = unsafe {
+ bindings::drm_gpuvm_bo_find(
+ self.gpuvm.gpuvm() as *mut _,
+ obj.gem_obj() as *const _ as *mut _,
+ )
+ };
+ if p.is_null() {
+ None
+ } else {
+ // SAFETY: All the drm_gpuvm_bo objects in this GpuVm are always allocated by us as GpuVmBo<T>.
+ let p = unsafe { crate::container_of!(p, GpuVmBo<T>, bo) as *mut GpuVmBo<T> };
+ // SAFETY: We checked for NULL above, and the types ensure that
+ // this object was created by vm_bo_alloc_callback<T>.
+ Some(unsafe { ARef::from_raw(NonNull::new_unchecked(p)) })
+ }
+ }
+
+ /// Obtains the [`GpuVmBo`] object that connects `obj` to this VM.
+ ///
+    /// This connection is unique, so an instance of [`GpuVmBo`] will be
+ /// allocated for `obj` once, and that instance will be returned from that
+ /// point forward.
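+    ///
+    /// A sketch of typical use, with `locked` obtained from [`GpuVm::lock`]:
+    ///
+    /// ```ignore
+    /// // The returned `ARef` keeps the GpuVmBo alive across the map steps.
+    /// let vm_bo = locked.obtain_bo(&bo)?;
+    /// ```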
+ pub fn obtain_bo(&mut self, obj: &DriverObject<T>) -> Result<ARef<GpuVmBo<T>>> {
+ // SAFETY: LockedGpuVm implies the right locks are held.
+ let p = unsafe {
+ bindings::drm_gpuvm_bo_obtain(
+ self.gpuvm.gpuvm() as *mut _,
+ obj.gem_obj() as *const _ as *mut _,
+ )
+ };
+ if p.is_null() {
+ Err(ENOMEM)
+ } else {
+ // SAFETY: Container invariant is guaranteed for GpuVmBo objects for this GpuVm.
+ let p = unsafe { crate::container_of!(p, GpuVmBo<T>, bo) as *mut GpuVmBo<T> };
+ // SAFETY: We checked for NULL above, and the types ensure that
+ // this object was created by vm_bo_alloc_callback<T>.
+ Ok(unsafe { ARef::from_raw(NonNull::new_unchecked(p)) })
+ }
+ }
+
+ /// Iterates the given range of the GPU VA space. It utilizes
+ /// [`DriverGpuVm`] to call back into the driver providing the split and
+ /// merge steps.
+ ///
+ /// A sequence of callbacks can contain map, unmap and remap operations, but
+ /// the sequence of callbacks might also be empty if no operation is
+ /// required, e.g. if the requested mapping already exists in the exact same
+ /// way.
+ ///
+ /// There can be an arbitrary amount of unmap operations, a maximum of two
+ /// remap operations and a single map operation. The latter one represents
+ /// the original map operation requested by the caller.
+ ///
+ /// # Arguments
+ ///
+ /// - `ctx`: A driver-specific context.
+ /// - `req_obj`: The GEM object to map.
+ /// - `req_addr`: The start address of the new mapping.
+ /// - `req_range`: The range of the mapping.
+ /// - `req_offset`: The offset into the GEM object.
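+    ///
+    /// # Examples
+    ///
+    /// A sketch of a synchronous map; `MyCtx` and its `vm_bo` field are
+    /// illustrative:
+    ///
+    /// ```ignore
+    /// let mut locked = vm.lock(&mut guard);
+    /// let vm_bo = locked.obtain_bo(&bo)?;
+    /// let mut ctx = MyCtx { vm_bo };
+    /// // Map `size` bytes of `bo` at GPU VA `addr`, starting at offset 0
+    /// // into the object; this drives the step_map/step_remap/step_unmap
+    /// // callbacks as needed.
+    /// locked.sm_map(&mut ctx, &bo, addr, size, 0)?;
+    /// ```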
+ pub fn sm_map(
+ &mut self,
+ ctx: &mut T::StepContext,
+ req_obj: &DriverObject<T>,
+ req_addr: u64,
+ req_range: u64,
+ req_offset: u64,
+ ) -> Result {
+ let mut ctx = StepContext {
+ ctx,
+ gpuvm: self.gpuvm,
+ };
+
+ // SAFETY: LockedGpuVm implies the right locks are held.
+ to_result(unsafe {
+ bindings::drm_gpuvm_sm_map(
+ self.gpuvm.gpuvm() as *mut _,
+ &mut ctx as *mut _ as *mut _,
+ req_addr,
+ req_range,
+ req_obj.gem_obj() as *const _ as *mut _,
+ req_offset,
+ )
+ })
+ }
+
+ /// Iterates the given range of the GPU VA space. It utilizes
+ /// [`DriverGpuVm`] to call back into the driver providing the operations to
+ /// unmap and, if required, split existent mappings.
+ ///
+ /// A sequence of callbacks can contain unmap and remap operations,
+ /// depending on whether there are actual overlapping mappings to split.
+ ///
+ /// There can be an arbitrary amount of unmap operations and a maximum of
+ /// two remap operations.
+ ///
+ /// # Arguments
+ ///
+ /// - `ctx`: A driver-specific context.
+ /// - `req_addr`: The start address of the range to unmap.
+ /// - `req_range`: The range of the mappings to unmap.
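+    ///
+    /// # Examples
+    ///
+    /// A sketch, with `locked` and `ctx` as in [`LockedGpuVm::sm_map`]:
+    ///
+    /// ```ignore
+    /// // Unmap everything overlapping [addr, addr + size); mappings that
+    /// // merely overlap the edges are split, producing remap steps.
+    /// locked.sm_unmap(&mut ctx, addr, size)?;
+    /// ```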
+ pub fn sm_unmap(&mut self, ctx: &mut T::StepContext, req_addr: u64, req_range: u64) -> Result {
+ let mut ctx = StepContext {
+ ctx,
+ gpuvm: self.gpuvm,
+ };
+
+ // SAFETY: LockedGpuVm implies the right locks are held.
+ to_result(unsafe {
+ bindings::drm_gpuvm_sm_unmap(
+ self.gpuvm.gpuvm() as *mut _,
+ &mut ctx as *mut _ as *mut _,
+ req_addr,
+ req_range,
+ )
+ })
+ }
+}
+
+impl<T: DriverGpuVm> Deref for LockedGpuVm<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ // SAFETY: The existence of this LockedGpuVm implies the lock is held,
+ // so this is the only reference
+ unsafe { &*self.gpuvm.inner.get() }
+ }
+}
+
+impl<T: DriverGpuVm> DerefMut for LockedGpuVm<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: The existence of this LockedGpuVm implies the lock is held,
+ // so this is the only reference
+ unsafe { &mut *self.gpuvm.inner.get() }
+ }
+}
+
+/// A state representing a GPU VM that is being updated.
+pub struct UpdatingGpuVm<'a, T: DriverGpuVm>(&'a GpuVm<T>);
+
+impl<T: DriverGpuVm> UpdatingGpuVm<'_, T> {}
+
+impl<T: DriverGpuVm> Deref for UpdatingGpuVm<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ // SAFETY: The existence of this UpdatingGpuVm implies the lock is held,
+ // so this is the only reference
+ unsafe { &*self.0.inner.get() }
+ }
+}
+
+impl<T: DriverGpuVm> DerefMut for UpdatingGpuVm<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ // SAFETY: The existence of this UpdatingGpuVm implies the lock is held,
+ // so this is the only reference
+ unsafe { &mut *self.0.inner.get() }
+ }
+}
+
+// SAFETY: All our trait methods take locks
+unsafe impl<T: DriverGpuVm> Sync for GpuVm<T> {}
+// SAFETY: All our trait methods take locks
+unsafe impl<T: DriverGpuVm> Send for GpuVm<T> {}
+
+// SAFETY: All our trait methods take locks
+unsafe impl<T: DriverGpuVm> Sync for GpuVmBo<T> {}
+// SAFETY: All our trait methods take locks
+unsafe impl<T: DriverGpuVm> Send for GpuVmBo<T> {}
@@ -6,4 +6,6 @@
pub mod drv;
pub mod file;
pub mod gem;
+#[cfg(CONFIG_DRM_GPUVM = "y")]
+pub mod gpuvm;
pub mod ioctl;