@@ -305,6 +305,7 @@ struct iommufd_hwpt_paging {
struct iommufd_hwpt_nested {
struct iommufd_hw_pagetable common;
struct iommufd_hwpt_paging *parent;
+ struct iommufd_viommu *viommu;
};
static inline bool hwpt_is_paging(struct iommufd_hw_pagetable *hwpt)
@@ -430,7 +430,7 @@ enum iommu_hwpt_data_type {
* @size: sizeof(struct iommu_hwpt_alloc)
* @flags: Combination of enum iommufd_hwpt_alloc_flags
* @dev_id: The device to allocate this HWPT for
- * @pt_id: The IOAS or HWPT to connect this HWPT to
+ * @pt_id: The IOAS or HWPT or vIOMMU to connect this HWPT to
* @out_hwpt_id: The ID of the new HWPT
* @__reserved: Must be 0
* @data_type: One of enum iommu_hwpt_data_type
@@ -449,11 +449,11 @@ enum iommu_hwpt_data_type {
* IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a
* nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags.
*
- * A user-managed nested HWPT will be created from a given parent HWPT via
- * @pt_id, in which the parent HWPT must be allocated previously via the
- * same ioctl from a given IOAS (@pt_id). In this case, the @data_type
- * must be set to a pre-defined type corresponding to an I/O page table
- * type supported by the underlying IOMMU hardware.
+ * A user-managed nested HWPT will be created from a given vIOMMU (wrapping a
+ * parent HWPT) or a parent HWPT via @pt_id, in which case the parent HWPT
+ * must have been allocated previously via the same ioctl from a given IOAS
+ * (@pt_id). The @data_type must be set to a pre-defined type corresponding
+ * to an I/O page table type supported by the underlying IOMMU hardware.
*
* If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and
* @data_uptr should be zero. Otherwise, both @data_len and @data_uptr
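As a usage illustration of the extended @pt_id semantics, here is a minimal
userspace sketch, assuming <linux/iommufd.h> from a kernel with this series
applied; the helper name and the way the vIOMMU ID was obtained (from the
vIOMMU allocation ioctl introduced earlier in the series) are illustrative,
not part of this patch:

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Allocate a nested HWPT with a vIOMMU as @pt_id (illustrative helper) */
static int alloc_nested_hwpt(int iommufd, __u32 dev_id, __u32 viommu_id,
			     __u32 data_type, void *data, __u32 data_len,
			     __u32 *out_hwpt_id)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.dev_id = dev_id,
		.pt_id = viommu_id,	/* now accepts a vIOMMU ID too */
		.data_type = data_type,	/* vendor type, not DATA_NONE */
		.data_len = data_len,
		.data_uptr = (uintptr_t)data,
	};

	if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd))
		return -errno;
	*out_hwpt_id = cmd.out_hwpt_id;
	return 0;
}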
@@ -57,6 +57,9 @@ void iommufd_hwpt_nested_destroy(struct iommufd_object *obj)
container_of(obj, struct iommufd_hwpt_nested, common.obj);

__iommufd_hwpt_destroy(&hwpt_nested->common);
+
+ if (hwpt_nested->viommu)
+ refcount_dec(&hwpt_nested->viommu->obj.users);
refcount_dec(&hwpt_nested->parent->common.obj.users);
}
@@ -213,6 +216,7 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
*/
static struct iommufd_hwpt_nested *
iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
+ struct iommufd_viommu *viommu,
struct iommufd_hwpt_paging *parent,
struct iommufd_device *idev, u32 flags,
const struct iommu_user_data *user_data)
@@ -235,13 +239,16 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
return ERR_CAST(hwpt_nested);
hwpt = &hwpt_nested->common;
+ if (viommu)
+ refcount_inc(&viommu->obj.users);
+ hwpt_nested->viommu = viommu;
refcount_inc(&parent->common.obj.users);
hwpt_nested->parent = parent;
hwpt->domain = ops->domain_alloc_user(idev->dev,
flags & ~IOMMU_HWPT_FAULT_ID_VALID,
parent->common.domain,
- NULL, user_data);
+ viommu, user_data);
if (IS_ERR(hwpt->domain)) {
rc = PTR_ERR(hwpt->domain);
hwpt->domain = NULL;
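On the driver side, the call above now forwards the vIOMMU instead of NULL.
Here is a minimal sketch of how a driver's ->domain_alloc_user might consume
it, assuming the five-argument signature implied by this call site; the
example_* names and the per-domain field are illustrative, not from this
patch:

struct example_domain {
	struct iommu_domain core;
	struct iommufd_viommu *viommu;	/* borrowed; hwpt_nested holds the refcount */
};

static struct iommu_domain *
example_domain_alloc_user(struct device *dev, u32 flags,
			  struct iommu_domain *parent,
			  struct iommufd_viommu *viommu,
			  const struct iommu_user_data *user_data)
{
	struct example_domain *domain;

	/* Nested domains require vendor data describing the stage-1 table */
	if (parent && !user_data)
		return ERR_PTR(-EINVAL);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return ERR_PTR(-ENOMEM);

	/*
	 * @viommu is non-NULL only when the nested HWPT was allocated
	 * through a vIOMMU object (see the NULL argument kept in the
	 * plain paging-parent path below), so per-VM state can be
	 * derived from it here. A real driver would also initialize
	 * the domain type and ops before returning.
	 */
	domain->viommu = viommu;
	return &domain->core;
}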
@@ -308,7 +315,7 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
struct iommufd_hwpt_nested *hwpt_nested;
hwpt_nested = iommufd_hwpt_nested_alloc(
- ucmd->ictx,
+ ucmd->ictx, NULL,
container_of(pt_obj, struct iommufd_hwpt_paging,
common.obj),
idev, cmd->flags, &user_data);
@@ -317,6 +324,19 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
goto out_unlock;
}
hwpt = &hwpt_nested->common;
+ } else if (pt_obj->type == IOMMUFD_OBJ_VIOMMU) {
+ struct iommufd_hwpt_nested *hwpt_nested;
+ struct iommufd_viommu *viommu;
+
+ viommu = container_of(pt_obj, struct iommufd_viommu, obj);
+ hwpt_nested = iommufd_hwpt_nested_alloc(
+ ucmd->ictx, viommu, viommu->hwpt, idev,
+ cmd->flags, &user_data);
+ if (IS_ERR(hwpt_nested)) {
+ rc = PTR_ERR(hwpt_nested);
+ goto out_unlock;
+ }
+ hwpt = &hwpt_nested->common;
} else {
rc = -EINVAL;
goto out_put_pt;
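For reference, the new branch relies on the vIOMMU object wrapping the paging
HWPT it was allocated from. Below is a hedged sketch of the layout this hunk
assumes, limited to the two fields actually dereferenced above (obj, which
@pt_obj resolves to via container_of(), and hwpt, used as the nesting
parent); anything beyond them is out of scope here:

struct iommufd_viommu {
	struct iommufd_object obj;		/* what @pt_obj resolves to */
	struct iommufd_hwpt_paging *hwpt;	/* paging parent wrapped by the vIOMMU */
	/* ... */
};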