@@ -120,6 +120,9 @@ int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
unsigned int asid);
void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);
void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid);
+int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
+ struct vhost_iotlb *iotlb,
+ unsigned int asid);
int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev);

#define mlx5_vdpa_warn(__dev, format, ...) \
@@ -489,14 +489,6 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
}
}

-static void _mlx5_vdpa_destroy_cvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
-{
- if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
- return;
-
- prune_iotlb(mvdev);
-}
-
static void _mlx5_vdpa_destroy_dvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
{
struct mlx5_vdpa_mr *mr = &mvdev->mr;
@@ -522,25 +514,14 @@ void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
mutex_lock(&mr->mkey_mtx);

_mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
- _mlx5_vdpa_destroy_cvq_mr(mvdev, asid);

mutex_unlock(&mr->mkey_mtx);
}

void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
{
- mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_CVQ_GROUP]);
mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]);
-}
-
-static int _mlx5_vdpa_create_cvq_mr(struct mlx5_vdpa_dev *mvdev,
- struct vhost_iotlb *iotlb,
- unsigned int asid)
-{
- if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
- return 0;
-
- return dup_iotlb(mvdev, iotlb);
+ prune_iotlb(mvdev);
}

static int _mlx5_vdpa_create_dvq_mr(struct mlx5_vdpa_dev *mvdev,
@@ -572,22 +553,7 @@ static int _mlx5_vdpa_create_dvq_mr(struct mlx5_vdpa_dev *mvdev,
static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
struct vhost_iotlb *iotlb, unsigned int asid)
{
- int err;
-
- err = _mlx5_vdpa_create_dvq_mr(mvdev, iotlb, asid);
- if (err)
- return err;
-
- err = _mlx5_vdpa_create_cvq_mr(mvdev, iotlb, asid);
- if (err)
- goto out_err;
-
- return 0;
-
-out_err:
- _mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
-
- return err;
+ return _mlx5_vdpa_create_dvq_mr(mvdev, iotlb, asid);
}

int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
@@ -620,7 +586,24 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
return err;
}

+int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
+ struct vhost_iotlb *iotlb,
+ unsigned int asid)
+{
+ if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
+ return 0;
+
+ prune_iotlb(mvdev);
+ return dup_iotlb(mvdev, iotlb);
+}
+
int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev)
{
- return mlx5_vdpa_create_mr(mvdev, NULL, 0);
+ int err;
+
+ err = mlx5_vdpa_create_mr(mvdev, NULL, 0);
+ if (err)
+ return err;
+
+ return mlx5_vdpa_update_cvq_iotlb(mvdev, NULL, 0);
}
@@ -2884,10 +2884,13 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
return err;
}

- if (change_map)
+ if (change_map) {
err = mlx5_vdpa_change_map(mvdev, iotlb, asid);
+ if (err)
+ return err;
+ }

- return err;
+ return mlx5_vdpa_update_cvq_iotlb(mvdev, iotlb, asid);
}

static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,