
[V1,mlx5-next,4/4] vfio/mlx5: Run the SAVE state command in an async mode

Message ID 20220508131053.241347-5-yishaih@nvidia.com (mailing list archive)
State New, archived
Series Improve mlx5 live migration driver

Commit Message

Yishai Hadas May 8, 2022, 1:10 p.m. UTC
Use the PF asynchronous command mode for the SAVE state command.

This enables returning to user space earlier upon successfully issuing
the command, and improves latency by letting things run in parallel.

Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
---
 drivers/vfio/pci/mlx5/cmd.c  | 72 ++++++++++++++++++++++++++++++++++--
 drivers/vfio/pci/mlx5/cmd.h  | 17 +++++++++
 drivers/vfio/pci/mlx5/main.c | 56 +++++++++++++++++++++++++---
 3 files changed, 136 insertions(+), 9 deletions(-)
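
To show what the asynchronous mode buys the consumer, here is a minimal, hypothetical userspace sketch of draining the saving migration data fd now that the driver wakes pollers when the SAVE callback completes. Nothing below is part of this patch: the fd name, buffer handling and the non-blocking open are assumptions.

#include <errno.h>
#include <poll.h>
#include <unistd.h>

/*
 * Hypothetical consumer of the saving migration data fd ('data_fd', 'buf'
 * and 'len' are assumptions, not defined by this patch).  The fd is assumed
 * to have been opened non-blocking, so read() returns -1/EAGAIN until the
 * asynchronous SAVE command completes and the driver wakes poll_wait.
 */
static ssize_t drain_save_chunk(int data_fd, void *buf, size_t len)
{
	struct pollfd pfd = { .fd = data_fd, .events = POLLIN };
	ssize_t n;

	for (;;) {
		n = read(data_fd, buf, len);
		if (n >= 0)
			return n;	/* saved state bytes, or 0 at end of stream */
		if (errno != EAGAIN)
			return -1;	/* e.g. ENODEV once the file is disabled or hit an error */
		/* No data yet: sleep until the async callback signals readiness */
		if (poll(&pfd, 1, -1) < 0)
			return -1;
	}
}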

Comments

Alex Williamson May 9, 2022, 5:29 p.m. UTC | #1
On Sun, 8 May 2022 16:10:53 +0300
Yishai Hadas <yishaih@nvidia.com> wrote:
> diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
> index 2a20b7435393..d053d314b745 100644
> --- a/drivers/vfio/pci/mlx5/cmd.h
> +++ b/drivers/vfio/pci/mlx5/cmd.h
> @@ -10,10 +10,20 @@
>  #include <linux/vfio_pci_core.h>
>  #include <linux/mlx5/driver.h>
>  
> +struct mlx5vf_async_data {
> +	struct mlx5_async_work cb_work;
> +	struct work_struct work;
> +	int status;
> +	u32 pdn;
> +	u32 mkey;
> +	void *out;
> +};
> +
>  struct mlx5_vf_migration_file {
>  	struct file *filp;
>  	struct mutex lock;
>  	bool disabled;
> +	u8 is_err:1;

Convert @disabled to bit field as well to pack these?
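
For illustration only, the packed layout being suggested would look roughly like this (just the two flags change):

struct mlx5_vf_migration_file {
	struct file *filp;
	struct mutex lock;
	u8 disabled:1;
	u8 is_err:1;
	/* ... rest of the struct unchanged ... */
};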

...
> @@ -558,6 +592,13 @@ static int mlx5vf_pci_probe(struct pci_dev *pdev,
>  		return -ENOMEM;
>  	vfio_pci_core_init_device(&mvdev->core_device, pdev, &mlx5vf_pci_ops);
>  	mlx5vf_cmd_set_migratable(mvdev);
> +	if (mvdev->migrate_cap) {
> +		mvdev->cb_wq = alloc_ordered_workqueue("mlx5vf_wq", 0);
> +		if (!mvdev->cb_wq) {
> +			ret = -ENOMEM;
> +			goto out_free;
> +		}
> +	}

Should this be rolled into mlx5vf_cmd_set_migratable(), updating the
function to return -errno?

>  	ret = vfio_pci_core_register_device(&mvdev->core_device);
>  	if (ret)
>  		goto out_free;
> @@ -566,8 +607,11 @@ static int mlx5vf_pci_probe(struct pci_dev *pdev,
>  	return 0;
>  
>  out_free:
> -	if (mvdev->migrate_cap)
> +	if (mvdev->migrate_cap) {
>  		mlx5vf_cmd_remove_migratable(mvdev);
> +		if (mvdev->cb_wq)
> +			destroy_workqueue(mvdev->cb_wq);
> +	}
>  	vfio_pci_core_uninit_device(&mvdev->core_device);
>  	kfree(mvdev);
>  	return ret;
> @@ -578,8 +622,10 @@ static void mlx5vf_pci_remove(struct pci_dev *pdev)
>  	struct mlx5vf_pci_core_device *mvdev = dev_get_drvdata(&pdev->dev);
>  
>  	vfio_pci_core_unregister_device(&mvdev->core_device);
> -	if (mvdev->migrate_cap)
> +	if (mvdev->migrate_cap) {
>  		mlx5vf_cmd_remove_migratable(mvdev);
> +		destroy_workqueue(mvdev->cb_wq);
> +	}
>  	vfio_pci_core_uninit_device(&mvdev->core_device);
>  	kfree(mvdev);
>  }

This looks like more evidence for expanding remove_migratable(),
rolling this in as well.  If this workqueue were set up in
set_migratable() we'd not need the special condition to test if cb_wq
is NULL while migrate_cap is set.  Thanks,

Alex
Yishai Hadas May 10, 2022, 8:56 a.m. UTC | #2
On 09/05/2022 20:29, Alex Williamson wrote:
> On Sun, 8 May 2022 16:10:53 +0300
> Yishai Hadas <yishaih@nvidia.com> wrote:
>> diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
>> index 2a20b7435393..d053d314b745 100644
>> --- a/drivers/vfio/pci/mlx5/cmd.h
>> +++ b/drivers/vfio/pci/mlx5/cmd.h
>> @@ -10,10 +10,20 @@
>>   #include <linux/vfio_pci_core.h>
>>   #include <linux/mlx5/driver.h>
>>   
>> +struct mlx5vf_async_data {
>> +	struct mlx5_async_work cb_work;
>> +	struct work_struct work;
>> +	int status;
>> +	u32 pdn;
>> +	u32 mkey;
>> +	void *out;
>> +};
>> +
>>   struct mlx5_vf_migration_file {
>>   	struct file *filp;
>>   	struct mutex lock;
>>   	bool disabled;
>> +	u8 is_err:1;
> Convert @disabled to bit field as well to pack these?

OK

>
> ...
>> @@ -558,6 +592,13 @@ static int mlx5vf_pci_probe(struct pci_dev *pdev,
>>   		return -ENOMEM;
>>   	vfio_pci_core_init_device(&mvdev->core_device, pdev, &mlx5vf_pci_ops);
>>   	mlx5vf_cmd_set_migratable(mvdev);
>> +	if (mvdev->migrate_cap) {
>> +		mvdev->cb_wq = alloc_ordered_workqueue("mlx5vf_wq", 0);
>> +		if (!mvdev->cb_wq) {
>> +			ret = -ENOMEM;
>> +			goto out_free;
>> +		}
>> +	}
> Should this be rolled into mlx5vf_cmd_set_migratable(), updating the
> function to return -errno?

This can be done; however, I would still keep the function void, as you
previously suggested.

If the WQ somehow can't be created, it just means that the migratable
functionality won't be activated and its cap won't be set.
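
A rough sketch of keeping the function void, with the workqueue allocated inside it and migrate_cap simply left clear on failure (illustrative only; the exact V2 shape may differ):

void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev)
{
	/* ... existing capability checks; return early if unsupported ... */

	mvdev->cb_wq = alloc_ordered_workqueue("mlx5vf_wq", 0);
	if (!mvdev->cb_wq)
		return;	/* cap stays clear: migration is simply not offered */

	/* ... existing notifier registration and state setup ... */
	mvdev->migrate_cap = 1;
}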

>>   	ret = vfio_pci_core_register_device(&mvdev->core_device);
>>   	if (ret)
>>   		goto out_free;
>> @@ -566,8 +607,11 @@ static int mlx5vf_pci_probe(struct pci_dev *pdev,
>>   	return 0;
>>   
>>   out_free:
>> -	if (mvdev->migrate_cap)
>> +	if (mvdev->migrate_cap) {
>>   		mlx5vf_cmd_remove_migratable(mvdev);
>> +		if (mvdev->cb_wq)
>> +			destroy_workqueue(mvdev->cb_wq);
>> +	}
>>   	vfio_pci_core_uninit_device(&mvdev->core_device);
>>   	kfree(mvdev);
>>   	return ret;
>> @@ -578,8 +622,10 @@ static void mlx5vf_pci_remove(struct pci_dev *pdev)
>>   	struct mlx5vf_pci_core_device *mvdev = dev_get_drvdata(&pdev->dev);
>>   
>>   	vfio_pci_core_unregister_device(&mvdev->core_device);
>> -	if (mvdev->migrate_cap)
>> +	if (mvdev->migrate_cap) {
>>   		mlx5vf_cmd_remove_migratable(mvdev);
>> +		destroy_workqueue(mvdev->cb_wq);
>> +	}
>>   	vfio_pci_core_uninit_device(&mvdev->core_device);
>>   	kfree(mvdev);
>>   }
> This looks like more evidence for expanding remove_migratable(),
> rolling this in as well.  If this workqueue were set up in
> set_migratable() we'd not need the special condition to test if cb_wq
> is NULL while migrate_cap is set.  Thanks,
>
> Alex
>
Makes sense, will be part of V2.

Yishai
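
For completeness, the matching teardown side of Alex's suggestion could look roughly like this, letting both the probe error path and remove() call it unconditionally (illustrative only; the actual V2 may differ):

void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
{
	if (!mvdev->migrate_cap)
		return;

	/* ... existing teardown: unregister the notifier, etc. ... */
	destroy_workqueue(mvdev->cb_wq);
}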

Patch

diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index 9a6e3d3e0d44..0538e44e7eac 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -78,6 +78,7 @@  static int mlx5fv_vf_event(struct notifier_block *nb,
 		mvdev->mdev_detach = false;
 		break;
 	case MLX5_PF_NOTIFY_DISABLE_VF:
+		mlx5vf_disable_fds(mvdev);
 		mvdev->mdev_detach = true;
 		break;
 	default:
@@ -206,11 +207,56 @@  static int _create_state_mkey(struct mlx5_core_dev *mdev, u32 pdn,
 	return err;
 }
 
+void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work)
+{
+	struct mlx5vf_async_data *async_data = container_of(_work,
+		struct mlx5vf_async_data, work);
+	struct mlx5_vf_migration_file *migf = container_of(async_data,
+		struct mlx5_vf_migration_file, async_data);
+	struct mlx5_core_dev *mdev = migf->mvdev->mdev;
+
+	mutex_lock(&migf->lock);
+	if (async_data->status) {
+		migf->is_err = true;
+		wake_up_interruptible(&migf->poll_wait);
+	}
+	mutex_unlock(&migf->lock);
+
+	mlx5_core_destroy_mkey(mdev, async_data->mkey);
+	dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
+	mlx5_core_dealloc_pd(mdev, async_data->pdn);
+	kvfree(async_data->out);
+	fput(migf->filp);
+}
+
+static void mlx5vf_save_callback(int status, struct mlx5_async_work *context)
+{
+	struct mlx5vf_async_data *async_data = container_of(context,
+			struct mlx5vf_async_data, cb_work);
+	struct mlx5_vf_migration_file *migf = container_of(async_data,
+			struct mlx5_vf_migration_file, async_data);
+
+	if (!status) {
+		WRITE_ONCE(migf->total_length,
+			   MLX5_GET(save_vhca_state_out, async_data->out,
+				    actual_image_size));
+		wake_up_interruptible(&migf->poll_wait);
+	}
+
+	/*
+	 * The error and the cleanup flows can't run from an
+	 * interrupt context
+	 */
+	async_data->status = status;
+	queue_work(migf->mvdev->cb_wq, &async_data->work);
+}
+
 int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
 			       struct mlx5_vf_migration_file *migf)
 {
-	u32 out[MLX5_ST_SZ_DW(save_vhca_state_out)] = {};
+	u32 out_size = MLX5_ST_SZ_BYTES(save_vhca_state_out);
 	u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {};
+	struct mlx5vf_async_data *async_data;
 	struct mlx5_core_dev *mdev;
 	u32 pdn, mkey;
 	int err;
@@ -240,13 +286,31 @@  int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
 	MLX5_SET(save_vhca_state_in, in, mkey, mkey);
 	MLX5_SET(save_vhca_state_in, in, size, migf->total_length);
 
-	err = mlx5_cmd_exec_inout(mdev, save_vhca_state, in, out);
+	async_data = &migf->async_data;
+	async_data->out = kvzalloc(out_size, GFP_KERNEL);
+	if (!async_data->out) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	/* no data exists till the callback comes back */
+	migf->total_length = 0;
+	get_file(migf->filp);
+	async_data->mkey = mkey;
+	async_data->pdn = pdn;
+	err = mlx5_cmd_exec_cb(&migf->async_ctx, in, sizeof(in),
+			       async_data->out,
+			       out_size, mlx5vf_save_callback,
+			       &async_data->cb_work);
 	if (err)
 		goto err_exec;
 
-	migf->total_length = MLX5_GET(save_vhca_state_out, out,
-				      actual_image_size);
+	return 0;
+
 err_exec:
+	fput(migf->filp);
+	kvfree(async_data->out);
+err_out:
 	mlx5_core_destroy_mkey(mdev, mkey);
 err_create_mkey:
 	dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
index 2a20b7435393..d053d314b745 100644
--- a/drivers/vfio/pci/mlx5/cmd.h
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -10,10 +10,20 @@ 
 #include <linux/vfio_pci_core.h>
 #include <linux/mlx5/driver.h>
 
+struct mlx5vf_async_data {
+	struct mlx5_async_work cb_work;
+	struct work_struct work;
+	int status;
+	u32 pdn;
+	u32 mkey;
+	void *out;
+};
+
 struct mlx5_vf_migration_file {
 	struct file *filp;
 	struct mutex lock;
 	bool disabled;
+	u8 is_err:1;
 
 	struct sg_append_table table;
 	size_t total_length;
@@ -23,6 +33,10 @@  struct mlx5_vf_migration_file {
 	struct scatterlist *last_offset_sg;
 	unsigned int sg_last_entry;
 	unsigned long last_offset;
+	struct mlx5vf_pci_core_device *mvdev;
+	wait_queue_head_t poll_wait;
+	struct mlx5_async_ctx async_ctx;
+	struct mlx5vf_async_data async_data;
 };
 
 struct mlx5vf_pci_core_device {
@@ -38,6 +52,7 @@  struct mlx5vf_pci_core_device {
 	spinlock_t reset_lock;
 	struct mlx5_vf_migration_file *resuming_migf;
 	struct mlx5_vf_migration_file *saving_migf;
+	struct workqueue_struct *cb_wq;
 	struct notifier_block nb;
 	struct mlx5_core_dev *mdev;
 	u8 mdev_detach:1;
@@ -54,4 +69,6 @@  int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
 int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
 			       struct mlx5_vf_migration_file *migf);
 void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev);
+void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work);
 #endif /* MLX5_VFIO_CMD_H */
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index 5bda6c0e194c..625ed5743f26 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -134,12 +134,22 @@  static ssize_t mlx5vf_save_read(struct file *filp, char __user *buf, size_t len,
 		return -ESPIPE;
 	pos = &filp->f_pos;
 
+	if (!(filp->f_flags & O_NONBLOCK)) {
+		if (wait_event_interruptible(migf->poll_wait,
+			     READ_ONCE(migf->total_length) || migf->is_err))
+			return -ERESTARTSYS;
+	}
+
 	mutex_lock(&migf->lock);
+	if ((filp->f_flags & O_NONBLOCK) && !READ_ONCE(migf->total_length)) {
+		done = -EAGAIN;
+		goto out_unlock;
+	}
 	if (*pos > migf->total_length) {
 		done = -EINVAL;
 		goto out_unlock;
 	}
-	if (migf->disabled) {
+	if (migf->disabled || migf->is_err) {
 		done = -ENODEV;
 		goto out_unlock;
 	}
@@ -179,9 +189,28 @@  static ssize_t mlx5vf_save_read(struct file *filp, char __user *buf, size_t len,
 	return done;
 }
 
+static __poll_t mlx5vf_save_poll(struct file *filp,
+				 struct poll_table_struct *wait)
+{
+	struct mlx5_vf_migration_file *migf = filp->private_data;
+	__poll_t pollflags = 0;
+
+	poll_wait(filp, &migf->poll_wait, wait);
+
+	mutex_lock(&migf->lock);
+	if (migf->disabled || migf->is_err)
+		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+	else if (READ_ONCE(migf->total_length))
+		pollflags = EPOLLIN | EPOLLRDNORM;
+	mutex_unlock(&migf->lock);
+
+	return pollflags;
+}
+
 static const struct file_operations mlx5vf_save_fops = {
 	.owner = THIS_MODULE,
 	.read = mlx5vf_save_read,
+	.poll = mlx5vf_save_poll,
 	.release = mlx5vf_release_file,
 	.llseek = no_llseek,
 };
@@ -207,7 +236,9 @@  mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev)
 
 	stream_open(migf->filp->f_inode, migf->filp);
 	mutex_init(&migf->lock);
-
+	init_waitqueue_head(&migf->poll_wait);
+	mlx5_cmd_init_async_ctx(mvdev->mdev, &migf->async_ctx);
+	INIT_WORK(&migf->async_data.work, mlx5vf_mig_file_cleanup_cb);
 	ret = mlx5vf_cmd_query_vhca_migration_state(mvdev,
 						    &migf->total_length);
 	if (ret)
@@ -218,6 +249,7 @@  mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev)
 	if (ret)
 		goto out_free;
 
+	migf->mvdev = mvdev;
 	ret = mlx5vf_cmd_save_vhca_state(mvdev, migf);
 	if (ret)
 		goto out_free;
@@ -323,7 +355,7 @@  mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev)
 	return migf;
 }
 
-static void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
+void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
 {
 	if (mvdev->resuming_migf) {
 		mlx5vf_disable_fd(mvdev->resuming_migf);
@@ -331,6 +363,8 @@  static void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
 		mvdev->resuming_migf = NULL;
 	}
 	if (mvdev->saving_migf) {
+		mlx5_cmd_cleanup_async_ctx(&mvdev->saving_migf->async_ctx);
+		cancel_work_sync(&mvdev->saving_migf->async_data.work);
 		mlx5vf_disable_fd(mvdev->saving_migf);
 		fput(mvdev->saving_migf->filp);
 		mvdev->saving_migf = NULL;
@@ -558,6 +592,13 @@  static int mlx5vf_pci_probe(struct pci_dev *pdev,
 		return -ENOMEM;
 	vfio_pci_core_init_device(&mvdev->core_device, pdev, &mlx5vf_pci_ops);
 	mlx5vf_cmd_set_migratable(mvdev);
+	if (mvdev->migrate_cap) {
+		mvdev->cb_wq = alloc_ordered_workqueue("mlx5vf_wq", 0);
+		if (!mvdev->cb_wq) {
+			ret = -ENOMEM;
+			goto out_free;
+		}
+	}
 	ret = vfio_pci_core_register_device(&mvdev->core_device);
 	if (ret)
 		goto out_free;
@@ -566,8 +607,11 @@  static int mlx5vf_pci_probe(struct pci_dev *pdev,
 	return 0;
 
 out_free:
-	if (mvdev->migrate_cap)
+	if (mvdev->migrate_cap) {
 		mlx5vf_cmd_remove_migratable(mvdev);
+		if (mvdev->cb_wq)
+			destroy_workqueue(mvdev->cb_wq);
+	}
 	vfio_pci_core_uninit_device(&mvdev->core_device);
 	kfree(mvdev);
 	return ret;
@@ -578,8 +622,10 @@  static void mlx5vf_pci_remove(struct pci_dev *pdev)
 	struct mlx5vf_pci_core_device *mvdev = dev_get_drvdata(&pdev->dev);
 
 	vfio_pci_core_unregister_device(&mvdev->core_device);
-	if (mvdev->migrate_cap)
+	if (mvdev->migrate_cap) {
 		mlx5vf_cmd_remove_migratable(mvdev);
+		destroy_workqueue(mvdev->cb_wq);
+	}
 	vfio_pci_core_uninit_device(&mvdev->core_device);
 	kfree(mvdev);
 }