
[7/7] dm kcopyd: add simple copy offload support

Message ID 20210817101423.12367-8-selvakuma.s1@samsung.com (mailing list archive)
State New, archived
Series [1/7] block: make bio_map_kern() non static

Commit Message

SelvaKumar S Aug. 17, 2021, 10:14 a.m. UTC
Introduce a copy_jobs list so kcopyd can use copy offload when the
underlying devices support it, and fall back to the existing method
otherwise.

run_copy_job() calls the block layer copy offload API when the source
and destination share the same request queue and that queue supports
copy offload. The count of each successfully copied destination region
is set to zero; failed regions are processed via the existing method.

Signed-off-by: SelvaKumar S <selvakuma.s1@samsung.com>
Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
---
 drivers/md/dm-kcopyd.c | 56 +++++++++++++++++++++++++++++++++++++-----
 1 file changed, 50 insertions(+), 6 deletions(-)
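
For context, a minimal caller-side sketch of the path this patch touches
(not part of the patch; my_target_copy() and my_copy_done() are
hypothetical names, and blkdev_issue_copy()/copy_offload are assumed to
come from the earlier patches in this series rather than mainline).
A device-mapper target hands a copy to kcopyd via dm_kcopyd_copy();
with this patch, dispatch_job() queues the job on the new copy_jobs
list whenever source and destination live on the same gendisk, so
run_copy_job() can try blkdev_issue_copy() before falling back to the
page-based read/write path.

#include <linux/completion.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>

/* Completion callback: read_err/write_err are non-zero on failure. */
static void my_copy_done(int read_err, unsigned long write_err, void *context)
{
	complete(context);
}

/* Copy 'len' sectors from 'src' to 'dst' on the same block device. */
static void my_target_copy(struct dm_kcopyd_client *kc,
			   struct block_device *bdev,
			   sector_t src, sector_t dst, sector_t len,
			   struct completion *done)
{
	struct dm_io_region from = { .bdev = bdev, .sector = src, .count = len };
	struct dm_io_region to   = { .bdev = bdev, .sector = dst, .count = len };

	/*
	 * One destination on the same bdev: with this patch the job is
	 * eligible for the copy_jobs / blkdev_issue_copy() path.
	 */
	dm_kcopyd_copy(kc, &from, 1, &to, 0, my_copy_done, done);
}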

Comments

Mikulas Patocka Aug. 17, 2021, 8:29 p.m. UTC | #1
On Tue, 17 Aug 2021, SelvaKumar S wrote:

> Introduce a copy_jobs list so kcopyd can use copy offload when the
> underlying devices support it, and fall back to the existing method
> otherwise.

dm-kcopyd is usually used on the dm-linear target. And this patchset 
doesn't support passing copy requests through the linear target - so this 
patch doesn't seem useful.

Mikulas

> run_copy_job() calls the block layer copy offload API when the source
> and destination share the same request queue and that queue supports
> copy offload. The count of each successfully copied destination region
> is set to zero; failed regions are processed via the existing method.
> 
> Signed-off-by: SelvaKumar S <selvakuma.s1@samsung.com>
> Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
> ---
>  drivers/md/dm-kcopyd.c | 56 +++++++++++++++++++++++++++++++++++++-----
>  1 file changed, 50 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
> index 37b03ab7e5c9..d9ee105a6127 100644
> --- a/drivers/md/dm-kcopyd.c
> +++ b/drivers/md/dm-kcopyd.c
> @@ -74,18 +74,20 @@ struct dm_kcopyd_client {
>  	atomic_t nr_jobs;
>  
>  /*
> - * We maintain four lists of jobs:
> + * We maintain five lists of jobs:
>   *
> - * i)   jobs waiting for pages
> - * ii)  jobs that have pages, and are waiting for the io to be issued.
> - * iii) jobs that don't need to do any IO and just run a callback
> - * iv) jobs that have completed.
> + * i)	jobs waiting to try copy offload
> + * ii)   jobs waiting for pages
> + * iii)  jobs that have pages, and are waiting for the io to be issued.
> + * iv) jobs that don't need to do any IO and just run a callback
> + * v) jobs that have completed.
>   *
> - * All four of these are protected by job_lock.
> + * All five of these are protected by job_lock.
>   */
>  	spinlock_t job_lock;
>  	struct list_head callback_jobs;
>  	struct list_head complete_jobs;
> +	struct list_head copy_jobs;
>  	struct list_head io_jobs;
>  	struct list_head pages_jobs;
>  };
> @@ -579,6 +581,43 @@ static int run_io_job(struct kcopyd_job *job)
>  	return r;
>  }
>  
> +static int run_copy_job(struct kcopyd_job *job)
> +{
> +	int r, i, count = 0;
> +	unsigned long flags = 0;
> +	struct range_entry srange;
> +
> +	struct request_queue *src_q, *dest_q;
> +
> +	for (i = 0; i < job->num_dests; i++) {
> +		srange.src = job->source.sector;
> +		srange.len = job->source.count;
> +
> +		src_q = bdev_get_queue(job->source.bdev);
> +		dest_q = bdev_get_queue(job->dests[i].bdev);
> +
> +		if (src_q != dest_q && !src_q->limits.copy_offload)
> +			break;
> +
> +		r = blkdev_issue_copy(job->source.bdev, 1, &srange,
> +			job->dests[i].bdev, job->dests[i].sector, GFP_KERNEL, flags);
> +		if (r)
> +			break;
> +
> +		job->dests[i].count = 0;
> +		count++;
> +	}
> +
> +	if (count == job->num_dests) {
> +		push(&job->kc->complete_jobs, job);
> +	} else {
> +		push(&job->kc->pages_jobs, job);
> +		r = 0;
> +	}
> +
> +	return r;
> +}
> +
>  static int run_pages_job(struct kcopyd_job *job)
>  {
>  	int r;
> @@ -659,6 +698,7 @@ static void do_work(struct work_struct *work)
>  	spin_unlock_irq(&kc->job_lock);
>  
>  	blk_start_plug(&plug);
> +	process_jobs(&kc->copy_jobs, kc, run_copy_job);
>  	process_jobs(&kc->complete_jobs, kc, run_complete_job);
>  	process_jobs(&kc->pages_jobs, kc, run_pages_job);
>  	process_jobs(&kc->io_jobs, kc, run_io_job);
> @@ -676,6 +716,8 @@ static void dispatch_job(struct kcopyd_job *job)
>  	atomic_inc(&kc->nr_jobs);
>  	if (unlikely(!job->source.count))
>  		push(&kc->callback_jobs, job);
> +	else if (job->source.bdev->bd_disk == job->dests[0].bdev->bd_disk)
> +		push(&kc->copy_jobs, job);
>  	else if (job->pages == &zero_page_list)
>  		push(&kc->io_jobs, job);
>  	else
> @@ -916,6 +958,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
>  	spin_lock_init(&kc->job_lock);
>  	INIT_LIST_HEAD(&kc->callback_jobs);
>  	INIT_LIST_HEAD(&kc->complete_jobs);
> +	INIT_LIST_HEAD(&kc->copy_jobs);
>  	INIT_LIST_HEAD(&kc->io_jobs);
>  	INIT_LIST_HEAD(&kc->pages_jobs);
>  	kc->throttle = throttle;
> @@ -971,6 +1014,7 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
>  
>  	BUG_ON(!list_empty(&kc->callback_jobs));
>  	BUG_ON(!list_empty(&kc->complete_jobs));
> +	WARN_ON(!list_empty(&kc->copy_jobs));
>  	BUG_ON(!list_empty(&kc->io_jobs));
>  	BUG_ON(!list_empty(&kc->pages_jobs));
>  	destroy_workqueue(kc->kcopyd_wq);
> -- 
> 2.25.1


Patch

diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 37b03ab7e5c9..d9ee105a6127 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -74,18 +74,20 @@  struct dm_kcopyd_client {
 	atomic_t nr_jobs;
 
 /*
- * We maintain four lists of jobs:
+ * We maintain five lists of jobs:
  *
- * i)   jobs waiting for pages
- * ii)  jobs that have pages, and are waiting for the io to be issued.
- * iii) jobs that don't need to do any IO and just run a callback
- * iv) jobs that have completed.
+ * i)	jobs waiting to try copy offload
+ * ii)   jobs waiting for pages
+ * iii)  jobs that have pages, and are waiting for the io to be issued.
+ * iv) jobs that don't need to do any IO and just run a callback
+ * v) jobs that have completed.
  *
- * All four of these are protected by job_lock.
+ * All five of these are protected by job_lock.
  */
 	spinlock_t job_lock;
 	struct list_head callback_jobs;
 	struct list_head complete_jobs;
+	struct list_head copy_jobs;
 	struct list_head io_jobs;
 	struct list_head pages_jobs;
 };
@@ -579,6 +581,43 @@  static int run_io_job(struct kcopyd_job *job)
 	return r;
 }
 
+static int run_copy_job(struct kcopyd_job *job)
+{
+	int r, i, count = 0;
+	unsigned long flags = 0;
+	struct range_entry srange;
+
+	struct request_queue *src_q, *dest_q;
+
+	for (i = 0; i < job->num_dests; i++) {
+		srange.src = job->source.sector;
+		srange.len = job->source.count;
+
+		src_q = bdev_get_queue(job->source.bdev);
+		dest_q = bdev_get_queue(job->dests[i].bdev);
+
+		if (src_q != dest_q && !src_q->limits.copy_offload)
+			break;
+
+		r = blkdev_issue_copy(job->source.bdev, 1, &srange,
+			job->dests[i].bdev, job->dests[i].sector, GFP_KERNEL, flags);
+		if (r)
+			break;
+
+		job->dests[i].count = 0;
+		count++;
+	}
+
+	if (count == job->num_dests) {
+		push(&job->kc->complete_jobs, job);
+	} else {
+		push(&job->kc->pages_jobs, job);
+		r = 0;
+	}
+
+	return r;
+}
+
 static int run_pages_job(struct kcopyd_job *job)
 {
 	int r;
@@ -659,6 +698,7 @@  static void do_work(struct work_struct *work)
 	spin_unlock_irq(&kc->job_lock);
 
 	blk_start_plug(&plug);
+	process_jobs(&kc->copy_jobs, kc, run_copy_job);
 	process_jobs(&kc->complete_jobs, kc, run_complete_job);
 	process_jobs(&kc->pages_jobs, kc, run_pages_job);
 	process_jobs(&kc->io_jobs, kc, run_io_job);
@@ -676,6 +716,8 @@  static void dispatch_job(struct kcopyd_job *job)
 	atomic_inc(&kc->nr_jobs);
 	if (unlikely(!job->source.count))
 		push(&kc->callback_jobs, job);
+	else if (job->source.bdev->bd_disk == job->dests[0].bdev->bd_disk)
+		push(&kc->copy_jobs, job);
 	else if (job->pages == &zero_page_list)
 		push(&kc->io_jobs, job);
 	else
@@ -916,6 +958,7 @@  struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
 	spin_lock_init(&kc->job_lock);
 	INIT_LIST_HEAD(&kc->callback_jobs);
 	INIT_LIST_HEAD(&kc->complete_jobs);
+	INIT_LIST_HEAD(&kc->copy_jobs);
 	INIT_LIST_HEAD(&kc->io_jobs);
 	INIT_LIST_HEAD(&kc->pages_jobs);
 	kc->throttle = throttle;
@@ -971,6 +1014,7 @@  void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
 
 	BUG_ON(!list_empty(&kc->callback_jobs));
 	BUG_ON(!list_empty(&kc->complete_jobs));
+	WARN_ON(!list_empty(&kc->copy_jobs));
 	BUG_ON(!list_empty(&kc->io_jobs));
 	BUG_ON(!list_empty(&kc->pages_jobs));
 	destroy_workqueue(kc->kcopyd_wq);