From patchwork Wed Oct 25 16:47:26 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mike Christie X-Patchwork-Id: 10027009 Return-Path: Received: from mail.wl.linuxfoundation.org (pdx-wl-mail.web.codeaurora.org [172.30.200.125]) by pdx-korg-patchwork.web.codeaurora.org (Postfix) with ESMTP id E5C5560375 for ; Wed, 25 Oct 2017 16:48:03 +0000 (UTC) Received: from mail.wl.linuxfoundation.org (localhost [127.0.0.1]) by mail.wl.linuxfoundation.org (Postfix) with ESMTP id D631128BEC for ; Wed, 25 Oct 2017 16:48:03 +0000 (UTC) Received: by mail.wl.linuxfoundation.org (Postfix, from userid 486) id CB1CC28BEE; Wed, 25 Oct 2017 16:48:03 +0000 (UTC) X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on pdx-wl-mail.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-6.9 required=2.0 tests=BAYES_00,RCVD_IN_DNSWL_HI autolearn=ham version=3.3.1 Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.wl.linuxfoundation.org (Postfix) with ESMTP id 5DFF528BEC for ; Wed, 25 Oct 2017 16:48:03 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S932237AbdJYQsC (ORCPT ); Wed, 25 Oct 2017 12:48:02 -0400 Received: from mx1.redhat.com ([209.132.183.28]:57134 "EHLO mx1.redhat.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S932210AbdJYQsC (ORCPT ); Wed, 25 Oct 2017 12:48:02 -0400 Received: from smtp.corp.redhat.com (int-mx03.intmail.prod.int.phx2.redhat.com [10.5.11.13]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mx1.redhat.com (Postfix) with ESMTPS id 5809A25C48; Wed, 25 Oct 2017 16:48:02 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mx1.redhat.com 5809A25C48 Authentication-Results: ext-mx06.extmail.prod.ext.phx2.redhat.com; dmarc=none (p=none dis=none) header.from=redhat.com Authentication-Results: ext-mx06.extmail.prod.ext.phx2.redhat.com; spf=fail smtp.mailfrom=mchristi@redhat.com 
Received: from rh2.redhat.com (ovpn-124-200.rdu2.redhat.com [10.10.124.200]) by smtp.corp.redhat.com (Postfix) with ESMTP id 1414F60BE1; Wed, 25 Oct 2017 16:47:59 +0000 (UTC) From: Mike Christie To: target-devel@vger.kernel.org, nab@linux-iscsi.org Cc: Mike Christie Subject: [PATCH 16/20] tcmu: fix tcmu_irqcontrol and unmap race Date: Wed, 25 Oct 2017 11:47:26 -0500 Message-Id: <1508950050-10120-17-git-send-email-mchristi@redhat.com> In-Reply-To: <1508950050-10120-1-git-send-email-mchristi@redhat.com> References: <1508950050-10120-1-git-send-email-mchristi@redhat.com> X-Scanned-By: MIMEDefang 2.79 on 10.5.11.13 X-Greylist: Sender IP whitelisted, not delayed by milter-greylist-4.5.16 (mx1.redhat.com [10.5.110.30]); Wed, 25 Oct 2017 16:48:02 +0000 (UTC) Sender: target-devel-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: target-devel@vger.kernel.org X-Virus-Scanned: ClamAV using ClamSMTP If the unmap thread has passed the find_free_blocks call but has not yet hit the prepare_to_wait we will miss any tcmu_irqcontrol wake_up calls. This patch replaces our kthread use with a work_struct which will handle this for us and allow us to remove the race checks for the time out and queueing wake up calls. 
Signed-off-by: Mike Christie --- drivers/target/target_core_user.c | 72 +++++++-------------------------------- 1 file changed, 12 insertions(+), 60 deletions(-) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 5eb919a..4fe5249 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include #include @@ -179,9 +179,6 @@ struct tcmu_cmd { unsigned long flags; }; -static struct task_struct *unmap_thread; -static wait_queue_head_t unmap_wait; - static DEFINE_SPINLOCK(timed_out_udevs_lock); static LIST_HEAD(timed_out_udevs); @@ -203,6 +200,7 @@ static DEFINE_SPINLOCK(root_udev_waiter_lock); static LIST_HEAD(root_udev_waiter); static atomic_t global_db_count = ATOMIC_INIT(0); +struct work_struct tcmu_unmap_work; static struct kmem_cache *tcmu_cmd_cache; @@ -822,7 +820,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) pr_debug("adding %s to block waiter list\n", udev->name); list_add_tail(&udev->waiter, &root_udev_waiter); - wake_up(&unmap_wait); + schedule_work(&tcmu_unmap_work); } spin_unlock(&root_udev_waiter_lock); return 0; @@ -1165,7 +1163,7 @@ static void tcmu_device_timedout(unsigned long data) list_add_tail(&udev->timedout_entry, &timed_out_udevs); spin_unlock(&timed_out_udevs_lock); - wake_up(&unmap_wait); + schedule_work(&tcmu_unmap_work); } static int tcmu_attach_hba(struct se_hba *hba, u32 host_id) @@ -1284,7 +1282,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) * of it. 
*/ if (!list_empty(&tcmu_dev->waiter)) { - wake_up(&unmap_wait); + schedule_work(&tcmu_unmap_work); } else { tcmu_handle_completions(tcmu_dev); run_cmdr_queue(tcmu_dev); @@ -2296,50 +2294,11 @@ static void check_timedout_devices(void) spin_unlock_bh(&timed_out_udevs_lock); } -static int unmap_thread_fn(void *data) +static void tcmu_unmap_work_fn(struct work_struct *work) { - bool drained = true; - bool has_block_waiters; - bool has_timed_out_devs; - - while (!kthread_should_stop()) { - DEFINE_WAIT(__wait); - - prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE); - /* - * If we had space left, check if devs were added/readded - * while the lock was dropped. - */ - spin_lock(&root_udev_waiter_lock); - has_block_waiters = true; - if (list_empty(&root_udev_waiter)) - has_block_waiters = false; - spin_unlock(&root_udev_waiter_lock); - - spin_lock_bh(&timed_out_udevs_lock); - has_timed_out_devs = true; - if (list_empty(&timed_out_udevs)) - has_timed_out_devs = false; - spin_unlock_bh(&timed_out_udevs_lock); - - /* - * Handle race where new waiters were added and we still - * had space (were at least able to drain the queue on - * the previous run). 
- */ - if ((!drained || !has_block_waiters) && !has_timed_out_devs) - schedule(); - - finish_wait(&unmap_wait, &__wait); - - check_timedout_devices(); - - find_free_blocks(); - - drained = run_cmdr_queues(); - } - - return 0; + check_timedout_devices(); + find_free_blocks(); + run_cmdr_queues(); } static int __init tcmu_module_init(void) @@ -2348,6 +2307,8 @@ static int __init tcmu_module_init(void) BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); + INIT_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn); + tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache", sizeof(struct tcmu_cmd), __alignof__(struct tcmu_cmd), @@ -2393,17 +2354,8 @@ static int __init tcmu_module_init(void) if (ret) goto out_attrs; - init_waitqueue_head(&unmap_wait); - unmap_thread = kthread_run(unmap_thread_fn, NULL, "tcmu_unmap"); - if (IS_ERR(unmap_thread)) { - ret = PTR_ERR(unmap_thread); - goto out_unreg_transport; - } - return 0; -out_unreg_transport: - target_backend_unregister(&tcmu_ops); out_attrs: kfree(tcmu_attrs); out_unreg_genl: @@ -2418,7 +2370,7 @@ static int __init tcmu_module_init(void) static void __exit tcmu_module_exit(void) { - kthread_stop(unmap_thread); + cancel_work_sync(&tcmu_unmap_work); target_backend_unregister(&tcmu_ops); kfree(tcmu_attrs); genl_unregister_family(&tcmu_genl_family);