From patchwork Tue May 7 06:54:36 2013
X-Patchwork-Submitter: Asias He
X-Patchwork-Id: 2532261
From: Asias He <asias@redhat.com>
To: "Michael S. Tsirkin"
Cc: Nicholas Bellinger, Rusty Russell, kvm@vger.kernel.org,
    virtualization@lists.linux-foundation.org, target-devel@vger.kernel.org,
    Asias He
Subject: [PATCH 4/4] vhost: Remove custom vhost rcu usage
Date: Tue, 7 May 2013 14:54:36 +0800
Message-Id: <1367909676-11788-5-git-send-email-asias@redhat.com>
In-Reply-To: <1367909676-11788-1-git-send-email-asias@redhat.com>
References: <1367909676-11788-1-git-send-email-asias@redhat.com>
X-Mailing-List: kvm@vger.kernel.org

vq->private_data is now always accessed under the vq mutex, so there is
no need to play the custom vhost RCU trick.
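For readers following the locking change: the diff below replaces the
RCU-annotated accessors with plain loads and stores done while vq->mutex is
held. A minimal sketch of the before/after pattern follows; the struct and
function names (old_vq, new_vq, *_swap_backend) are illustrative stand-ins,
not actual vhost code.

#include <linux/mutex.h>
#include <linux/rcupdate.h>

/* Illustrative stand-ins for struct vhost_virtqueue, old and new layouts. */
struct old_vq {
        struct mutex mutex;
        void __rcu *private_data;       /* old: RCU-annotated pointer */
};

struct new_vq {
        struct mutex mutex;
        void *private_data;             /* new: plain pointer, mutex protected */
};

/* Old style: writers already held vq->mutex, but still had to go through
 * the RCU helpers because of the __rcu annotation.
 */
static void *old_swap_backend(struct old_vq *vq, void *new_data)
{
        void *old;

        mutex_lock(&vq->mutex);
        old = rcu_dereference_protected(vq->private_data,
                                        lockdep_is_held(&vq->mutex));
        rcu_assign_pointer(vq->private_data, new_data);
        mutex_unlock(&vq->mutex);
        return old;
}

/* New style: every access takes vq->mutex, so plain loads/stores suffice. */
static void *new_swap_backend(struct new_vq *vq, void *new_data)
{
        void *old;

        mutex_lock(&vq->mutex);
        old = vq->private_data;
        vq->private_data = new_data;
        mutex_unlock(&vq->mutex);
        return old;
}

The net.c, scsi.c and test.c hunks in the patch are exactly this mechanical
substitution.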
Signed-off-by: Asias He <asias@redhat.com>
---
 drivers/vhost/net.c   | 16 ++++++----------
 drivers/vhost/scsi.c  |  6 ++----
 drivers/vhost/test.c  | 11 ++++-------
 drivers/vhost/vhost.h | 10 ++--------
 4 files changed, 14 insertions(+), 29 deletions(-)

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index b616d9a..05bdc3c 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -15,7 +15,6 @@
 #include <linux/moduleparam.h>
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
-#include <linux/rcupdate.h>
 #include <linux/file.h>
 #include <linux/slab.h>
 
@@ -751,8 +750,7 @@ static int vhost_net_enable_vq(struct vhost_net *n,
 	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
 	struct socket *sock;
 
-	sock = rcu_dereference_protected(vq->private_data,
-					 lockdep_is_held(&vq->mutex));
+	sock = vq->private_data;
 	if (!sock)
 		return 0;
 
@@ -765,10 +763,9 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
 	struct socket *sock;
 
 	mutex_lock(&vq->mutex);
-	sock = rcu_dereference_protected(vq->private_data,
-					 lockdep_is_held(&vq->mutex));
+	sock = vq->private_data;
 	vhost_net_disable_vq(n, vq);
-	rcu_assign_pointer(vq->private_data, NULL);
+	vq->private_data = NULL;
 	mutex_unlock(&vq->mutex);
 	return sock;
 }
@@ -924,8 +921,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 	}
 
 	/* start polling new socket */
-	oldsock = rcu_dereference_protected(vq->private_data,
-					    lockdep_is_held(&vq->mutex));
+	oldsock = vq->private_data;
 	if (sock != oldsock) {
 		ubufs = vhost_net_ubuf_alloc(vq,
 					     sock && vhost_sock_zcopy(sock));
@@ -935,7 +931,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 		}
 
 		vhost_net_disable_vq(n, vq);
-		rcu_assign_pointer(vq->private_data, sock);
+		vq->private_data = sock;
 		r = vhost_init_used(vq);
 		if (r)
 			goto err_used;
@@ -969,7 +965,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 	return 0;
 
 err_used:
-	rcu_assign_pointer(vq->private_data, oldsock);
+	vq->private_data = oldsock;
 	vhost_net_enable_vq(n, vq);
 	if (ubufs)
 		vhost_net_ubuf_put_and_wait(ubufs);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index d78768b..de6d817 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1223,9 +1223,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 		       sizeof(vs->vs_vhost_wwpn));
 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
 			vq = &vs->vqs[i].vq;
-			/* Flushing the vhost_work acts as synchronize_rcu */
 			mutex_lock(&vq->mutex);
-			rcu_assign_pointer(vq->private_data, vs_tpg);
+			vq->private_data = vs_tpg;
 			vhost_init_used(vq);
 			mutex_unlock(&vq->mutex);
 		}
@@ -1304,9 +1303,8 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
 	if (match) {
 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
 			vq = &vs->vqs[i].vq;
-			/* Flushing the vhost_work acts as synchronize_rcu */
 			mutex_lock(&vq->mutex);
-			rcu_assign_pointer(vq->private_data, NULL);
+			vq->private_data = NULL;
 			mutex_unlock(&vq->mutex);
 		}
 	}
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 435b911..5f23477 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -13,7 +13,6 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
-#include <linux/rcupdate.h>
 #include <linux/file.h>
 #include <linux/slab.h>
 
@@ -139,9 +138,8 @@ static void *vhost_test_stop_vq(struct vhost_test *n,
 	void *private;
 
 	mutex_lock(&vq->mutex);
-	private = rcu_dereference_protected(vq->private_data,
-					    lockdep_is_held(&vq->mutex));
-	rcu_assign_pointer(vq->private_data, NULL);
+	private = vq->private_data;
+	vq->private_data = NULL;
 	mutex_unlock(&vq->mutex);
 	return private;
 }
@@ -205,9 +203,8 @@ static long vhost_test_run(struct vhost_test *n, int test)
 		priv = test ? n : NULL;
 
 		/* start polling new socket */
-		oldpriv = rcu_dereference_protected(vq->private_data,
-						    lockdep_is_held(&vq->mutex));
-		rcu_assign_pointer(vq->private_data, priv);
+		oldpriv = vq->private_data;
+		vq->private_data = priv;
 
 		r = vhost_init_used(&n->vqs[index].vq);
 
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 51aeb5f..2a8c655 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -103,14 +103,8 @@ struct vhost_virtqueue {
 	struct iovec iov[UIO_MAXIOV];
 	struct iovec *indirect;
 	struct vring_used_elem *heads;
-	/* We use a kind of RCU to access private pointer.
-	 * All readers access it from worker, which makes it possible to
-	 * flush the vhost_work instead of synchronize_rcu. Therefore readers do
-	 * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
-	 * vhost_work execution acts instead of rcu_read_lock() and the end of
-	 * vhost_work execution acts instead of rcu_read_unlock().
-	 * Writers use virtqueue mutex. */
-	void __rcu *private_data;
+	/* Protected by virtqueue mutex. */
+	void *private_data;
 	/* Log write descriptors */
 	void __user *log_base;
 	struct vhost_log *log;
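
The comment removed from vhost.h described the old scheme: readers ran only
from the vhost worker, so flushing the vhost_work doubled as the grace period
instead of calling synchronize_rcu(). Under the new rule the contract is
simply "hold vq->mutex". A rough sketch of a worker-side reader under that
rule follows; example_handle_vq is hypothetical (it reuses the illustrative
new_vq struct from the sketch above), and the real handle_tx/handle_rx code
differs.

/* Hypothetical worker-side handler following the new locking rule:
 * vq->private_data is only read or written with vq->mutex held.
 */
static void example_handle_vq(struct new_vq *vq)
{
        void *backend;

        mutex_lock(&vq->mutex);
        backend = vq->private_data;     /* plain load, mutex held */
        if (!backend) {
                /* Backend was torn down; nothing to process. */
                mutex_unlock(&vq->mutex);
                return;
        }

        /* ... process the virtqueue using 'backend' ... */

        mutex_unlock(&vq->mutex);
}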