From patchwork Sat Jul 9 13:02:36 2011
X-Patchwork-Submitter: Pekka Enberg
X-Patchwork-Id: 959552
From: Pekka Enberg
To: kvm@vger.kernel.org
Cc: Pekka Enberg, Asias He, Cyrill Gorcunov, Ingo Molnar, Prasad Joshi,
 Sasha Levin
Subject: [PATCH 3/9] kvm tools, qcow: Fix locking issues
Date: Sat, 9 Jul 2011 16:02:36 +0300
Message-Id: <1310216563-17503-4-git-send-email-penberg@kernel.org>
X-Mailer: git-send-email 1.7.0.4
In-Reply-To: <1310216563-17503-1-git-send-email-penberg@kernel.org>
References: <1310216563-17503-1-git-send-email-penberg@kernel.org>
X-Mailing-List: kvm@vger.kernel.org

The virtio_blk_do_io() function can enter the QCOW code through
disk_image__{read,write,flush}() from multiple threads because it uses
a thread pool for I/O requests. Thus, use locking to make the QCOW2
code thread-safe.
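
To see why the shared QCOW state needs a lock at all, here is a minimal
standalone sketch (not kvm tools code; shared_cache, cache_insert and
worker are made-up names): several pool threads push entries into one
shared structure, the way concurrent disk_image__read()/write() calls
end up updating the QCOW L2-table cache. Without the mutex the list
links race; with it the final count is deterministic.

/*
 * Standalone illustration only: several "I/O threads" update one shared
 * cache, mimicking virtio_blk_do_io()'s thread pool entering the QCOW
 * code.  The mutex plays the role of q->mutex in the patch below.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cache_entry {
	int			key;
	struct cache_entry	*next;
};

static struct cache_entry *shared_cache;	/* stands in for q->root/q->lru_list */
static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;

static void cache_insert(int key)
{
	struct cache_entry *e = malloc(sizeof(*e));

	if (!e)
		exit(1);
	e->key = key;

	pthread_mutex_lock(&cache_mutex);
	e->next = shared_cache;
	shared_cache = e;
	pthread_mutex_unlock(&cache_mutex);
}

static void *worker(void *arg)
{
	int base = (int)(long)arg * 1000, i;

	for (i = 0; i < 1000; i++)
		cache_insert(base + i);
	return NULL;
}

int main(void)
{
	pthread_t threads[4];
	struct cache_entry *e;
	long i, count = 0;

	for (i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, worker, (void *)i);
	for (i = 0; i < 4; i++)
		pthread_join(&threads[i], NULL);

	for (e = shared_cache; e; e = e->next)
		count++;
	printf("cache entries: %ld (expect 4000)\n", count);
	return 0;
}

Built with "gcc -pthread", this always reports 4000 entries; dropping
the lock/unlock pair makes the result unpredictable.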
Cc: Asias He
Cc: Cyrill Gorcunov
Cc: Ingo Molnar
Cc: Prasad Joshi
Cc: Sasha Levin
Signed-off-by: Pekka Enberg
---
 tools/kvm/disk/qcow.c         |   30 +++++++++++++++++++++---------
 tools/kvm/include/kvm/mutex.h |    6 ++++++
 tools/kvm/include/kvm/qcow.h  |    3 +++
 3 files changed, 30 insertions(+), 9 deletions(-)

diff --git a/tools/kvm/disk/qcow.c b/tools/kvm/disk/qcow.c
index a1f6ef3..939bc61 100644
--- a/tools/kvm/disk/qcow.c
+++ b/tools/kvm/disk/qcow.c
@@ -2,6 +2,7 @@

 #include "kvm/disk-image.h"
 #include "kvm/read-write.h"
+#include "kvm/mutex.h"
 #include "kvm/util.h"

 #include
@@ -232,16 +233,17 @@ static ssize_t qcow_read_cluster(struct qcow *q, u64 offset, void *dst, u32 dst_

 	l1_idx = get_l1_index(q, offset);
 	if (l1_idx >= table->table_size)
-		goto out_error;
+		return -1;

 	clust_offset = get_cluster_offset(q, offset);
 	if (clust_offset >= cluster_size)
-		goto out_error;
+		return -1;

 	length = cluster_size - clust_offset;
 	if (length > dst_len)
 		length = dst_len;

+	mutex_lock(&q->mutex);
 	l2_table_offset = table->l1_table[l1_idx] & ~header->oflag_mask;
 	if (!l2_table_offset)
 		goto zero_cluster;
@@ -261,19 +263,22 @@ static ssize_t qcow_read_cluster(struct qcow *q, u64 offset, void *dst, u32 dst_
 	if (!clust_start)
 		goto zero_cluster;

+	mutex_unlock(&q->mutex);
+
 	if (pread_in_full(q->fd, dst, length, clust_start + clust_offset) < 0)
-		goto out_error;
+		return -1;

-out:
 	return length;

 zero_cluster:
+	mutex_unlock(&q->mutex);
 	memset(dst, 0, length);
-	goto out;
+	return length;

 out_error:
+	mutex_unlock(&q->mutex);
 	length = -1;
-	goto out;
+	return -1;
 }

 static ssize_t qcow_read_sector(struct disk_image *disk, u64 sector, void *dst, u32 dst_len)
@@ -379,20 +384,22 @@ static ssize_t qcow_write_cluster(struct qcow *q, u64 offset, void *buf, u32 src

 	l1t_idx = get_l1_index(q, offset);
 	if (l1t_idx >= table->table_size)
-		goto error;
+		return -1;

 	l2t_idx = get_l2_index(q, offset);
 	if (l2t_idx >= l2t_sz)
-		goto error;
+		return -1;

 	clust_off = get_cluster_offset(q, offset);
 	if (clust_off >= clust_sz)
-		goto error;
+		return -1;

 	len = clust_sz - clust_off;
 	if (len > src_len)
 		len = src_len;

+	mutex_lock(&q->mutex);
+
 	l2t_off = table->l1_table[l1t_idx] & ~header->oflag_mask;
 	if (l2t_off) {
 		/* read and cache l2 table */
@@ -466,11 +473,14 @@ static ssize_t qcow_write_cluster(struct qcow *q, u64 offset, void *buf, u32 src
 		l2t->table[l2t_idx] = clust_start;
 	}

+	mutex_unlock(&q->mutex);
+
 	return len;

 free_cache:
 	free(l2t);
 error:
+	mutex_unlock(&q->mutex);
 	return -1;
 }

@@ -611,6 +621,7 @@ static struct disk_image *qcow2_probe(int fd, bool readonly)
 	if (!q)
 		goto error;

+	mutex_init(&q->mutex);
 	q->fd = fd;
 	q->root = RB_ROOT;
 	INIT_LIST_HEAD(&q->lru_list);
@@ -710,6 +721,7 @@ static struct disk_image *qcow1_probe(int fd, bool readonly)
 	if (!q)
 		goto error;

+	mutex_init(&q->mutex);
 	q->fd = fd;
 	q->root = RB_ROOT;
 	INIT_LIST_HEAD(&q->lru_list);
diff --git a/tools/kvm/include/kvm/mutex.h b/tools/kvm/include/kvm/mutex.h
index bd765c4..3286cea 100644
--- a/tools/kvm/include/kvm/mutex.h
+++ b/tools/kvm/include/kvm/mutex.h
@@ -12,6 +12,12 @@

 #define DEFINE_MUTEX(mutex) pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER

+static inline void mutex_init(pthread_mutex_t *mutex)
+{
+	if (pthread_mutex_init(mutex, NULL) != 0)
+		die("unexpected pthread_mutex_init() failure!");
+}
+
 static inline void mutex_lock(pthread_mutex_t *mutex)
 {
 	if (pthread_mutex_lock(mutex) != 0)
diff --git a/tools/kvm/include/kvm/qcow.h b/tools/kvm/include/kvm/qcow.h
index 12247e0..d44c64a 100644
--- a/tools/kvm/include/kvm/qcow.h
+++ b/tools/kvm/include/kvm/qcow.h
@@ -1,6 +1,8 @@
 #ifndef KVM__QCOW_H
 #define KVM__QCOW_H

+#include "kvm/mutex.h"
+
 #include
 #include
 #include
@@ -34,6 +36,7 @@ struct qcow_table {
 };

 struct qcow {
+	pthread_mutex_t		mutex;
 	void			*header;
 	struct qcow_table	table;
 	int			fd;
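
A note on the pattern the diff settles on: the bounds checks at the top
of qcow_read_cluster()/qcow_write_cluster() now return -1 directly
before the lock is taken, q->mutex covers the L1/L2 table walk, and
every path that runs with the lock held unlocks explicitly before
returning instead of funnelling through the old goto labels. In the
read path the lock is dropped before pread_in_full(), so the data copy
itself does not serialize readers; the write path holds it until just
before "return len" and in its error label. The new mutex_init()
wrapper in mutex.h follows the existing mutex_lock()/mutex_unlock()
convention of dying on unexpected pthread failures. Below is a
condensed, standalone sketch of the read-side discipline; struct
fake_qcow and lookup_cluster_offset() are placeholders, not the real
kvm tools API.

/*
 * Standalone sketch (placeholder names): the mutex covers only the
 * cluster-table lookup, and each early-return path unlocks before
 * leaving, mirroring qcow_read_cluster() after this patch.
 */
#include <pthread.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

struct fake_qcow {
	pthread_mutex_t	mutex;
	int		fd;
};

/* Placeholder for the L1/L2 table walk that must run under the mutex. */
static int64_t lookup_cluster_offset(struct fake_qcow *q, uint64_t offset)
{
	(void)q;
	(void)offset;
	return -1;	/* "cluster not allocated" in this sketch */
}

ssize_t read_cluster(struct fake_qcow *q, uint64_t offset, void *dst, size_t len)
{
	int64_t clust_start;

	pthread_mutex_lock(&q->mutex);

	clust_start = lookup_cluster_offset(q, offset);
	if (clust_start < 0) {
		/* unallocated cluster: unlock on this early-return path too */
		pthread_mutex_unlock(&q->mutex);
		memset(dst, 0, len);
		return len;
	}

	/* table walk done: drop the lock before the blocking file read */
	pthread_mutex_unlock(&q->mutex);

	if (pread(q->fd, dst, len, (off_t)clust_start) < 0)
		return -1;

	return len;
}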