[RFC,1/3] kvm tools: use mutex abstraction instead of pthread mutex

Message ID 1351098010-20849-1-git-send-email-sasha.levin@oracle.com (mailing list archive)
State New, archived

Commit Message

Sasha Levin Oct. 24, 2012, 5 p.m. UTC
We already have wrappers for the pthread mutex calls in the form of
mutex_[init,lock,unlock]. This patch introduces a struct mutex abstraction
and converts everything to use it.
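
For illustration, a converted caller ends up looking roughly like this (a minimal
sketch, not taken from the tree; it assumes the header is included as
"kvm/mutex.h" and shows how code that waits on a pthread_cond_t now reaches into
the wrapped pthread mutex, as the uip and threadpool changes below do):

	#include <pthread.h>
	#include "kvm/mutex.h"

	static DEFINE_MUTEX(example_lock);	/* expands to: struct mutex example_lock = MUTEX_INITIALIZER */
	static pthread_cond_t example_cond = PTHREAD_COND_INITIALIZER;
	static int example_ready;

	static void example_wait(void)
	{
		mutex_lock(&example_lock);
		while (!example_ready)
			/* condition variables still take the underlying pthread mutex */
			pthread_cond_wait(&example_cond, &example_lock.mutex);
		mutex_unlock(&example_lock);
	}

	static void example_signal(void)
	{
		mutex_lock(&example_lock);
		example_ready = 1;
		pthread_cond_signal(&example_cond);
		mutex_unlock(&example_lock);
	}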

Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
---
 tools/kvm/hw/serial.c              | 10 +++++-----
 tools/kvm/include/kvm/mutex.h      | 22 ++++++++++++++--------
 tools/kvm/include/kvm/qcow.h       |  2 +-
 tools/kvm/include/kvm/threadpool.h |  4 ++--
 tools/kvm/include/kvm/uip.h        | 10 +++++-----
 tools/kvm/net/uip/buf.c            |  4 ++--
 tools/kvm/net/uip/core.c           |  6 +++---
 tools/kvm/net/uip/tcp.c            |  6 +++---
 tools/kvm/net/uip/udp.c            |  2 +-
 tools/kvm/util/threadpool.c        |  8 ++++----
 tools/kvm/virtio/blk.c             |  4 ++--
 tools/kvm/virtio/console.c         |  4 ++--
 tools/kvm/virtio/net.c             | 14 +++++++-------
 13 files changed, 51 insertions(+), 45 deletions(-)

Comments

Pekka Enberg Oct. 25, 2012, 6:50 a.m. UTC | #1
On Wed, 24 Oct 2012, Sasha Levin wrote:

> We already have wrappers for the pthread mutex calls in the form of
> mutex_[init,lock,unlock]. This patch introduces a struct mutex abstraction
> and converts everything to use it.
> 
> Signed-off-by: Sasha Levin <sasha.levin@oracle.com>

I applied this patch from the RFC series, thanks Sasha!

Patch

diff --git a/tools/kvm/hw/serial.c b/tools/kvm/hw/serial.c
index a177a7f..53b684a 100644
--- a/tools/kvm/hw/serial.c
+++ b/tools/kvm/hw/serial.c
@@ -22,7 +22,7 @@ 
 #define UART_IIR_TYPE_BITS	0xc0
 
 struct serial8250_device {
-	pthread_mutex_t		mutex;
+	struct mutex		mutex;
 	u8			id;
 
 	u16			iobase;
@@ -55,7 +55,7 @@  struct serial8250_device {
 static struct serial8250_device devices[] = {
 	/* ttyS0 */
 	[0]	= {
-		.mutex			= PTHREAD_MUTEX_INITIALIZER,
+		.mutex			= MUTEX_INITIALIZER,
 
 		.id			= 0,
 		.iobase			= 0x3f8,
@@ -65,7 +65,7 @@  static struct serial8250_device devices[] = {
 	},
 	/* ttyS1 */
 	[1]	= {
-		.mutex			= PTHREAD_MUTEX_INITIALIZER,
+		.mutex			= MUTEX_INITIALIZER,
 
 		.id			= 1,
 		.iobase			= 0x2f8,
@@ -75,7 +75,7 @@  static struct serial8250_device devices[] = {
 	},
 	/* ttyS2 */
 	[2]	= {
-		.mutex			= PTHREAD_MUTEX_INITIALIZER,
+		.mutex			= MUTEX_INITIALIZER,
 
 		.id			= 2,
 		.iobase			= 0x3e8,
@@ -85,7 +85,7 @@  static struct serial8250_device devices[] = {
 	},
 	/* ttyS3 */
 	[3]	= {
-		.mutex			= PTHREAD_MUTEX_INITIALIZER,
+		.mutex			= MUTEX_INITIALIZER,
 
 		.id			= 3,
 		.iobase			= 0x2e8,
diff --git a/tools/kvm/include/kvm/mutex.h b/tools/kvm/include/kvm/mutex.h
index 3286cea..4f31025 100644
--- a/tools/kvm/include/kvm/mutex.h
+++ b/tools/kvm/include/kvm/mutex.h
@@ -10,23 +10,29 @@ 
  * to write user-space code! :-)
  */
 
-#define DEFINE_MUTEX(mutex) pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER
-
-static inline void mutex_init(pthread_mutex_t *mutex)
+struct mutex {
+	pthread_mutex_t mutex;
+};
+#define MUTEX_INITIALIZER (struct mutex) { .mutex = PTHREAD_MUTEX_INITIALIZER }
+				
+#define DEFINE_MUTEX(mtx) struct mutex mtx = MUTEX_INITIALIZER
+
+static inline void mutex_init(struct mutex *lock)
 {
-	if (pthread_mutex_init(mutex, NULL) != 0)
+	if (pthread_mutex_init(&lock->mutex, NULL) != 0)
 		die("unexpected pthread_mutex_init() failure!");
 }
 
-static inline void mutex_lock(pthread_mutex_t *mutex)
+static inline void mutex_lock(struct mutex *lock)
 {
-	if (pthread_mutex_lock(mutex) != 0)
+	if (pthread_mutex_lock(&lock->mutex) != 0)
 		die("unexpected pthread_mutex_lock() failure!");
+
 }
 
-static inline void mutex_unlock(pthread_mutex_t *mutex)
+static inline void mutex_unlock(struct mutex *lock)
 {
-	if (pthread_mutex_unlock(mutex) != 0)
+	if (pthread_mutex_unlock(&lock->mutex) != 0)
 		die("unexpected pthread_mutex_unlock() failure!");
 }
 
diff --git a/tools/kvm/include/kvm/qcow.h b/tools/kvm/include/kvm/qcow.h
index e032a1e..f849246 100644
--- a/tools/kvm/include/kvm/qcow.h
+++ b/tools/kvm/include/kvm/qcow.h
@@ -74,7 +74,7 @@  struct qcow_header {
 };
 
 struct qcow {
-	pthread_mutex_t			mutex;
+	struct mutex			mutex;
 	struct qcow_header		*header;
 	struct qcow_l1_table		table;
 	struct qcow_refcount_table	refcount_table;
diff --git a/tools/kvm/include/kvm/threadpool.h b/tools/kvm/include/kvm/threadpool.h
index abe46ea..bacb243 100644
--- a/tools/kvm/include/kvm/threadpool.h
+++ b/tools/kvm/include/kvm/threadpool.h
@@ -15,7 +15,7 @@  struct thread_pool__job {
 	void				*data;
 
 	int				signalcount;
-	pthread_mutex_t			mutex;
+	struct mutex			mutex;
 
 	struct list_head		queue;
 };
@@ -26,7 +26,7 @@  static inline void thread_pool__init_job(struct thread_pool__job *job, struct kv
 		.kvm		= kvm,
 		.callback	= callback,
 		.data		= data,
-		.mutex		= PTHREAD_MUTEX_INITIALIZER,
+		.mutex		= MUTEX_INITIALIZER,
 	};
 }
 
diff --git a/tools/kvm/include/kvm/uip.h b/tools/kvm/include/kvm/uip.h
index 9af0110..ac248d2 100644
--- a/tools/kvm/include/kvm/uip.h
+++ b/tools/kvm/include/kvm/uip.h
@@ -187,14 +187,14 @@  struct uip_dhcp {
 struct uip_info {
 	struct list_head udp_socket_head;
 	struct list_head tcp_socket_head;
-	pthread_mutex_t udp_socket_lock;
-	pthread_mutex_t tcp_socket_lock;
+	struct mutex udp_socket_lock;
+	struct mutex tcp_socket_lock;
 	struct uip_eth_addr guest_mac;
 	struct uip_eth_addr host_mac;
 	pthread_cond_t buf_free_cond;
 	pthread_cond_t buf_used_cond;
 	struct list_head buf_head;
-	pthread_mutex_t buf_lock;
+	struct mutex buf_lock;
 	pthread_t udp_thread;
 	int udp_epollfd;
 	int buf_free_nr;
@@ -221,7 +221,7 @@  struct uip_buf {
 struct uip_udp_socket {
 	struct sockaddr_in addr;
 	struct list_head list;
-	pthread_mutex_t *lock;
+	struct mutex *lock;
 	u32 dport, sport;
 	u32 dip, sip;
 	int fd;
@@ -232,7 +232,7 @@  struct uip_tcp_socket {
 	struct list_head list;
 	struct uip_info *info;
 	pthread_cond_t	cond;
-	pthread_mutex_t *lock;
+	struct mutex *lock;
 	pthread_t thread;
 	u32 dport, sport;
 	u32 guest_acked;
diff --git a/tools/kvm/net/uip/buf.c b/tools/kvm/net/uip/buf.c
index 5e564a9..f29ad41 100644
--- a/tools/kvm/net/uip/buf.c
+++ b/tools/kvm/net/uip/buf.c
@@ -11,7 +11,7 @@  struct uip_buf *uip_buf_get_used(struct uip_info *info)
 	mutex_lock(&info->buf_lock);
 
 	while (!(info->buf_used_nr > 0))
-		pthread_cond_wait(&info->buf_used_cond, &info->buf_lock);
+		pthread_cond_wait(&info->buf_used_cond, &info->buf_lock.mutex);
 
 	list_for_each_entry(buf, &info->buf_head, list) {
 		if (buf->status == UIP_BUF_STATUS_USED) {
@@ -39,7 +39,7 @@  struct uip_buf *uip_buf_get_free(struct uip_info *info)
 	mutex_lock(&info->buf_lock);
 
 	while (!(info->buf_free_nr > 0))
-		pthread_cond_wait(&info->buf_free_cond, &info->buf_lock);
+		pthread_cond_wait(&info->buf_free_cond, &info->buf_lock.mutex);
 
 	list_for_each_entry(buf, &info->buf_head, list) {
 		if (buf->status == UIP_BUF_STATUS_FREE) {
diff --git a/tools/kvm/net/uip/core.c b/tools/kvm/net/uip/core.c
index 2e7603c..4e5bb82 100644
--- a/tools/kvm/net/uip/core.c
+++ b/tools/kvm/net/uip/core.c
@@ -153,9 +153,9 @@  int uip_init(struct uip_info *info)
 	INIT_LIST_HEAD(tcp_socket_head);
 	INIT_LIST_HEAD(buf_head);
 
-	pthread_mutex_init(&info->udp_socket_lock, NULL);
-	pthread_mutex_init(&info->tcp_socket_lock, NULL);
-	pthread_mutex_init(&info->buf_lock, NULL);
+	mutex_init(&info->udp_socket_lock);
+	mutex_init(&info->tcp_socket_lock);
+	mutex_init(&info->buf_lock);
 
 	pthread_cond_init(&info->buf_used_cond, NULL);
 	pthread_cond_init(&info->buf_free_cond, NULL);
diff --git a/tools/kvm/net/uip/tcp.c b/tools/kvm/net/uip/tcp.c
index 830aa3f..9044f40 100644
--- a/tools/kvm/net/uip/tcp.c
+++ b/tools/kvm/net/uip/tcp.c
@@ -27,7 +27,7 @@  static int uip_tcp_socket_close(struct uip_tcp_socket *sk, int how)
 static struct uip_tcp_socket *uip_tcp_socket_find(struct uip_tx_arg *arg, u32 sip, u32 dip, u16 sport, u16 dport)
 {
 	struct list_head *sk_head;
-	pthread_mutex_t *sk_lock;
+	struct mutex *sk_lock;
 	struct uip_tcp_socket *sk;
 
 	sk_head = &arg->info->tcp_socket_head;
@@ -49,7 +49,7 @@  static struct uip_tcp_socket *uip_tcp_socket_alloc(struct uip_tx_arg *arg, u32 s
 {
 	struct list_head *sk_head;
 	struct uip_tcp_socket *sk;
-	pthread_mutex_t *sk_lock;
+	struct mutex *sk_lock;
 	struct uip_tcp *tcp;
 	struct uip_ip *ip;
 	int ret;
@@ -198,7 +198,7 @@  static void *uip_tcp_socket_thread(void *p)
 		while (left > 0) {
 			mutex_lock(sk->lock);
 			while ((len = sk->guest_acked + sk->window_size - sk->seq_server) <= 0)
-				pthread_cond_wait(&sk->cond, sk->lock);
+				pthread_cond_wait(&sk->cond, &sk->lock->mutex);
 			mutex_unlock(sk->lock);
 
 			sk->payload = pos;
diff --git a/tools/kvm/net/uip/udp.c b/tools/kvm/net/uip/udp.c
index 5b6ec1c..31c417c 100644
--- a/tools/kvm/net/uip/udp.c
+++ b/tools/kvm/net/uip/udp.c
@@ -14,7 +14,7 @@  static struct uip_udp_socket *uip_udp_socket_find(struct uip_tx_arg *arg, u32 si
 {
 	struct list_head *sk_head;
 	struct uip_udp_socket *sk;
-	pthread_mutex_t *sk_lock;
+	struct mutex *sk_lock;
 	struct epoll_event ev;
 	int flags;
 	int ret;
diff --git a/tools/kvm/util/threadpool.c b/tools/kvm/util/threadpool.c
index a363831..e64aa26 100644
--- a/tools/kvm/util/threadpool.c
+++ b/tools/kvm/util/threadpool.c
@@ -7,9 +7,9 @@ 
 #include <pthread.h>
 #include <stdbool.h>
 
-static pthread_mutex_t	job_mutex	= PTHREAD_MUTEX_INITIALIZER;
-static pthread_mutex_t	thread_mutex	= PTHREAD_MUTEX_INITIALIZER;
-static pthread_cond_t	job_cond	= PTHREAD_COND_INITIALIZER;
+static DEFINE_MUTEX(job_mutex);
+static DEFINE_MUTEX(thread_mutex);
+static pthread_cond_t job_cond = PTHREAD_COND_INITIALIZER;
 
 static LIST_HEAD(head);
 
@@ -85,7 +85,7 @@  static void *thread_pool__threadfunc(void *param)
 
 		mutex_lock(&job_mutex);
 		while (running && (curjob = thread_pool__job_pop_locked()) == NULL)
-			pthread_cond_wait(&job_cond, &job_mutex);
+			pthread_cond_wait(&job_cond, &job_mutex.mutex);
 		mutex_unlock(&job_mutex);
 
 		if (running)
diff --git a/tools/kvm/virtio/blk.c b/tools/kvm/virtio/blk.c
index f76342c..356a240 100644
--- a/tools/kvm/virtio/blk.c
+++ b/tools/kvm/virtio/blk.c
@@ -37,7 +37,7 @@  struct blk_dev_req {
 };
 
 struct blk_dev {
-	pthread_mutex_t			mutex;
+	struct mutex			mutex;
 
 	struct list_head		list;
 
@@ -248,7 +248,7 @@  static int virtio_blk__init_one(struct kvm *kvm, struct disk_image *disk)
 		return -ENOMEM;
 
 	*bdev = (struct blk_dev) {
-		.mutex			= PTHREAD_MUTEX_INITIALIZER,
+		.mutex			= MUTEX_INITIALIZER,
 		.disk			= disk,
 		.blk_config		= (struct virtio_blk_config) {
 			.capacity	= disk->size / SECTOR_SIZE,
diff --git a/tools/kvm/virtio/console.c b/tools/kvm/virtio/console.c
index 88b1106..1df6cb0 100644
--- a/tools/kvm/virtio/console.c
+++ b/tools/kvm/virtio/console.c
@@ -29,7 +29,7 @@ 
 #define VIRTIO_CONSOLE_TX_QUEUE		1
 
 struct con_dev {
-	pthread_mutex_t			mutex;
+	struct mutex			mutex;
 
 	struct virtio_device		vdev;
 	struct virt_queue		vqs[VIRTIO_CONSOLE_NUM_QUEUES];
@@ -40,7 +40,7 @@  struct con_dev {
 };
 
 static struct con_dev cdev = {
-	.mutex				= PTHREAD_MUTEX_INITIALIZER,
+	.mutex				= MUTEX_INITIALIZER,
 
 	.config = {
 		.cols			= 80,
diff --git a/tools/kvm/virtio/net.c b/tools/kvm/virtio/net.c
index ac429cc..db77ab8 100644
--- a/tools/kvm/virtio/net.c
+++ b/tools/kvm/virtio/net.c
@@ -39,7 +39,7 @@  struct net_dev_operations {
 };
 
 struct net_dev {
-	pthread_mutex_t			mutex;
+	struct mutex			mutex;
 	struct virtio_device		vdev;
 	struct list_head		list;
 
@@ -48,11 +48,11 @@  struct net_dev {
 	u32				features;
 
 	pthread_t			io_rx_thread;
-	pthread_mutex_t			io_rx_lock;
+	struct mutex			io_rx_lock;
 	pthread_cond_t			io_rx_cond;
 
 	pthread_t			io_tx_thread;
-	pthread_mutex_t			io_tx_lock;
+	struct mutex			io_tx_lock;
 	pthread_cond_t			io_tx_cond;
 
 	int				vhost_fd;
@@ -87,7 +87,7 @@  static void *virtio_net_rx_thread(void *p)
 	while (1) {
 		mutex_lock(&ndev->io_rx_lock);
 		if (!virt_queue__available(vq))
-			pthread_cond_wait(&ndev->io_rx_cond, &ndev->io_rx_lock);
+			pthread_cond_wait(&ndev->io_rx_cond, &ndev->io_rx_lock.mutex);
 		mutex_unlock(&ndev->io_rx_lock);
 
 		while (virt_queue__available(vq)) {
@@ -125,7 +125,7 @@  static void *virtio_net_tx_thread(void *p)
 	while (1) {
 		mutex_lock(&ndev->io_tx_lock);
 		if (!virt_queue__available(vq))
-			pthread_cond_wait(&ndev->io_tx_cond, &ndev->io_tx_lock);
+			pthread_cond_wait(&ndev->io_tx_cond, &ndev->io_tx_lock.mutex);
 		mutex_unlock(&ndev->io_tx_lock);
 
 		while (virt_queue__available(vq)) {
@@ -252,8 +252,8 @@  fail:
 
 static void virtio_net__io_thread_init(struct kvm *kvm, struct net_dev *ndev)
 {
-	pthread_mutex_init(&ndev->io_tx_lock, NULL);
-	pthread_mutex_init(&ndev->io_rx_lock, NULL);
+	mutex_init(&ndev->io_tx_lock);
+	mutex_init(&ndev->io_rx_lock);
 
 	pthread_cond_init(&ndev->io_tx_cond, NULL);
 	pthread_cond_init(&ndev->io_rx_cond, NULL);