[v2] vhost-test: Make vhost/test.c work

Message ID: 1367997873-16933-1-git-send-email-asias@redhat.com (mailing list archive)
State: New, archived

Commit Message

Asias He May 8, 2013, 7:24 a.m. UTC
vhost/test.c was not converted when the vhost core moved to device specific
fields per vq, so it no longer works. Fix it by switching to the new per-vq
layout and passing vhost_dev_init() an array of vhost_virtqueue pointers.

Signed-off-by: Asias He <asias@redhat.com>
---

This is for 3.10.
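
For context, the pattern the patch adopts looks roughly like the sketch
below. This is a minimal illustration based on the 3.10-era vhost API used
in this patch; the vhost_dummy names and DUMMY_VQ_MAX are made up for
illustration and appear nowhere in the kernel. Each backend wraps struct
vhost_virtqueue in its own per-queue structure so device specific fields
can hang off every vq, and vhost_dev_init() takes an array of
vhost_virtqueue pointers:

struct vhost_dummy_virtqueue {
	struct vhost_virtqueue vq;
	/* device specific per-vq fields would live here */
};

struct vhost_dummy {
	struct vhost_dev dev;
	struct vhost_dummy_virtqueue vqs[DUMMY_VQ_MAX];
};

static int vhost_dummy_open_vqs(struct vhost_dummy *n)
{
	struct vhost_virtqueue **vqs;
	int i, r;

	/* The core only keeps the pointer array, so the backend
	 * allocates it here and must free it again on release
	 * (and on any error path after this allocation). */
	vqs = kmalloc(DUMMY_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		return -ENOMEM;

	for (i = 0; i < DUMMY_VQ_MAX; i++)
		vqs[i] = &n->vqs[i].vq;

	r = vhost_dev_init(&n->dev, vqs, DUMMY_VQ_MAX);
	if (r < 0)
		kfree(vqs);
	return r;
}

The extra pointer array is what lets the core stay generic: it iterates
plain struct vhost_virtqueue pointers while each backend keeps its private
per-vq state alongside, which is exactly the conversion the hunks below
perform on vhost/test.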

 drivers/vhost/test.c | 36 +++++++++++++++++++++++++-----------
 1 file changed, 25 insertions(+), 11 deletions(-)

Patch

diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 1ee45bc..7b49d10 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -29,16 +29,20 @@  enum {
 	VHOST_TEST_VQ_MAX = 1,
 };
 
+struct vhost_test_virtqueue {
+	struct vhost_virtqueue vq;
+};
+
 struct vhost_test {
 	struct vhost_dev dev;
-	struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
+	struct vhost_test_virtqueue vqs[VHOST_TEST_VQ_MAX];
 };
 
 /* Expects to be always run from workqueue - which acts as
  * read-size critical section for our kind of RCU. */
 static void handle_vq(struct vhost_test *n)
 {
-	struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ];
+	struct vhost_virtqueue *vq = n->dev.vqs[VHOST_TEST_VQ];
 	unsigned out, in;
 	int head;
 	size_t len, total_len = 0;
@@ -101,15 +105,24 @@  static void handle_vq_kick(struct vhost_work *work)
 static int vhost_test_open(struct inode *inode, struct file *f)
 {
 	struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
+	struct vhost_virtqueue **vqs;
 	struct vhost_dev *dev;
 	int r;
 
 	if (!n)
 		return -ENOMEM;
 
+	vqs = kmalloc(VHOST_TEST_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
+	if (!vqs) {
+		kfree(n);
+		return -ENOMEM;
+	}
+
 	dev = &n->dev;
-	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
-	r = vhost_dev_init(dev, n->vqs, VHOST_TEST_VQ_MAX);
+	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ].vq;
+	n->vqs[VHOST_TEST_VQ].vq.handle_kick = handle_vq_kick;
+	r = vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
 	if (r < 0) {
+		kfree(vqs);
 		kfree(n);
 		return r;
@@ -135,12 +148,12 @@  static void *vhost_test_stop_vq(struct vhost_test *n,
 
 static void vhost_test_stop(struct vhost_test *n, void **privatep)
 {
-	*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
+	*privatep = vhost_test_stop_vq(n, &n->vqs[VHOST_TEST_VQ].vq);
 }
 
 static void vhost_test_flush_vq(struct vhost_test *n, int index)
 {
-	vhost_poll_flush(&n->dev.vqs[index].poll);
+	vhost_poll_flush(&n->vqs[index].vq.poll);
 }
 
 static void vhost_test_flush(struct vhost_test *n)
@@ -159,6 +172,7 @@  static int vhost_test_release(struct inode *inode, struct file *f)
 	/* We do an extra flush before freeing memory,
 	 * since jobs can re-queue themselves. */
 	vhost_test_flush(n);
+	kfree(n->dev.vqs);
 	kfree(n);
 	return 0;
 }
@@ -179,14 +193,14 @@  static long vhost_test_run(struct vhost_test *n, int test)
 
 	for (index = 0; index < n->dev.nvqs; ++index) {
 		/* Verify that ring has been setup correctly. */
-		if (!vhost_vq_access_ok(&n->vqs[index])) {
+		if (!vhost_vq_access_ok(&n->vqs[index].vq)) {
 			r = -EFAULT;
 			goto err;
 		}
 	}
 
 	for (index = 0; index < n->dev.nvqs; ++index) {
-		vq = n->vqs + index;
+		vq = &n->vqs[index].vq;
 		mutex_lock(&vq->mutex);
 		priv = test ? n : NULL;
 
@@ -195,7 +208,7 @@  static long vhost_test_run(struct vhost_test *n, int test)
 						    lockdep_is_held(&vq->mutex));
 		rcu_assign_pointer(vq->private_data, priv);
 
-		r = vhost_init_used(&n->vqs[index]);
+		r = vhost_init_used(&n->vqs[index].vq);
 
 		mutex_unlock(&vq->mutex);
 
@@ -268,14 +282,14 @@  static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
 			return -EFAULT;
 		return vhost_test_run(n, test);
 	case VHOST_GET_FEATURES:
-		features = VHOST_NET_FEATURES;
+		features = VHOST_FEATURES;
 		if (copy_to_user(featurep, &features, sizeof features))
 			return -EFAULT;
 		return 0;
 	case VHOST_SET_FEATURES:
 		if (copy_from_user(&features, featurep, sizeof features))
 			return -EFAULT;
-		if (features & ~VHOST_NET_FEATURES)
+		if (features & ~VHOST_FEATURES)
 			return -EOPNOTSUPP;
 		return vhost_test_set_features(n, features);
 	case VHOST_RESET_OWNER:
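
For reference, the feature negotiation touched by the last hunk (the test
device now advertises VHOST_FEATURES instead of VHOST_NET_FEATURES) is
driven from userspace roughly as in the sketch below, using the standard
vhost ioctls from linux/vhost.h; the helper name and device path argument
are illustrative, not part of this patch:

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Open the device node and negotiate features: read what the device
 * offers with VHOST_GET_FEATURES, then ack a subset of those bits
 * with VHOST_SET_FEATURES. Error handling is trimmed for brevity. */
static int vhost_test_setup(const char *path)
{
	uint64_t features;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, VHOST_SET_OWNER, NULL) ||
	    ioctl(fd, VHOST_GET_FEATURES, &features) ||
	    ioctl(fd, VHOST_SET_FEATURES, &features))
		return -1;
	return fd;
}

Acking a feature bit the device did not offer is rejected with
-EOPNOTSUPP, which is exactly the mask check the hunk above adjusts.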