[v17,33/35] virt: gunyah: Add IO handlers

Message ID 20240222-gunyah-v17-33-1e9da6763d38@quicinc.com (mailing list archive)
State New, archived
Series Drivers for Gunyah hypervisor

Commit Message

Elliot Berman Feb. 22, 2024, 11:16 p.m. UTC
Add a framework for VM functions to handle stage-2 write faults from
Gunyah guest virtual machines. Each IO handler covers a range of guest
addresses. Optionally, a handler may apply only when the value written
matches the handler's data value.

Reviewed-by: Alex Elder <elder@linaro.org>
Co-developed-by: Prakruthi Deepak Heragu <quic_pheragu@quicinc.com>
Signed-off-by: Prakruthi Deepak Heragu <quic_pheragu@quicinc.com>
Signed-off-by: Elliot Berman <quic_eberman@quicinc.com>
---
 drivers/virt/gunyah/gunyah_vcpu.c |   4 ++
 drivers/virt/gunyah/vm_mgr.c      | 115 ++++++++++++++++++++++++++++++++++++++
 drivers/virt/gunyah/vm_mgr.h      |   8 +++
 include/linux/gunyah.h            |  29 ++++++++++
 4 files changed, 156 insertions(+)
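
For context, here is a minimal sketch of how a VM function might register
one of the IO handlers this patch adds, assuming it already holds a
struct gunyah_vm pointer. The callback, variable names, and guest address
below are hypothetical; only the types and the add/remove calls come from
this patch.

#include <linux/gunyah.h>

/*
 * Hypothetical write handler, called from gunyah_vm_mmio_write() when a
 * stage-2 write fault matches the handler defined below.
 */
static int example_doorbell_write(struct gunyah_vm_io_handler *io_hdlr,
				  u64 addr, u32 len, u64 data)
{
	/*
	 * Returning 0 claims the write: the vCPU is resumed with
	 * GUNYAH_ADDRSPACE_VMMIO_ACTION_EMULATE. A nonzero return lets the
	 * fault be forwarded to userspace as an ordinary MMIO exit.
	 */
	return 0;
}

static struct gunyah_vm_io_handler_ops example_doorbell_ops = {
	.write = example_doorbell_write,
};

static struct gunyah_vm_io_handler example_doorbell = {
	.addr = 0x1000,		/* hypothetical guest physical address */
	.len = 4,		/* must be 1..sizeof(data) when datamatch is set */
	.datamatch = true,
	.data = 1,		/* only 4-byte writes of the value 1 match */
	.ops = &example_doorbell_ops,
};

static int example_register(struct gunyah_vm *ghvm)
{
	/* Fails with -EEXIST if an equivalent handler is already present. */
	return gunyah_vm_add_io_handler(ghvm, &example_doorbell);
}

static void example_unregister(struct gunyah_vm *ghvm)
{
	gunyah_vm_remove_io_handler(ghvm, &example_doorbell);
}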

Patch

diff --git a/drivers/virt/gunyah/gunyah_vcpu.c b/drivers/virt/gunyah/gunyah_vcpu.c
index f8306620b1dd6..ef78503fe586d 100644
--- a/drivers/virt/gunyah/gunyah_vcpu.c
+++ b/drivers/virt/gunyah/gunyah_vcpu.c
@@ -133,6 +133,10 @@  gunyah_handle_mmio(struct gunyah_vcpu *vcpu, unsigned long resume_data[3],
 		vcpu->state = GUNYAH_VCPU_RUN_STATE_MMIO_READ;
 		vcpu->mmio_read_len = len;
 	} else { /* GUNYAH_VCPU_ADDRSPACE_VMMIO_WRITE */
+		if (!gunyah_vm_mmio_write(vcpu->ghvm, addr, len, data)) {
+			resume_data[0] = GUNYAH_ADDRSPACE_VMMIO_ACTION_EMULATE;
+			return true;
+		}
 		vcpu->vcpu_run->mmio.is_write = 1;
 		memcpy(vcpu->vcpu_run->mmio.data, &data, len);
 		vcpu->state = GUNYAH_VCPU_RUN_STATE_MMIO_WRITE;
diff --git a/drivers/virt/gunyah/vm_mgr.c b/drivers/virt/gunyah/vm_mgr.c
index a61a3f3ae92f1..2434be5dffe08 100644
--- a/drivers/virt/gunyah/vm_mgr.c
+++ b/drivers/virt/gunyah/vm_mgr.c
@@ -302,6 +302,118 @@  static void gunyah_vm_clean_resources(struct gunyah_vm *ghvm)
 	mutex_unlock(&ghvm->resources_lock);
 }
 
+static int _gunyah_vm_io_handler_compare(const struct rb_node *node,
+					 const struct rb_node *parent)
+{
+	struct gunyah_vm_io_handler *n =
+		container_of(node, struct gunyah_vm_io_handler, node);
+	struct gunyah_vm_io_handler *p =
+		container_of(parent, struct gunyah_vm_io_handler, node);
+
+	if (n->addr < p->addr)
+		return -1;
+	if (n->addr > p->addr)
+		return 1;
+	if ((n->len && !p->len) || (!n->len && p->len))
+		return 0;
+	if (n->len < p->len)
+		return -1;
+	if (n->len > p->len)
+		return 1;
+	/* If only one of the io handlers has datamatch set, treat them as
+	 * identical for comparison purposes, since the handler without
+	 * datamatch covers every write that the handler with datamatch
+	 * does.
+	 */
+	if (n->datamatch != p->datamatch)
+		return 0;
+	if (n->data < p->data)
+		return -1;
+	if (n->data > p->data)
+		return 1;
+	return 0;
+}
+
+static int gunyah_vm_io_handler_compare(struct rb_node *node,
+					const struct rb_node *parent)
+{
+	return _gunyah_vm_io_handler_compare(node, parent);
+}
+
+static int gunyah_vm_io_handler_find(const void *key,
+				     const struct rb_node *node)
+{
+	const struct gunyah_vm_io_handler *k = key;
+
+	return _gunyah_vm_io_handler_compare(&k->node, node);
+}
+
+static struct gunyah_vm_io_handler *
+gunyah_vm_mgr_find_io_hdlr(struct gunyah_vm *ghvm, u64 addr, u64 len, u64 data)
+{
+	struct gunyah_vm_io_handler key = {
+		.addr = addr,
+		.len = len,
+		.datamatch = true,
+		.data = data,
+	};
+	struct rb_node *node;
+
+	node = rb_find(&key, &ghvm->mmio_handler_root,
+		       gunyah_vm_io_handler_find);
+	if (!node)
+		return NULL;
+
+	return container_of(node, struct gunyah_vm_io_handler, node);
+}
+
+int gunyah_vm_mmio_write(struct gunyah_vm *ghvm, u64 addr, u32 len, u64 data)
+{
+	struct gunyah_vm_io_handler *io_hdlr = NULL;
+	int ret;
+
+	down_read(&ghvm->mmio_handler_lock);
+	io_hdlr = gunyah_vm_mgr_find_io_hdlr(ghvm, addr, len, data);
+	if (!io_hdlr || !io_hdlr->ops || !io_hdlr->ops->write) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	ret = io_hdlr->ops->write(io_hdlr, addr, len, data);
+
+out:
+	up_read(&ghvm->mmio_handler_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(gunyah_vm_mmio_write);
+
+int gunyah_vm_add_io_handler(struct gunyah_vm *ghvm,
+			     struct gunyah_vm_io_handler *io_hdlr)
+{
+	struct rb_node *found;
+
+	if (io_hdlr->datamatch &&
+	    (!io_hdlr->len || io_hdlr->len > sizeof(io_hdlr->data)))
+		return -EINVAL;
+
+	down_write(&ghvm->mmio_handler_lock);
+	found = rb_find_add(&io_hdlr->node, &ghvm->mmio_handler_root,
+			    gunyah_vm_io_handler_compare);
+	up_write(&ghvm->mmio_handler_lock);
+
+	return found ? -EEXIST : 0;
+}
+EXPORT_SYMBOL_GPL(gunyah_vm_add_io_handler);
+
+void gunyah_vm_remove_io_handler(struct gunyah_vm *ghvm,
+				 struct gunyah_vm_io_handler *io_hdlr)
+{
+	down_write(&ghvm->mmio_handler_lock);
+	rb_erase(&io_hdlr->node, &ghvm->mmio_handler_root);
+	up_write(&ghvm->mmio_handler_lock);
+}
+EXPORT_SYMBOL_GPL(gunyah_vm_remove_io_handler);
+
 static int gunyah_vm_rm_notification_status(struct gunyah_vm *ghvm, void *data)
 {
 	struct gunyah_rm_vm_status_payload *payload = data;
@@ -404,6 +516,9 @@  static __must_check struct gunyah_vm *gunyah_vm_alloc(struct gunyah_rm *rm)
 	INIT_LIST_HEAD(&ghvm->resource_tickets);
 	xa_init(&ghvm->boot_context);
 
+	init_rwsem(&ghvm->mmio_handler_lock);
+	ghvm->mmio_handler_root = RB_ROOT;
+
 	mt_init(&ghvm->mm);
 	mt_init(&ghvm->bindings);
 	init_rwsem(&ghvm->bindings_lock);
diff --git a/drivers/virt/gunyah/vm_mgr.h b/drivers/virt/gunyah/vm_mgr.h
index 8cee93e551700..daddb1d0cb70b 100644
--- a/drivers/virt/gunyah/vm_mgr.h
+++ b/drivers/virt/gunyah/vm_mgr.h
@@ -11,6 +11,7 @@ 
 #include <linux/maple_tree.h>
 #include <linux/mutex.h>
 #include <linux/pagemap.h>
+#include <linux/rbtree.h>
 #include <linux/rwsem.h>
 #include <linux/set_memory.h>
 #include <linux/wait.h>
@@ -58,6 +59,9 @@  long gunyah_dev_vm_mgr_ioctl(struct gunyah_rm *rm, unsigned int cmd,
  * @guest_shared_extent_ticket: Resource ticket to the capability for
  *                              the memory extent that represents
  *                              memory shared with the guest.
+ * @mmio_handler_root: RB tree of MMIO handlers.
+ *                     Entries are &struct gunyah_vm_io_handler
+ * @mmio_handler_lock: Serialization of traversing @mmio_handler_root
  * @rm: Pointer to the resource manager struct to make RM calls
  * @parent: For logging
  * @nb: Notifier block for RM notifications
@@ -93,6 +97,8 @@  struct gunyah_vm {
 	struct gunyah_vm_resource_ticket addrspace_ticket,
 		host_private_extent_ticket, host_shared_extent_ticket,
 		guest_private_extent_ticket, guest_shared_extent_ticket;
+	struct rb_root mmio_handler_root;
+	struct rw_semaphore mmio_handler_lock;
 
 	struct gunyah_rm *rm;
 
@@ -119,6 +125,8 @@  struct gunyah_vm {
 	struct xarray boot_context;
 };
 
+int gunyah_vm_mmio_write(struct gunyah_vm *ghvm, u64 addr, u32 len, u64 data);
+
 /**
  * folio_mmapped() - Returns true if the folio is mapped into any vma
  * @folio: Folio to test
diff --git a/include/linux/gunyah.h b/include/linux/gunyah.h
index a9d58150de696..dbd5b0251b491 100644
--- a/include/linux/gunyah.h
+++ b/include/linux/gunyah.h
@@ -155,6 +155,35 @@  int gunyah_vm_add_resource_ticket(struct gunyah_vm *ghvm,
 void gunyah_vm_remove_resource_ticket(struct gunyah_vm *ghvm,
 				      struct gunyah_vm_resource_ticket *ticket);
 
+/*
+ * gunyah_vm_io_handler contains the info about an io device: the address
+ * it handles, an optional length/data match, and its associated ops.
+ */
+struct gunyah_vm_io_handler {
+	struct rb_node node;
+	u64 addr;
+
+	bool datamatch;
+	u8 len;
+	u64 data;
+	struct gunyah_vm_io_handler_ops *ops;
+};
+
+/*
+ * gunyah_vm_io_handler_ops contains function pointers associated with an io device.
+ */
+struct gunyah_vm_io_handler_ops {
+	int (*read)(struct gunyah_vm_io_handler *io_dev, u64 addr, u32 len,
+		    u64 data);
+	int (*write)(struct gunyah_vm_io_handler *io_dev, u64 addr, u32 len,
+		     u64 data);
+};
+
+int gunyah_vm_add_io_handler(struct gunyah_vm *ghvm,
+			     struct gunyah_vm_io_handler *io_dev);
+void gunyah_vm_remove_io_handler(struct gunyah_vm *ghvm,
+				 struct gunyah_vm_io_handler *io_dev);
+
 #define GUNYAH_RM_ACL_X BIT(0)
 #define GUNYAH_RM_ACL_W BIT(1)
 #define GUNYAH_RM_ACL_R BIT(2)
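
To spell out the matching rules encoded in _gunyah_vm_io_handler_compare():
gunyah_vm_mgr_find_io_hdlr() always builds its lookup key with datamatch
set and the faulting write's length and value, while a registered handler
with a zero length or with datamatch cleared compares as equal regardless
of the access length or data. A short sketch of the two cases follows; the
addresses, values, and example_write/example_ops names are hypothetical.

#include <linux/gunyah.h>

static int example_write(struct gunyah_vm_io_handler *io_hdlr,
			 u64 addr, u32 len, u64 data)
{
	return 0;	/* claim the write */
}

static struct gunyah_vm_io_handler_ops example_ops = {
	.write = example_write,
};

/*
 * Datamatch handler: only a 4-byte write of the value 1 to 0x1000 is
 * claimed; any other value or access width at 0x1000 finds no handler
 * and is forwarded to userspace.
 */
static struct gunyah_vm_io_handler datamatch_hdlr = {
	.addr = 0x1000,
	.len = 4,
	.datamatch = true,
	.data = 1,
	.ops = &example_ops,
};

/*
 * Wildcard handler: len == 0 and datamatch is false, so the length and
 * data comparisons are skipped and every write to 0x2000 is claimed.
 */
static struct gunyah_vm_io_handler wildcard_hdlr = {
	.addr = 0x2000,
	.ops = &example_ops,
};

Because handlers that differ only in length or datamatch compare as equal,
a wildcard handler and a datamatch handler cannot coexist at the same
address: once datamatch_hdlr is registered for 0x1000, adding a wildcard
handler there fails with -EEXIST (and vice versa).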