
[v11,20/26] virt: gunyah: Add resource tickets

Message ID: 20230304010632.2127470-21-quic_eberman@quicinc.com
State: New, archived
Series: Drivers for gunyah hypervisor

Commit Message

Elliot Berman March 4, 2023, 1:06 a.m. UTC
Some VM functions need to acquire Gunyah resources. For instance, Gunyah
vCPUs are exposed to the host as a resource. The Gunyah vCPU function
will register a resource ticket and be able to interact with the
hypervisor once the resource ticket is filled.

Resource tickets are the mechanism for functions to acquire ownership of
Gunyah resources. Gunyah functions can be created before the VM's
resources are created and made available to Linux. A resource ticket
identifies a type of resource and a label of a resource which the ticket
holder is interested in.

Resources are created by Gunyah as configured in the VM's devicetree
configuration. Gunyah doesn't process the label, which makes it
possible for userspace to create multiple resources with the same
label. Resource ticket owners therefore need to be prepared for
populate to be called multiple times if userspace creates multiple
resources with the same label.
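
For example, a consumer might look like this (an illustrative sketch
only: the function names are placeholders and GH_RESOURCE_TYPE_VCPU is
an assumed enum value, not something added by this patch):

	static int gh_vcpu_populate(struct gh_vm_resource_ticket *ticket,
				    struct gh_resource *ghrsc)
	{
		/* Stash the resource; return 0 to accept (claim) it. */
		return 0;
	}

	static void gh_vcpu_unpopulate(struct gh_vm_resource_ticket *ticket,
				       struct gh_resource *ghrsc)
	{
		/* Undo whatever populate set up for this resource. */
	}

	...
	ticket->resource_type = GH_RESOURCE_TYPE_VCPU;	/* assumed value */
	ticket->label = label;
	ticket->owner = THIS_MODULE;
	ticket->populate = gh_vcpu_populate;
	ticket->unpopulate = gh_vcpu_unpopulate;
	ret = gh_vm_add_resource_ticket(ghvm, ticket);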

Signed-off-by: Elliot Berman <quic_eberman@quicinc.com>
---
 drivers/virt/gunyah/vm_mgr.c  | 112 +++++++++++++++++++++++++++++++++-
 drivers/virt/gunyah/vm_mgr.h  |   4 ++
 include/linux/gunyah_vm_mgr.h |  14 +++++
 3 files changed, 129 insertions(+), 1 deletion(-)

Comments

Alex Elder March 31, 2023, 2:27 p.m. UTC | #1
On 3/3/23 7:06 PM, Elliot Berman wrote:
> Some VM functions need to acquire Gunyah resources. For instance, Gunyah
> vCPUs are exposed to the host as a resource. The Gunyah vCPU function
> will register a resource ticket and be able to interact with the
> hypervisor once the resource ticket is filled.
> 
> Resource tickets are the mechanism for functions to acquire ownership of
> Gunyah resources. Gunyah functions can be created before the VM's
> resources are created and made available to Linux. A resource ticket
> identifies a type of resource and a label of a resource which the ticket
> holder is interested in.
> 
> Resources are created by Gunyah as configured in the VM's devicetree
> configuration. Gunyah doesn't process the label, which makes it
> possible for userspace to create multiple resources with the same
> label. Resource ticket owners therefore need to be prepared for
> populate to be called multiple times if userspace creates multiple
> resources with the same label.
> 
> Signed-off-by: Elliot Berman <quic_eberman@quicinc.com>

One possibly substantive suggestion here, plus a couple suggestions
to add or revise comments.

					-Alex

> ---
>   drivers/virt/gunyah/vm_mgr.c  | 112 +++++++++++++++++++++++++++++++++-
>   drivers/virt/gunyah/vm_mgr.h  |   4 ++
>   include/linux/gunyah_vm_mgr.h |  14 +++++
>   3 files changed, 129 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/virt/gunyah/vm_mgr.c b/drivers/virt/gunyah/vm_mgr.c
> index 88db011395ec..0269bcdaf692 100644
> --- a/drivers/virt/gunyah/vm_mgr.c
> +++ b/drivers/virt/gunyah/vm_mgr.c
> @@ -165,6 +165,74 @@ static long gh_vm_rm_function(struct gh_vm *ghvm, struct gh_fn_desc *f)
>   	return r;
>   }
>   
> +int gh_vm_add_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket)
> +{
> +	struct gh_vm_resource_ticket *iter;
> +	struct gh_resource *ghrsc;
> +	int ret = 0;
> +
> +	mutex_lock(&ghvm->resources_lock);
> +	list_for_each_entry(iter, &ghvm->resource_tickets, list) {
> +		if (iter->resource_type == ticket->resource_type && iter->label == ticket->label) {
> +			ret = -EEXIST;
> +			goto out;
> +		}
> +	}
> +
> +	if (!try_module_get(ticket->owner)) {
> +		ret = -ENODEV;
> +		goto out;
> +	}
> +
> +	list_add(&ticket->list, &ghvm->resource_tickets);
> +	INIT_LIST_HEAD(&ticket->resources);
> +
> +	list_for_each_entry(ghrsc, &ghvm->resources, list) {
> +		if (ghrsc->type == ticket->resource_type && ghrsc->rm_label == ticket->label) {
> +			if (!ticket->populate(ticket, ghrsc))
> +				list_move(&ghrsc->list, &ticket->resources);
> +		}
> +	}
> +out:
> +	mutex_unlock(&ghvm->resources_lock);
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(gh_vm_add_resource_ticket);
> +
> +void gh_vm_remove_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket)
> +{
> +	struct gh_resource *ghrsc, *iter;
> +
> +	mutex_lock(&ghvm->resources_lock);
> +	list_for_each_entry_safe(ghrsc, iter, &ticket->resources, list) {
> +		ticket->unpopulate(ticket, ghrsc);
> +		list_move(&ghrsc->list, &ghvm->resources);
> +	}
> +
> +	module_put(ticket->owner);
> +	list_del(&ticket->list);
> +	mutex_unlock(&ghvm->resources_lock);
> +}
> +EXPORT_SYMBOL_GPL(gh_vm_remove_resource_ticket);
> +
> +static void gh_vm_add_resource(struct gh_vm *ghvm, struct gh_resource *ghrsc)
> +{
> +	struct gh_vm_resource_ticket *ticket;
> +
> +	mutex_lock(&ghvm->resources_lock);
> +	list_for_each_entry(ticket, &ghvm->resource_tickets, list) {
> +		if (ghrsc->type == ticket->resource_type && ghrsc->rm_label == ticket->label) {
> +			if (!ticket->populate(ticket, ghrsc)) {
> +				list_add(&ghrsc->list, &ticket->resources);
> +				goto found;
> +			}

I think the "goto found" belongs here, unconditionally.
You disallow adding more than one ticket of a given type
with the same label.  So you will never match another
ticket once you've matched this one.

The populate function generally shouldn't fail.  I think
it only fails if you find a duplicate, and again, I think
you prevent that from happening.  (But if it does, you
silently ignore it...)
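
In other words, something along these lines (an untested sketch; the
else branch keeps a resource whose populate call failed on the VM's
list, matching the current fall-through behavior):

	if (ghrsc->type == ticket->resource_type && ghrsc->rm_label == ticket->label) {
		if (!ticket->populate(ticket, ghrsc))
			list_add(&ghrsc->list, &ticket->resources);
		else
			list_add(&ghrsc->list, &ghvm->resources);
		goto found;	/* at most one ticket can match */
	}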

> +		}
> +	}
> +	list_add(&ghrsc->list, &ghvm->resources);
> +found:
> +	mutex_unlock(&ghvm->resources_lock);
> +}
> +
>   static int gh_vm_rm_notification_status(struct gh_vm *ghvm, void *data)
>   {
>   	struct gh_rm_vm_status_payload *payload = data;
> @@ -230,6 +298,8 @@ static void gh_vm_free(struct work_struct *work)
>   {
>   	struct gh_vm *ghvm = container_of(work, struct gh_vm, free_work);
>   	struct gh_vm_function_instance *inst, *iiter;
> +	struct gh_vm_resource_ticket *ticket, *titer;
> +	struct gh_resource *ghrsc, *riter;
>   	struct gh_vm_mem *mapping, *tmp;
>   	int ret;
>   
> @@ -246,6 +316,25 @@ static void gh_vm_free(struct work_struct *work)
>   		}
>   		mutex_unlock(&ghvm->fn_lock);
>   
> +		mutex_lock(&ghvm->resources_lock);
> +		if (!list_empty(&ghvm->resource_tickets)) {
> +			dev_warn(ghvm->parent, "Dangling resource tickets:\n");
> +			list_for_each_entry_safe(ticket, titer, &ghvm->resource_tickets, list) {
> +				dev_warn(ghvm->parent, "  %pS\n", ticket->populate);
> +				gh_vm_remove_resource_ticket(ghvm, ticket);
> +			}
> +		}
> +
> +		list_for_each_entry_safe(ghrsc, riter, &ghvm->resources, list) {
> +			gh_rm_free_resource(ghrsc);
> +		}
> +		mutex_unlock(&ghvm->resources_lock);
> +
> +		ret = gh_rm_vm_reset(ghvm->rm, ghvm->vmid);
> +		if (ret)
> +			dev_err(ghvm->parent, "Failed to reset the vm: %d\n", ret);
> +		wait_event(ghvm->vm_status_wait, ghvm->vm_status == GH_RM_VM_STATUS_RESET);
> +
>   		mutex_lock(&ghvm->mm_lock);
>   		list_for_each_entry_safe(mapping, tmp, &ghvm->memory_mappings, list) {
>   			gh_vm_mem_reclaim(ghvm, mapping);
> @@ -329,6 +418,9 @@ static __must_check struct gh_vm *gh_vm_alloc(struct gh_rm *rm)
>   	init_rwsem(&ghvm->status_lock);
>   	INIT_WORK(&ghvm->free_work, gh_vm_free);
>   	kref_init(&ghvm->kref);
> +	mutex_init(&ghvm->resources_lock);
> +	INIT_LIST_HEAD(&ghvm->resources);
> +	INIT_LIST_HEAD(&ghvm->resource_tickets);
>   	INIT_LIST_HEAD(&ghvm->functions);
>   	ghvm->vm_status = GH_RM_VM_STATUS_LOAD;
>   
> @@ -338,9 +430,11 @@ static __must_check struct gh_vm *gh_vm_alloc(struct gh_rm *rm)
>   static int gh_vm_start(struct gh_vm *ghvm)
>   {
>   	struct gh_vm_mem *mapping;
> +	struct gh_rm_hyp_resources *resources;
> +	struct gh_resource *ghrsc;
>   	u64 dtb_offset;
>   	u32 mem_handle;
> -	int ret;
> +	int ret, i, n;
>   
>   	down_write(&ghvm->status_lock);
>   	if (ghvm->vm_status != GH_RM_VM_STATUS_LOAD) {
> @@ -394,6 +488,22 @@ static int gh_vm_start(struct gh_vm *ghvm)
>   		goto err;
>   	}
>   
> +	ret = gh_rm_get_hyp_resources(ghvm->rm, ghvm->vmid, &resources);
> +	if (ret) {
> +		dev_warn(ghvm->parent, "Failed to get hypervisor resources for VM: %d\n", ret);
> +		goto err;
> +	}
> +
> +	for (i = 0, n = le32_to_cpu(resources->n_entries); i < n; i++) {
> +		ghrsc = gh_rm_alloc_resource(ghvm->rm, &resources->entries[i]);
> +		if (!ghrsc) {
> +			ret = -ENOMEM;
> +			goto err;
> +		}
> +
> +		gh_vm_add_resource(ghvm, ghrsc);
> +	}
> +
>   	ret = gh_rm_vm_start(ghvm->rm, ghvm->vmid);
>   	if (ret) {
>   		dev_warn(ghvm->parent, "Failed to start VM: %d\n", ret);
> diff --git a/drivers/virt/gunyah/vm_mgr.h b/drivers/virt/gunyah/vm_mgr.h
> index 7bd271bad721..18d0e1effd25 100644
> --- a/drivers/virt/gunyah/vm_mgr.h
> +++ b/drivers/virt/gunyah/vm_mgr.h
> @@ -7,6 +7,7 @@
>   #define _GH_PRIV_VM_MGR_H
>   
>   #include <linux/gunyah_rsc_mgr.h>
> +#include <linux/gunyah_vm_mgr.h>
>   #include <linux/list.h>
>   #include <linux/kref.h>
>   #include <linux/miscdevice.h>
> @@ -51,6 +52,9 @@ struct gh_vm {
>   	struct list_head memory_mappings;
>   	struct mutex fn_lock;
>   	struct list_head functions;
> +	struct mutex resources_lock;
> +	struct list_head resources;
> +	struct list_head resource_tickets;
>   };
>   
>   int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *region);
> diff --git a/include/linux/gunyah_vm_mgr.h b/include/linux/gunyah_vm_mgr.h
> index 3825c951790a..01b1761b5923 100644
> --- a/include/linux/gunyah_vm_mgr.h
> +++ b/include/linux/gunyah_vm_mgr.h
> @@ -70,4 +70,18 @@ void gh_vm_function_unregister(struct gh_vm_function *f);
>   	DECLARE_GH_VM_FUNCTION(_name, _type, _bind, _unbind);	\
>   	module_gh_vm_function(_name)
>   
> +struct gh_vm_resource_ticket {
> +	struct list_head list; /* for gh_vm's resources list */

Maybe "resource lists" above (it's for the resources list and
resource_tickets list).

> +	struct list_head resources; /* for gh_resources's list */

Maybe:	/* resources associated with this ticket */
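
Taken together, the two fields might then read:

	struct list_head list; /* for gh_vm's resource lists */
	struct list_head resources; /* resources associated with this ticket */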

> +	enum gh_resource_type resource_type;
> +	u32 label;
> +
> +	struct module *owner;
> +	int (*populate)(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc);
> +	void (*unpopulate)(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc);
> +};
> +
> +int gh_vm_add_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket);
> +void gh_vm_remove_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket);
> +
>   #endif
Elliot Berman April 17, 2023, 10:57 p.m. UTC | #2
On 3/31/2023 7:27 AM, Alex Elder wrote:
> On 3/3/23 7:06 PM, Elliot Berman wrote:
>> Some VM functions need to acquire Gunyah resources. For instance, Gunyah
>> vCPUs are exposed to the host as a resource. The Gunyah vCPU function
>> will register a resource ticket and be able to interact with the
>> hypervisor once the resource ticket is filled.
>>
>> Resource tickets are the mechanism for functions to acquire ownership of
>> Gunyah resources. Gunyah functions can be created before the VM's
>> resources are created and made available to Linux. A resource ticket
>> identifies a type of resource and a label of a resource which the ticket
>> holder is interested in.
>>
>> Resources are created by Gunyah as configured in the VM's devicetree
>> configuration. Gunyah doesn't process the label, which makes it
>> possible for userspace to create multiple resources with the same
>> label. Resource ticket owners therefore need to be prepared for
>> populate to be called multiple times if userspace creates multiple
>> resources with the same label.
>>
>> Signed-off-by: Elliot Berman <quic_eberman@quicinc.com>
> 
> One possibly substantive suggestion here, plus a couple suggestions
> to add or revise comments.
> 
>                      -Alex
> 
>> ---
>>   drivers/virt/gunyah/vm_mgr.c  | 112 +++++++++++++++++++++++++++++++++-
>>   drivers/virt/gunyah/vm_mgr.h  |   4 ++
>>   include/linux/gunyah_vm_mgr.h |  14 +++++
>>   3 files changed, 129 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/virt/gunyah/vm_mgr.c b/drivers/virt/gunyah/vm_mgr.c
>> index 88db011395ec..0269bcdaf692 100644
>> --- a/drivers/virt/gunyah/vm_mgr.c
>> +++ b/drivers/virt/gunyah/vm_mgr.c
>> @@ -165,6 +165,74 @@ static long gh_vm_rm_function(struct gh_vm *ghvm, struct gh_fn_desc *f)
>>       return r;
>>   }
>> +int gh_vm_add_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket)
>> +{
>> +    struct gh_vm_resource_ticket *iter;
>> +    struct gh_resource *ghrsc;
>> +    int ret = 0;
>> +
>> +    mutex_lock(&ghvm->resources_lock);
>> +    list_for_each_entry(iter, &ghvm->resource_tickets, list) {
>> +        if (iter->resource_type == ticket->resource_type && iter->label == ticket->label) {
>> +            ret = -EEXIST;
>> +            goto out;
>> +        }
>> +    }
>> +
>> +    if (!try_module_get(ticket->owner)) {
>> +        ret = -ENODEV;
>> +        goto out;
>> +    }
>> +
>> +    list_add(&ticket->list, &ghvm->resource_tickets);
>> +    INIT_LIST_HEAD(&ticket->resources);
>> +
>> +    list_for_each_entry(ghrsc, &ghvm->resources, list) {
>> +        if (ghrsc->type == ticket->resource_type && ghrsc->rm_label == ticket->label) {
>> +            if (!ticket->populate(ticket, ghrsc))
>> +                list_move(&ghrsc->list, &ticket->resources);
>> +        }
>> +    }
>> +out:
>> +    mutex_unlock(&ghvm->resources_lock);
>> +    return ret;
>> +}
>> +EXPORT_SYMBOL_GPL(gh_vm_add_resource_ticket);
>> +
>> +void gh_vm_remove_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket)
>> +{
>> +    struct gh_resource *ghrsc, *iter;
>> +
>> +    mutex_lock(&ghvm->resources_lock);
>> +    list_for_each_entry_safe(ghrsc, iter, &ticket->resources, list) {
>> +        ticket->unpopulate(ticket, ghrsc);
>> +        list_move(&ghrsc->list, &ghvm->resources);
>> +    }
>> +
>> +    module_put(ticket->owner);
>> +    list_del(&ticket->list);
>> +    mutex_unlock(&ghvm->resources_lock);
>> +}
>> +EXPORT_SYMBOL_GPL(gh_vm_remove_resource_ticket);
>> +
>> +static void gh_vm_add_resource(struct gh_vm *ghvm, struct gh_resource *ghrsc)
>> +{
>> +    struct gh_vm_resource_ticket *ticket;
>> +
>> +    mutex_lock(&ghvm->resources_lock);
>> +    list_for_each_entry(ticket, &ghvm->resource_tickets, list) {
>> +        if (ghrsc->type == ticket->resource_type && ghrsc->rm_label == ticket->label) {
>> +            if (!ticket->populate(ticket, ghrsc)) {
>> +                list_add(&ghrsc->list, &ticket->resources);
>> +                goto found;
>> +            }
> 
> I think the "goto found" belongs here, unconditionally.
> You disallow adding more than one ticket of a given type
> with the same label.  So you will never match another
> ticket once you've matched this one.
> 
> The populate function generally shouldn't fail.  I think
> it only fails if you find a duplicate, and again, I think
> you prevent that from happening.  (But if it does, you
> silently ignore it...)
> 

I agree with this suggestion; there's no need to keep checking other
tickets once we find the match. I'll move the "goto found" line.

[snip]

Thanks,
Elliot

Patch

diff --git a/drivers/virt/gunyah/vm_mgr.c b/drivers/virt/gunyah/vm_mgr.c
index 88db011395ec..0269bcdaf692 100644
--- a/drivers/virt/gunyah/vm_mgr.c
+++ b/drivers/virt/gunyah/vm_mgr.c
@@ -165,6 +165,74 @@  static long gh_vm_rm_function(struct gh_vm *ghvm, struct gh_fn_desc *f)
 	return r;
 }
 
+int gh_vm_add_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket)
+{
+	struct gh_vm_resource_ticket *iter;
+	struct gh_resource *ghrsc;
+	int ret = 0;
+
+	mutex_lock(&ghvm->resources_lock);
+	list_for_each_entry(iter, &ghvm->resource_tickets, list) {
+		if (iter->resource_type == ticket->resource_type && iter->label == ticket->label) {
+			ret = -EEXIST;
+			goto out;
+		}
+	}
+
+	if (!try_module_get(ticket->owner)) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	list_add(&ticket->list, &ghvm->resource_tickets);
+	INIT_LIST_HEAD(&ticket->resources);
+
+	list_for_each_entry(ghrsc, &ghvm->resources, list) {
+		if (ghrsc->type == ticket->resource_type && ghrsc->rm_label == ticket->label) {
+			if (!ticket->populate(ticket, ghrsc))
+				list_move(&ghrsc->list, &ticket->resources);
+		}
+	}
+out:
+	mutex_unlock(&ghvm->resources_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(gh_vm_add_resource_ticket);
+
+void gh_vm_remove_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket)
+{
+	struct gh_resource *ghrsc, *iter;
+
+	mutex_lock(&ghvm->resources_lock);
+	list_for_each_entry_safe(ghrsc, iter, &ticket->resources, list) {
+		ticket->unpopulate(ticket, ghrsc);
+		list_move(&ghrsc->list, &ghvm->resources);
+	}
+
+	module_put(ticket->owner);
+	list_del(&ticket->list);
+	mutex_unlock(&ghvm->resources_lock);
+}
+EXPORT_SYMBOL_GPL(gh_vm_remove_resource_ticket);
+
+static void gh_vm_add_resource(struct gh_vm *ghvm, struct gh_resource *ghrsc)
+{
+	struct gh_vm_resource_ticket *ticket;
+
+	mutex_lock(&ghvm->resources_lock);
+	list_for_each_entry(ticket, &ghvm->resource_tickets, list) {
+		if (ghrsc->type == ticket->resource_type && ghrsc->rm_label == ticket->label) {
+			if (!ticket->populate(ticket, ghrsc)) {
+				list_add(&ghrsc->list, &ticket->resources);
+				goto found;
+			}
+		}
+	}
+	list_add(&ghrsc->list, &ghvm->resources);
+found:
+	mutex_unlock(&ghvm->resources_lock);
+}
+
 static int gh_vm_rm_notification_status(struct gh_vm *ghvm, void *data)
 {
 	struct gh_rm_vm_status_payload *payload = data;
@@ -230,6 +298,8 @@  static void gh_vm_free(struct work_struct *work)
 {
 	struct gh_vm *ghvm = container_of(work, struct gh_vm, free_work);
 	struct gh_vm_function_instance *inst, *iiter;
+	struct gh_vm_resource_ticket *ticket, *titer;
+	struct gh_resource *ghrsc, *riter;
 	struct gh_vm_mem *mapping, *tmp;
 	int ret;
 
@@ -246,6 +316,25 @@  static void gh_vm_free(struct work_struct *work)
 		}
 		mutex_unlock(&ghvm->fn_lock);
 
+		mutex_lock(&ghvm->resources_lock);
+		if (!list_empty(&ghvm->resource_tickets)) {
+			dev_warn(ghvm->parent, "Dangling resource tickets:\n");
+			list_for_each_entry_safe(ticket, titer, &ghvm->resource_tickets, list) {
+				dev_warn(ghvm->parent, "  %pS\n", ticket->populate);
+				gh_vm_remove_resource_ticket(ghvm, ticket);
+			}
+		}
+
+		list_for_each_entry_safe(ghrsc, riter, &ghvm->resources, list) {
+			gh_rm_free_resource(ghrsc);
+		}
+		mutex_unlock(&ghvm->resources_lock);
+
+		ret = gh_rm_vm_reset(ghvm->rm, ghvm->vmid);
+		if (ret)
+			dev_err(ghvm->parent, "Failed to reset the vm: %d\n", ret);
+		wait_event(ghvm->vm_status_wait, ghvm->vm_status == GH_RM_VM_STATUS_RESET);
+
 		mutex_lock(&ghvm->mm_lock);
 		list_for_each_entry_safe(mapping, tmp, &ghvm->memory_mappings, list) {
 			gh_vm_mem_reclaim(ghvm, mapping);
@@ -329,6 +418,9 @@  static __must_check struct gh_vm *gh_vm_alloc(struct gh_rm *rm)
 	init_rwsem(&ghvm->status_lock);
 	INIT_WORK(&ghvm->free_work, gh_vm_free);
 	kref_init(&ghvm->kref);
+	mutex_init(&ghvm->resources_lock);
+	INIT_LIST_HEAD(&ghvm->resources);
+	INIT_LIST_HEAD(&ghvm->resource_tickets);
 	INIT_LIST_HEAD(&ghvm->functions);
 	ghvm->vm_status = GH_RM_VM_STATUS_LOAD;
 
@@ -338,9 +430,11 @@  static __must_check struct gh_vm *gh_vm_alloc(struct gh_rm *rm)
 static int gh_vm_start(struct gh_vm *ghvm)
 {
 	struct gh_vm_mem *mapping;
+	struct gh_rm_hyp_resources *resources;
+	struct gh_resource *ghrsc;
 	u64 dtb_offset;
 	u32 mem_handle;
-	int ret;
+	int ret, i, n;
 
 	down_write(&ghvm->status_lock);
 	if (ghvm->vm_status != GH_RM_VM_STATUS_LOAD) {
@@ -394,6 +488,22 @@  static int gh_vm_start(struct gh_vm *ghvm)
 		goto err;
 	}
 
+	ret = gh_rm_get_hyp_resources(ghvm->rm, ghvm->vmid, &resources);
+	if (ret) {
+		dev_warn(ghvm->parent, "Failed to get hypervisor resources for VM: %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0, n = le32_to_cpu(resources->n_entries); i < n; i++) {
+		ghrsc = gh_rm_alloc_resource(ghvm->rm, &resources->entries[i]);
+		if (!ghrsc) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		gh_vm_add_resource(ghvm, ghrsc);
+	}
+
 	ret = gh_rm_vm_start(ghvm->rm, ghvm->vmid);
 	if (ret) {
 		dev_warn(ghvm->parent, "Failed to start VM: %d\n", ret);
diff --git a/drivers/virt/gunyah/vm_mgr.h b/drivers/virt/gunyah/vm_mgr.h
index 7bd271bad721..18d0e1effd25 100644
--- a/drivers/virt/gunyah/vm_mgr.h
+++ b/drivers/virt/gunyah/vm_mgr.h
@@ -7,6 +7,7 @@ 
 #define _GH_PRIV_VM_MGR_H
 
 #include <linux/gunyah_rsc_mgr.h>
+#include <linux/gunyah_vm_mgr.h>
 #include <linux/list.h>
 #include <linux/kref.h>
 #include <linux/miscdevice.h>
@@ -51,6 +52,9 @@  struct gh_vm {
 	struct list_head memory_mappings;
 	struct mutex fn_lock;
 	struct list_head functions;
+	struct mutex resources_lock;
+	struct list_head resources;
+	struct list_head resource_tickets;
 };
 
 int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *region);
diff --git a/include/linux/gunyah_vm_mgr.h b/include/linux/gunyah_vm_mgr.h
index 3825c951790a..01b1761b5923 100644
--- a/include/linux/gunyah_vm_mgr.h
+++ b/include/linux/gunyah_vm_mgr.h
@@ -70,4 +70,18 @@  void gh_vm_function_unregister(struct gh_vm_function *f);
 	DECLARE_GH_VM_FUNCTION(_name, _type, _bind, _unbind);	\
 	module_gh_vm_function(_name)
 
+struct gh_vm_resource_ticket {
+	struct list_head list; /* for gh_vm's resources list */
+	struct list_head resources; /* for gh_resources's list */
+	enum gh_resource_type resource_type;
+	u32 label;
+
+	struct module *owner;
+	int (*populate)(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc);
+	void (*unpopulate)(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc);
+};
+
+int gh_vm_add_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket);
+void gh_vm_remove_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket);
+
 #endif