[RFC/PATCH,v2,03/12] hw/arm/virt: confidential guest support

Message ID 20240516143356.1739402-4-quic_svaddagi@quicinc.com (mailing list archive)
State New
Series Gunyah hypervisor support

Commit Message

Srivatsa Vaddagiri May 16, 2024, 2:33 p.m. UTC
This adds support for launching hypervisor-assisted confidential guests,
whose memory is protected from a potentially untrusted host. The
hypervisor can set up the host's page tables so that the host loses
access to guest memory.

Since some guest drivers may need to exchange data with their host
counterparts via shared memory, optionally allow setting aside part of
the confidential guest's memory as "shared". The size of this shared
region is specified via the optional "swiotlb-size" parameter.

-machine virt,confidential-guest-support=prot0 \
	-object arm-confidential-guest,id=prot0,swiotlb-size=16777216

The size of this shared memory is indicated to the guest via the size/reg
property of the device-tree node "/reserved-memory/restricted_dma_reserved".
A memory-region property is added to the device-tree node representing the
PCIe host bridge, so that all DMA allocations requested by the guest's
virtio-pcie device drivers are satisfied from the shared swiotlb region.
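
For illustration only, assuming 1 GiB of guest RAM at the virt machine's
0x40000000 base, the swiotlb-size=16777216 example above, and a hypervisor
without guest memory-sharing APIs (so the reg form is used), the generated
guest device tree would contain nodes roughly like the following sketch
(the reg values, label and node layout are illustrative, not taken from a
real run):

    reserved-memory {
        #address-cells = <2>;
        #size-cells = <2>;
        ranges;

        /* 16 MiB shared swiotlb pool carved from the top of guest RAM */
        restricted_dma: restricted_dma_reserved@7f000000 {
            compatible = "restricted-dma-pool";
            reg = <0x0 0x7f000000 0x0 0x1000000>;
        };
    };

    pcie {  /* PCIe host-bridge node; unit address omitted in this sketch */
        memory-region = <&restricted_dma>;
    };

When the hypervisor does let the guest share its own memory with the host,
only a size/alignment pair is emitted instead of reg, and the guest picks
the buffer's location itself.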

Signed-off-by: Srivatsa Vaddagiri <quic_svaddagi@quicinc.com>
---
 qapi/qom.json         |  14 +++++
 include/hw/arm/virt.h |   1 +
 hw/arm/virt.c         | 141 +++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 155 insertions(+), 1 deletion(-)

Comments

Daniel P. Berrangé May 16, 2024, 3:04 p.m. UTC | #1
On Thu, May 16, 2024 at 02:33:47PM +0000, Srivatsa Vaddagiri wrote:
> This adds support to launch hypervisor-assisted confidential guests,
> where guest's memory is protected from a potentially untrusted host.
> Hypervisor can setup host's page-tables so that it loses access to guest
> memory.
> 
> Since some guest drivers may need to communicate data with their host
> counterparts via shared memory, optionally allow setting aside some part
> of the confidential guest's memory as "shared". The size of this shared
> memory is specified via the optional "swiotlb-size" parameter.
> 
> -machine virt,confidential-guest-support=prot0 \
> 	-object arm-confidential-guest,id=prot0,swiotlb-size=16777216
> 
> The size of this shared memory is indicated to the guest in size/reg
> property of device-tree node "/reserved-memory/restricted_dma_reserved".
> A memory-region property is added to device-tree node representing
> virtio-pcie hub, so that all DMA allocations requested by guest's virtio-pcie
> device drivers are satisfied from the shared swiotlb region.

For reference, there is another series proposing confidential guest
support for the 'virt' machine on AArch64 with KVM

 https://lists.nongnu.org/archive/html/qemu-devel/2024-04/msg02742.html

I've no idea how closely your impl matches the proposed KVM impl, i.e.
whether we need 2 distinct "ConfidentialGuest" subclasses for KVM vs
Gunyah, or whether 1 can cope with both.  If we do need a distinct
subclass for each hypervisor, then calling this Gunyah-targeted
object 'arm-confidential-guest' is too broad a name.

With regards,
Daniel
Srivatsa Vaddagiri May 16, 2024, 7:41 p.m. UTC | #2
* Daniel P. Berrangé <berrange@redhat.com> [2024-05-16 16:04:24]:

> On Thu, May 16, 2024 at 02:33:47PM +0000, Srivatsa Vaddagiri wrote:
> > This adds support to launch hypervisor-assisted confidential guests,
> > where guest's memory is protected from a potentially untrusted host.
> > Hypervisor can setup host's page-tables so that it loses access to guest
> > memory.
> > 
> > Since some guest drivers may need to communicate data with their host
> > counterparts via shared memory, optionally allow setting aside some part
> > of the confidential guest's memory as "shared". The size of this shared
> > memory is specified via the optional "swiotlb-size" parameter.
> > 
> > -machine virt,confidential-guest-support=prot0 \
> > 	-object arm-confidential-guest,id=prot0,swiotlb-size=16777216
> > 
> > The size of this shared memory is indicated to the guest in size/reg
> > property of device-tree node "/reserved-memory/restricted_dma_reserved".
> > A memory-region property is added to device-tree node representing
> > virtio-pcie hub, so that all DMA allocations requested by guest's virtio-pcie
> > device drivers are satisfied from the shared swiotlb region.
> 
> For reference, there is another series proposing confidential guest
> support for the 'virt' machine on AArch64 with KVM
> 
>  https://lists.nongnu.org/archive/html/qemu-devel/2024-04/msg02742.html
> 
> I've no idea how closely your impl matches the proposed KVM impl, i.e.
> whether we need 2 distinct "ConfidentialGuest" subclasses for KVM vs
> Gunyah, or whether 1 can cope with both.  If we do need a distinct
> subclass for each hypervisor, then calling this Gunyah-targeted
> object 'arm-confidential-guest' is too broad a name.

Thanks for that pointer! Let me study the proposed KVM implementation and 
see how we can consolidate support for KVM and Gunyah hypervisors.

- vatsa

Patch

diff --git a/qapi/qom.json b/qapi/qom.json
index 38dde6d785..9b3cd7ce22 100644
--- a/qapi/qom.json
+++ b/qapi/qom.json
@@ -874,6 +874,18 @@ 
   'base': 'RngProperties',
   'data': { '*filename': 'str' } }
 
+##
+# @ArmConfidentialGuestProperties:
+#
+# Properties for arm-confidential-guest objects.
+#
+# @swiotlb-size: size, in bytes, of the shared swiotlb (bounce buffer) region
+#
+# Since: 9.1
+##
+{ 'struct': 'ArmConfidentialGuestProperties',
+  'data': { 'swiotlb-size' : 'uint64' } }
+
 ##
 # @SevGuestProperties:
 #
@@ -997,6 +1009,7 @@ 
     { 'name': 'secret_keyring',
       'if': 'CONFIG_SECRET_KEYRING' },
     'sev-guest',
+    'arm-confidential-guest',
     'thread-context',
     's390-pv-guest',
     'throttle-group',
@@ -1067,6 +1080,7 @@ 
       'secret_keyring':             { 'type': 'SecretKeyringProperties',
                                       'if': 'CONFIG_SECRET_KEYRING' },
       'sev-guest':                  'SevGuestProperties',
+      'arm-confidential-guest':     'ArmConfidentialGuestProperties',
       'thread-context':             'ThreadContextProperties',
       'throttle-group':             'ThrottleGroupProperties',
       'tls-creds-anon':             'TlsCredsAnonProperties',
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index bb486d36b1..1e23f20972 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -165,6 +165,7 @@  struct VirtMachineState {
     uint32_t clock_phandle;
     uint32_t gic_phandle;
     uint32_t msi_phandle;
+    uint32_t restricted_dma_phandle;
     uint32_t iommu_phandle;
     int psci_conduit;
     hwaddr highest_gpa;
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 3c93c0c0a6..2a3eb4075d 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -84,6 +84,9 @@ 
 #include "hw/virtio/virtio-iommu.h"
 #include "hw/char/pl011.h"
 #include "qemu/guest-random.h"
+#include "sysemu/cpus.h"
+#include "exec/confidential-guest-support.h"
+#include "qom/object_interfaces.h"
 
 static GlobalProperty arm_virt_compat[] = {
     { TYPE_VIRTIO_IOMMU_PCI, "aw-bits", "48" },
@@ -1545,6 +1548,11 @@  static void create_pcie(VirtMachineState *vms)
                            nr_pcie_buses - 1);
     qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0);
 
+    if (vms->restricted_dma_phandle) {
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "memory-region",
+                                vms->restricted_dma_phandle);
+    }
+
     if (vms->msi_phandle) {
         qemu_fdt_setprop_cells(ms->fdt, nodename, "msi-map",
                                0, vms->msi_phandle, 0, 0x10000);
@@ -2065,6 +2073,129 @@  static void virt_cpu_post_init(VirtMachineState *vms, MemoryRegion *sysmem)
     }
 }
 
+#define TYPE_ARM_CONFIDENTIAL_GUEST "arm-confidential-guest"
+OBJECT_DECLARE_SIMPLE_TYPE(ArmConfidentialGuestState, ARM_CONFIDENTIAL_GUEST)
+
+struct ArmConfidentialGuestState {
+    ConfidentialGuestSupport parent_obj;
+
+    hwaddr swiotlb_size;
+};
+
+static ArmConfidentialGuestState *acg;
+
+static void
+arm_confidential_guest_instance_init(Object *obj)
+{
+    ArmConfidentialGuestState *acg = ARM_CONFIDENTIAL_GUEST(obj);
+
+    object_property_add_uint64_ptr(obj, "swiotlb-size", &acg->swiotlb_size,
+                                   OBJ_PROP_FLAG_READWRITE);
+}
+
+static const TypeInfo confidential_guest_info = {
+    .parent = TYPE_CONFIDENTIAL_GUEST_SUPPORT,
+    .name = TYPE_ARM_CONFIDENTIAL_GUEST,
+    .instance_size = sizeof(ArmConfidentialGuestState),
+    .instance_init = arm_confidential_guest_instance_init,
+    .interfaces = (InterfaceInfo[]) {
+        { TYPE_USER_CREATABLE },
+        { }
+    }
+};
+
+static void
+confidential_guest_register_types(void)
+{
+    type_register_static(&confidential_guest_info);
+}
+type_init(confidential_guest_register_types);
+
+static int confidential_guest_init(MachineState *ms)
+{
+    ConfidentialGuestSupport *cgs = ms->cgs;
+    ArmConfidentialGuestState *obj = (ArmConfidentialGuestState *)
+        object_dynamic_cast(OBJECT(cgs), TYPE_ARM_CONFIDENTIAL_GUEST);
+    const AccelOpsClass *ops = cpus_get_accel();
+
+    if (!obj) {
+        return 0;
+    }
+
+    if (!ops->check_capability ||
+            !ops->check_capability(CONFIDENTIAL_GUEST_SUPPORTED)) {
+        error_report("confidential guests are not supported");
+        return -1;
+    }
+
+    if (obj->swiotlb_size > ms->ram_size) {
+        error_report("swiotlb_size exceeds RAM size");
+        return -1;
+    }
+
+    acg = obj;
+    cgs->ready = true;
+
+    return 0;
+}
+
+static void fdt_add_reserved_memory(VirtMachineState *vms)
+{
+    MachineState *ms = MACHINE(vms);
+    hwaddr membase = vms->memmap[VIRT_MEM].base;
+    hwaddr memsize = ms->ram_size;
+    hwaddr resv_start;
+    const char compat[] = "restricted-dma-pool";
+    const AccelOpsClass *ops = cpus_get_accel();
+    char *nodename;
+
+    if (!acg || !acg->swiotlb_size) {
+        return;
+    }
+
+    nodename = g_strdup_printf("/reserved-memory");
+
+    qemu_fdt_add_subnode(ms->fdt, nodename);
+    qemu_fdt_setprop_cell(ms->fdt, nodename, "#address-cells", 2);
+    qemu_fdt_setprop_cell(ms->fdt, nodename, "#size-cells", 2);
+    qemu_fdt_setprop(ms->fdt, nodename, "ranges", NULL, 0);
+    g_free(nodename);
+
+    resv_start = membase + memsize - acg->swiotlb_size;
+    if (ops->check_capability &&
+            ops->check_capability(CONFIDENTIAL_GUEST_CAN_SHARE_MEM_WITH_HOST)) {
+        /*
+         * Indicate only the size of the swiotlb buffer needed. The guest will
+         * decide where in its private memory the buffer is placed and will
+         * use the appropriate (hypervisor) APIs to share it with the host.
+         */
+        nodename = g_strdup_printf("/reserved-memory/restricted_dma_reserved");
+        qemu_fdt_add_subnode(ms->fdt, nodename);
+        qemu_fdt_setprop_u64(ms->fdt, nodename, "size", acg->swiotlb_size);
+        qemu_fdt_setprop_u64(ms->fdt, nodename, "alignment", 4096);
+    } else {
+        /*
+         * On hypervisors that don't provide APIs for the guest to share
+         * its (private) memory with the host, indicate to the guest where in
+         * its address space the shared memory can be found. The host should
+         * arrange with the hypervisor to assign some memory to the guest at
+         * the indicated range and mark it as shared.
+         */
+        nodename = g_strdup_printf("/reserved-memory/restricted_dma_reserved@%"
+                PRIx64, resv_start);
+        qemu_fdt_add_subnode(ms->fdt, nodename);
+        qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
+                                         2, resv_start,
+                                         2, acg->swiotlb_size);
+    }
+
+    vms->restricted_dma_phandle = qemu_fdt_alloc_phandle(ms->fdt);
+    qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle",
+            vms->restricted_dma_phandle);
+    qemu_fdt_setprop(ms->fdt, nodename, "compatible", compat, sizeof(compat));
+    g_free(nodename);
+}
+
 static void machvirt_init(MachineState *machine)
 {
     VirtMachineState *vms = VIRT_MACHINE(machine);
@@ -2075,7 +2206,7 @@  static void machvirt_init(MachineState *machine)
     MemoryRegion *secure_sysmem = NULL;
     MemoryRegion *tag_sysmem = NULL;
     MemoryRegion *secure_tag_sysmem = NULL;
-    int n, virt_max_cpus;
+    int n, virt_max_cpus, ret;
     bool firmware_loaded;
     bool aarch64 = true;
     bool has_ged = !vmc->no_ged;
@@ -2084,6 +2215,12 @@  static void machvirt_init(MachineState *machine)
 
     possible_cpus = mc->possible_cpu_arch_ids(machine);
 
+    ret = confidential_guest_init(machine);
+    if (ret != 0) {
+        error_report("Failed to initialize confidential guest");
+        exit(1);
+    }
+
     /*
      * In accelerated mode, the memory map is computed earlier in kvm_type()
      * to create a VM with the right number of IPA bits.
@@ -2195,6 +2332,8 @@  static void machvirt_init(MachineState *machine)
 
     create_fdt(vms);
 
+    fdt_add_reserved_memory(vms);
+
     assert(possible_cpus->len == max_cpus);
     for (n = 0; n < possible_cpus->len; n++) {
         Object *cpuobj;