[2/2] pc-nvdimm acpi: build ACPI tables for pc-nvdimm devices

Message ID 1451388527-18009-3-git-send-email-haozhong.zhang@intel.com (mailing list archive)
State New, archived

Commit Message

Haozhong Zhang Dec. 29, 2015, 11:28 a.m. UTC
Reuse existing NVDIMM ACPI code to build ACPI tables for pc-nvdimm
devices. The resulting tables are then copied into Xen guest domain so
that they can later be loaded by Xen hvmloader.

Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
---
 hw/acpi/nvdimm.c     |  5 +++-
 hw/i386/pc.c         |  6 ++++-
 include/hw/xen/xen.h |  2 ++
 xen-hvm.c            | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 82 insertions(+), 2 deletions(-)

Comments

Stefano Stabellini Jan. 4, 2016, 4:01 p.m. UTC | #1
CC'ing the Xen tools maintainers and Anthony.

On Tue, 29 Dec 2015, Haozhong Zhang wrote:
> Reuse existing NVDIMM ACPI code to build ACPI tables for pc-nvdimm
> devices. The resulting tables are then copied into Xen guest domain so
> that they can later be loaded by Xen hvmloader.
> 
> Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>

How much work would it be to generate the nvdimm acpi tables from the
Xen toolstack?

Getting the tables from QEMU doesn't seem like a good idea to me, unless
we start getting the whole set of ACPI tables that way.


>  hw/acpi/nvdimm.c     |  5 +++-
>  hw/i386/pc.c         |  6 ++++-
>  include/hw/xen/xen.h |  2 ++
>  xen-hvm.c            | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>  4 files changed, 82 insertions(+), 2 deletions(-)
> 
> diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c
> index df1b176..7c4b931 100644
> --- a/hw/acpi/nvdimm.c
> +++ b/hw/acpi/nvdimm.c
> @@ -29,12 +29,15 @@
>  #include "hw/acpi/acpi.h"
>  #include "hw/acpi/aml-build.h"
>  #include "hw/mem/nvdimm.h"
> +#include "hw/mem/pc-nvdimm.h"
> +#include "hw/xen/xen.h"
>  
>  static int nvdimm_plugged_device_list(Object *obj, void *opaque)
>  {
>      GSList **list = opaque;
> +    const char *type_name = xen_enabled() ? TYPE_PC_NVDIMM : TYPE_NVDIMM;
>  
> -    if (object_dynamic_cast(obj, TYPE_NVDIMM)) {
> +    if (object_dynamic_cast(obj, type_name)) {
>          DeviceState *dev = DEVICE(obj);
>  
>          if (dev->realized) { /* only realized NVDIMMs matter */
> diff --git a/hw/i386/pc.c b/hw/i386/pc.c
> index 459260b..fadacf5 100644
> --- a/hw/i386/pc.c
> +++ b/hw/i386/pc.c
> @@ -1186,7 +1186,11 @@ void pc_guest_info_machine_done(Notifier *notifier, void *data)
>          }
>      }
>  
> -    acpi_setup(&guest_info_state->info);
> +    if (!xen_enabled()) {
> +        acpi_setup(&guest_info_state->info);
> +    } else if (xen_hvm_acpi_setup(PC_MACHINE(qdev_get_machine()))) {
> +        error_report("Warning: failed to initialize Xen HVM ACPI tables");
> +    }
>  }
>  
>  PcGuestInfo *pc_guest_info_init(PCMachineState *pcms)
> diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h
> index e90931a..8b705e1 100644
> --- a/include/hw/xen/xen.h
> +++ b/include/hw/xen/xen.h
> @@ -51,4 +51,6 @@ void xen_register_framebuffer(struct MemoryRegion *mr);
>  #  define HVM_MAX_VCPUS 32
>  #endif
>  
> +int xen_hvm_acpi_setup(PCMachineState *pcms);
> +
>  #endif /* QEMU_HW_XEN_H */
> diff --git a/xen-hvm.c b/xen-hvm.c
> index 6ebf43f..f1f5e77 100644
> --- a/xen-hvm.c
> +++ b/xen-hvm.c
> @@ -26,6 +26,13 @@
>  #include <xen/hvm/params.h>
>  #include <xen/hvm/e820.h>
>  
> +#include "qemu/error-report.h"
> +#include "hw/acpi/acpi.h"
> +#include "hw/acpi/aml-build.h"
> +#include "hw/acpi/bios-linker-loader.h"
> +#include "hw/mem/nvdimm.h"
> +#include "hw/mem/pc-nvdimm.h"
> +
>  //#define DEBUG_XEN_HVM
>  
>  #ifdef DEBUG_XEN_HVM
> @@ -1330,6 +1337,70 @@ int xen_hvm_init(PCMachineState *pcms,
>      return 0;
>  }
>  
> +int xen_hvm_acpi_setup(PCMachineState *pcms)
> +{
> +    AcpiBuildTables *hvm_acpi_tables;
> +    GArray *tables_blob, *table_offsets;
> +
> +    ram_addr_t acpi_tables_addr, acpi_tables_size;
> +    void *host;
> +
> +    struct xs_handle *xs = NULL;
> +    char path[80], value[17];
> +
> +    if (!pcms->nvdimm) {
> +        return 0;
> +    }
> +
> +    hvm_acpi_tables = g_malloc0(sizeof(AcpiBuildTables));
> +    if (!hvm_acpi_tables) {
> +        return -1;
> +    }
> +    acpi_build_tables_init(hvm_acpi_tables);
> +    tables_blob = hvm_acpi_tables->table_data;
> +    table_offsets = g_array_new(false, true, sizeof(uint32_t));
> +    bios_linker_loader_alloc(hvm_acpi_tables->linker,
> +                             ACPI_BUILD_TABLE_FILE, 64, false);
> +
> +    /* build NFIT tables */
> +    nvdimm_build_acpi(table_offsets, tables_blob, hvm_acpi_tables->linker);
> +    g_array_free(table_offsets, true);
> +
> +    /* copy ACPI tables into VM */
> +    acpi_tables_size = tables_blob->len;
> +    acpi_tables_addr =
> +        (pcms->below_4g_mem_size - acpi_tables_size) & XC_PAGE_MASK;
> +    host = xc_map_foreign_range(xen_xc, xen_domid,
> +                                ROUND_UP(acpi_tables_size, XC_PAGE_SIZE),
> +                                PROT_READ | PROT_WRITE,
> +                                acpi_tables_addr >> XC_PAGE_SHIFT);
> +    memcpy(host, tables_blob->data, acpi_tables_size);
> +    munmap(host, ROUND_UP(acpi_tables_size, XC_PAGE_SIZE));
> +
> +    /* write address and size of ACPI tables to xenstore */
> +    xs = xs_open(0);
> +    if (xs == NULL) {
> +        error_report("could not contact XenStore\n");
> +        return -1;
> +    }
> +    snprintf(path, sizeof(path),
> +             "/local/domain/%d/hvmloader/dm-acpi/address", xen_domid);
> +    snprintf(value, sizeof(value), "%"PRIu64, (uint64_t) acpi_tables_addr);
> +    if (!xs_write(xs, 0, path, value, strlen(value))) {
> +        error_report("failed to write NFIT base address to xenstore\n");
> +        return -1;
> +    }
> +    snprintf(path, sizeof(path),
> +             "/local/domain/%d/hvmloader/dm-acpi/length", xen_domid);
> +    snprintf(value, sizeof(value), "%"PRIu64, (uint64_t) acpi_tables_size);
> +    if (!xs_write(xs, 0, path, value, strlen(value))) {
> +        error_report("failed to write NFIT size to xenstore\n");
> +        return -1;
> +    }
> +
> +    return 0;
> +}
> +
>  void destroy_hvm_domain(bool reboot)
>  {
>      XenXC xc_handle;
> -- 
> 2.4.8
> 
> 
Konrad Rzeszutek Wilk Jan. 4, 2016, 9:10 p.m. UTC | #2
On Mon, Jan 04, 2016 at 04:01:08PM +0000, Stefano Stabellini wrote:
> CC'ing the Xen tools maintainers and Anthony.
> 
> On Tue, 29 Dec 2015, Haozhong Zhang wrote:
> > Reuse existing NVDIMM ACPI code to build ACPI tables for pc-nvdimm
> > devices. The resulting tables are then copied into Xen guest domain so
> > that they can later be loaded by Xen hvmloader.
> > 
> > Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
> 
> How much work would it be to generate the nvdimm acpi tables from the
> Xen toolstack?

Why duplicate the code? QEMU already generates the NFIT table and its sub-tables.
> 
> Getting the tables from QEMU doesn't seem like a good idea to me, unless
> we start getting the whole set of ACPI tables that way.

There is also the ACPI DSDT code - which requires a memory region
to be reserved for the AML code to drop the parameters so that QEMU
can scan the NVDIMM for failures. The region (and size) should be
determined by QEMU since it works on this data.
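
For illustration only (not actual QEMU code and not part of this patch): a
rough sketch of that reserved-region idea, in which the guest AML drops its
_DSM input into a fixed buffer and then writes a notify register that QEMU
traps, so QEMU can read the buffer back. All names, the DSM_BUF_ADDR value
and the headers are hypothetical/approximate.

    #include "exec/memory.h"      /* MemoryRegionOps; assumes the QEMU tree */
    #include "exec/cpu-common.h"  /* cpu_physical_memory_read() */

    #define DSM_BUF_ADDR 0xFF000000ULL  /* hypothetical reserved guest address */
    #define DSM_BUF_SIZE 4096

    static void dsm_notify_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
    {
        uint8_t buf[DSM_BUF_SIZE];

        /* Fetch the arguments the AML code dropped into the reserved page. */
        cpu_physical_memory_read(DSM_BUF_ADDR, buf, sizeof(buf));
        /* ... parse the _DSM input (e.g. scan the NVDIMM for failures)
         * and place the result back in the same region for the AML code ... */
    }

    static const MemoryRegionOps dsm_notify_ops = {
        .write = dsm_notify_write,
        .endianness = DEVICE_LITTLE_ENDIAN,
    };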


> 
> 
> >  hw/acpi/nvdimm.c     |  5 +++-
> >  hw/i386/pc.c         |  6 ++++-
> >  include/hw/xen/xen.h |  2 ++
> >  xen-hvm.c            | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++++
> >  4 files changed, 82 insertions(+), 2 deletions(-)
> > 
> > diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c
> > index df1b176..7c4b931 100644
> > --- a/hw/acpi/nvdimm.c
> > +++ b/hw/acpi/nvdimm.c
> > @@ -29,12 +29,15 @@
> >  #include "hw/acpi/acpi.h"
> >  #include "hw/acpi/aml-build.h"
> >  #include "hw/mem/nvdimm.h"
> > +#include "hw/mem/pc-nvdimm.h"
> > +#include "hw/xen/xen.h"
> >  
> >  static int nvdimm_plugged_device_list(Object *obj, void *opaque)
> >  {
> >      GSList **list = opaque;
> > +    const char *type_name = xen_enabled() ? TYPE_PC_NVDIMM : TYPE_NVDIMM;
> >  
> > -    if (object_dynamic_cast(obj, TYPE_NVDIMM)) {
> > +    if (object_dynamic_cast(obj, type_name)) {
> >          DeviceState *dev = DEVICE(obj);
> >  
> >          if (dev->realized) { /* only realized NVDIMMs matter */
> > diff --git a/hw/i386/pc.c b/hw/i386/pc.c
> > index 459260b..fadacf5 100644
> > --- a/hw/i386/pc.c
> > +++ b/hw/i386/pc.c
> > @@ -1186,7 +1186,11 @@ void pc_guest_info_machine_done(Notifier *notifier, void *data)
> >          }
> >      }
> >  
> > -    acpi_setup(&guest_info_state->info);
> > +    if (!xen_enabled()) {
> > +        acpi_setup(&guest_info_state->info);
> > +    } else if (xen_hvm_acpi_setup(PC_MACHINE(qdev_get_machine()))) {
> > +        error_report("Warning: failed to initialize Xen HVM ACPI tables");
> > +    }
> >  }
> >  
> >  PcGuestInfo *pc_guest_info_init(PCMachineState *pcms)
> > diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h
> > index e90931a..8b705e1 100644
> > --- a/include/hw/xen/xen.h
> > +++ b/include/hw/xen/xen.h
> > @@ -51,4 +51,6 @@ void xen_register_framebuffer(struct MemoryRegion *mr);
> >  #  define HVM_MAX_VCPUS 32
> >  #endif
> >  
> > +int xen_hvm_acpi_setup(PCMachineState *pcms);
> > +
> >  #endif /* QEMU_HW_XEN_H */
> > diff --git a/xen-hvm.c b/xen-hvm.c
> > index 6ebf43f..f1f5e77 100644
> > --- a/xen-hvm.c
> > +++ b/xen-hvm.c
> > @@ -26,6 +26,13 @@
> >  #include <xen/hvm/params.h>
> >  #include <xen/hvm/e820.h>
> >  
> > +#include "qemu/error-report.h"
> > +#include "hw/acpi/acpi.h"
> > +#include "hw/acpi/aml-build.h"
> > +#include "hw/acpi/bios-linker-loader.h"
> > +#include "hw/mem/nvdimm.h"
> > +#include "hw/mem/pc-nvdimm.h"
> > +
> >  //#define DEBUG_XEN_HVM
> >  
> >  #ifdef DEBUG_XEN_HVM
> > @@ -1330,6 +1337,70 @@ int xen_hvm_init(PCMachineState *pcms,
> >      return 0;
> >  }
> >  
> > +int xen_hvm_acpi_setup(PCMachineState *pcms)
> > +{
> > +    AcpiBuildTables *hvm_acpi_tables;
> > +    GArray *tables_blob, *table_offsets;
> > +
> > +    ram_addr_t acpi_tables_addr, acpi_tables_size;
> > +    void *host;
> > +
> > +    struct xs_handle *xs = NULL;
> > +    char path[80], value[17];
> > +
> > +    if (!pcms->nvdimm) {
> > +        return 0;
> > +    }
> > +
> > +    hvm_acpi_tables = g_malloc0(sizeof(AcpiBuildTables));
> > +    if (!hvm_acpi_tables) {
> > +        return -1;
> > +    }
> > +    acpi_build_tables_init(hvm_acpi_tables);
> > +    tables_blob = hvm_acpi_tables->table_data;
> > +    table_offsets = g_array_new(false, true, sizeof(uint32_t));
> > +    bios_linker_loader_alloc(hvm_acpi_tables->linker,
> > +                             ACPI_BUILD_TABLE_FILE, 64, false);
> > +
> > +    /* build NFIT tables */
> > +    nvdimm_build_acpi(table_offsets, tables_blob, hvm_acpi_tables->linker);
> > +    g_array_free(table_offsets, true);
> > +
> > +    /* copy ACPI tables into VM */
> > +    acpi_tables_size = tables_blob->len;
> > +    acpi_tables_addr =
> > +        (pcms->below_4g_mem_size - acpi_tables_size) & XC_PAGE_MASK;
> > +    host = xc_map_foreign_range(xen_xc, xen_domid,
> > +                                ROUND_UP(acpi_tables_size, XC_PAGE_SIZE),
> > +                                PROT_READ | PROT_WRITE,
> > +                                acpi_tables_addr >> XC_PAGE_SHIFT);
> > +    memcpy(host, tables_blob->data, acpi_tables_size);
> > +    munmap(host, ROUND_UP(acpi_tables_size, XC_PAGE_SIZE));
> > +
> > +    /* write address and size of ACPI tables to xenstore */
> > +    xs = xs_open(0);
> > +    if (xs == NULL) {
> > +        error_report("could not contact XenStore\n");
> > +        return -1;
> > +    }
> > +    snprintf(path, sizeof(path),
> > +             "/local/domain/%d/hvmloader/dm-acpi/address", xen_domid);
> > +    snprintf(value, sizeof(value), "%"PRIu64, (uint64_t) acpi_tables_addr);
> > +    if (!xs_write(xs, 0, path, value, strlen(value))) {
> > +        error_report("failed to write NFIT base address to xenstore\n");
> > +        return -1;
> > +    }
> > +    snprintf(path, sizeof(path),
> > +             "/local/domain/%d/hvmloader/dm-acpi/length", xen_domid);
> > +    snprintf(value, sizeof(value), "%"PRIu64, (uint64_t) acpi_tables_size);
> > +    if (!xs_write(xs, 0, path, value, strlen(value))) {
> > +        error_report("failed to write NFIT size to xenstore\n");
> > +        return -1;
> > +    }
> > +
> > +    return 0;
> > +}
> > +
> >  void destroy_hvm_domain(bool reboot)
> >  {
> >      XenXC xc_handle;
> > -- 
> > 2.4.8
> > 
> > 
Haozhong Zhang Jan. 5, 2016, 2:14 a.m. UTC | #3
On 01/04/16 16:01, Stefano Stabellini wrote:
> CC'ing the Xen tools maintainers and Anthony.
> 
> On Tue, 29 Dec 2015, Haozhong Zhang wrote:
> > Reuse existing NVDIMM ACPI code to build ACPI tables for pc-nvdimm
> > devices. The resulting tables are then copied into Xen guest domain so
> > that they can later be loaded by Xen hvmloader.
> > 
> > Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
> 
> How much work would it be to generate the nvdimm acpi tables from the
> Xen toolstack?
>
I think at least two pieces of work would be needed if the NVDIMM
ACPI tables were generated by the toolstack:

(1) An AML builder.
    The NVDIMM SSDT contains a definition block, so an AML builder is
    required. If the guest ACPI tables are going to be generated by the
    toolstack, I think we could port QEMU's AML builder to the
    toolstack.

(2) An interface to pass information about vNVDIMM devices from QEMU
    to Xen.
    Currently, QEMU is responsible for deciding the slot IDs of vNVDIMM
    devices, the address space the vNVDIMM devices are mapped to, and
    the other details that need to be written into the ACPI tables. In
    the future there could be more, as planned, including information
    about the NVDIMM label areas. If the NVDIMM ACPI tables were
    generated by Xen, all of that information would have to be passed
    from QEMU. Maybe we could pass it through xenstore (rough sketch
    below).
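
For illustration only (not part of the patch): a rough sketch of how QEMU
could export such per-vNVDIMM information through xenstore with libxenstore,
mirroring the hvmloader/dm-acpi keys used in the patch. The dm-nvdimm paths,
the key names and the export_vnvdimm_info() helper are hypothetical.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <xenstore.h>

    static int export_vnvdimm_info(int domid, unsigned slot,
                                   uint64_t spa, uint64_t size)
    {
        struct xs_handle *xs = xs_open(0);
        char path[128], value[32];

        if (!xs) {
            return -1;
        }
        /* guest address the vNVDIMM is mapped at */
        snprintf(path, sizeof(path),
                 "/local/domain/%d/hvmloader/dm-nvdimm/%u/spa", domid, slot);
        snprintf(value, sizeof(value), "%" PRIu64, spa);
        if (!xs_write(xs, 0, path, value, strlen(value))) {
            goto err;
        }
        /* size of the vNVDIMM region */
        snprintf(path, sizeof(path),
                 "/local/domain/%d/hvmloader/dm-nvdimm/%u/size", domid, slot);
        snprintf(value, sizeof(value), "%" PRIu64, size);
        if (!xs_write(xs, 0, path, value, strlen(value))) {
            goto err;
        }
        xs_close(xs);
        return 0;
    err:
        xs_close(xs);
        return -1;
    }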

> Getting the tables from QEMU doesn't seem like a good idea to me, unless
> we start getting the whole set of ACPI tables that way.
>
As most of the current and future vNVDIMM implementation in QEMU is
related to ACPI, I really want to reuse that code as much as possible.

Haozhong
Stefano Stabellini Jan. 5, 2016, 11 a.m. UTC | #4
On Mon, 4 Jan 2016, Konrad Rzeszutek Wilk wrote:
> On Mon, Jan 04, 2016 at 04:01:08PM +0000, Stefano Stabellini wrote:
> > CC'ing the Xen tools maintainers and Anthony.
> > 
> > On Tue, 29 Dec 2015, Haozhong Zhang wrote:
> > > Reuse existing NVDIMM ACPI code to build ACPI tables for pc-nvdimm
> > > devices. The resulting tables are then copied into Xen guest domain so
> > > that they can later be loaded by Xen hvmloader.
> > > 
> > > Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
> > 
> > How much work would it be to generate the nvdimm acpi tables from the
> > Xen toolstack?
> 
> Why duplicate the code? QEMU already generates the NFIT table and its sub-tables.
>
>
> > Getting the tables from QEMU doesn't seem like a good idea to me, unless
> > we start getting the whole set of ACPI tables that way.
> 
> There is also the ACPI DSDT code - which requires a memory region
> to be reserved for the AML code to drop the parameters so that QEMU
> can scan the NVDIMM for failures. The region (and size) should be
> determined by QEMU since it works on this data.

QEMU can generate the whole set of ACPI tables. Why should we take only
the nvdimm tables and not the others?

I don't think it is wise to have two components which both think they are in
control of generating ACPI tables, hvmloader (soon to be the toolstack
with Anthony's work) and QEMU. From an architectural perspective, it
doesn't look robust to me.

Could we take this opportunity to switch to QEMU generating the whole
set of ACPI tables?
 
 
> > >  hw/acpi/nvdimm.c     |  5 +++-
> > >  hw/i386/pc.c         |  6 ++++-
> > >  include/hw/xen/xen.h |  2 ++
> > >  xen-hvm.c            | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++++
> > >  4 files changed, 82 insertions(+), 2 deletions(-)
> > > 
> > > diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c
> > > index df1b176..7c4b931 100644
> > > --- a/hw/acpi/nvdimm.c
> > > +++ b/hw/acpi/nvdimm.c
> > > @@ -29,12 +29,15 @@
> > >  #include "hw/acpi/acpi.h"
> > >  #include "hw/acpi/aml-build.h"
> > >  #include "hw/mem/nvdimm.h"
> > > +#include "hw/mem/pc-nvdimm.h"
> > > +#include "hw/xen/xen.h"
> > >  
> > >  static int nvdimm_plugged_device_list(Object *obj, void *opaque)
> > >  {
> > >      GSList **list = opaque;
> > > +    const char *type_name = xen_enabled() ? TYPE_PC_NVDIMM : TYPE_NVDIMM;
> > >  
> > > -    if (object_dynamic_cast(obj, TYPE_NVDIMM)) {
> > > +    if (object_dynamic_cast(obj, type_name)) {
> > >          DeviceState *dev = DEVICE(obj);
> > >  
> > >          if (dev->realized) { /* only realized NVDIMMs matter */
> > > diff --git a/hw/i386/pc.c b/hw/i386/pc.c
> > > index 459260b..fadacf5 100644
> > > --- a/hw/i386/pc.c
> > > +++ b/hw/i386/pc.c
> > > @@ -1186,7 +1186,11 @@ void pc_guest_info_machine_done(Notifier *notifier, void *data)
> > >          }
> > >      }
> > >  
> > > -    acpi_setup(&guest_info_state->info);
> > > +    if (!xen_enabled()) {
> > > +        acpi_setup(&guest_info_state->info);
> > > +    } else if (xen_hvm_acpi_setup(PC_MACHINE(qdev_get_machine()))) {
> > > +        error_report("Warning: failed to initialize Xen HVM ACPI tables");
> > > +    }
> > >  }
> > >  
> > >  PcGuestInfo *pc_guest_info_init(PCMachineState *pcms)
> > > diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h
> > > index e90931a..8b705e1 100644
> > > --- a/include/hw/xen/xen.h
> > > +++ b/include/hw/xen/xen.h
> > > @@ -51,4 +51,6 @@ void xen_register_framebuffer(struct MemoryRegion *mr);
> > >  #  define HVM_MAX_VCPUS 32
> > >  #endif
> > >  
> > > +int xen_hvm_acpi_setup(PCMachineState *pcms);
> > > +
> > >  #endif /* QEMU_HW_XEN_H */
> > > diff --git a/xen-hvm.c b/xen-hvm.c
> > > index 6ebf43f..f1f5e77 100644
> > > --- a/xen-hvm.c
> > > +++ b/xen-hvm.c
> > > @@ -26,6 +26,13 @@
> > >  #include <xen/hvm/params.h>
> > >  #include <xen/hvm/e820.h>
> > >  
> > > +#include "qemu/error-report.h"
> > > +#include "hw/acpi/acpi.h"
> > > +#include "hw/acpi/aml-build.h"
> > > +#include "hw/acpi/bios-linker-loader.h"
> > > +#include "hw/mem/nvdimm.h"
> > > +#include "hw/mem/pc-nvdimm.h"
> > > +
> > >  //#define DEBUG_XEN_HVM
> > >  
> > >  #ifdef DEBUG_XEN_HVM
> > > @@ -1330,6 +1337,70 @@ int xen_hvm_init(PCMachineState *pcms,
> > >      return 0;
> > >  }
> > >  
> > > +int xen_hvm_acpi_setup(PCMachineState *pcms)
> > > +{
> > > +    AcpiBuildTables *hvm_acpi_tables;
> > > +    GArray *tables_blob, *table_offsets;
> > > +
> > > +    ram_addr_t acpi_tables_addr, acpi_tables_size;
> > > +    void *host;
> > > +
> > > +    struct xs_handle *xs = NULL;
> > > +    char path[80], value[17];
> > > +
> > > +    if (!pcms->nvdimm) {
> > > +        return 0;
> > > +    }
> > > +
> > > +    hvm_acpi_tables = g_malloc0(sizeof(AcpiBuildTables));
> > > +    if (!hvm_acpi_tables) {
> > > +        return -1;
> > > +    }
> > > +    acpi_build_tables_init(hvm_acpi_tables);
> > > +    tables_blob = hvm_acpi_tables->table_data;
> > > +    table_offsets = g_array_new(false, true, sizeof(uint32_t));
> > > +    bios_linker_loader_alloc(hvm_acpi_tables->linker,
> > > +                             ACPI_BUILD_TABLE_FILE, 64, false);
> > > +
> > > +    /* build NFIT tables */
> > > +    nvdimm_build_acpi(table_offsets, tables_blob, hvm_acpi_tables->linker);
> > > +    g_array_free(table_offsets, true);
> > > +
> > > +    /* copy ACPI tables into VM */
> > > +    acpi_tables_size = tables_blob->len;
> > > +    acpi_tables_addr =
> > > +        (pcms->below_4g_mem_size - acpi_tables_size) & XC_PAGE_MASK;
> > > +    host = xc_map_foreign_range(xen_xc, xen_domid,
> > > +                                ROUND_UP(acpi_tables_size, XC_PAGE_SIZE),
> > > +                                PROT_READ | PROT_WRITE,
> > > +                                acpi_tables_addr >> XC_PAGE_SHIFT);
> > > +    memcpy(host, tables_blob->data, acpi_tables_size);
> > > +    munmap(host, ROUND_UP(acpi_tables_size, XC_PAGE_SIZE));
> > > +
> > > +    /* write address and size of ACPI tables to xenstore */
> > > +    xs = xs_open(0);
> > > +    if (xs == NULL) {
> > > +        error_report("could not contact XenStore\n");
> > > +        return -1;
> > > +    }
> > > +    snprintf(path, sizeof(path),
> > > +             "/local/domain/%d/hvmloader/dm-acpi/address", xen_domid);
> > > +    snprintf(value, sizeof(value), "%"PRIu64, (uint64_t) acpi_tables_addr);
> > > +    if (!xs_write(xs, 0, path, value, strlen(value))) {
> > > +        error_report("failed to write NFIT base address to xenstore\n");
> > > +        return -1;
> > > +    }
> > > +    snprintf(path, sizeof(path),
> > > +             "/local/domain/%d/hvmloader/dm-acpi/length", xen_domid);
> > > +    snprintf(value, sizeof(value), "%"PRIu64, (uint64_t) acpi_tables_size);
> > > +    if (!xs_write(xs, 0, path, value, strlen(value))) {
> > > +        error_report("failed to write NFIT size to xenstore\n");
> > > +        return -1;
> > > +    }
> > > +
> > > +    return 0;
> > > +}
> > > +
> > >  void destroy_hvm_domain(bool reboot)
> > >  {
> > >      XenXC xc_handle;
> > > -- 
> > > 2.4.8
> > > 
> > > 
Haozhong Zhang Jan. 5, 2016, 2:01 p.m. UTC | #5
On 01/05/16 11:00, Stefano Stabellini wrote:
> On Mon, 4 Jan 2016, Konrad Rzeszutek Wilk wrote:
> > On Mon, Jan 04, 2016 at 04:01:08PM +0000, Stefano Stabellini wrote:
> > > CC'ing the Xen tools maintainers and Anthony.
> > > 
> > > On Tue, 29 Dec 2015, Haozhong Zhang wrote:
> > > > Reuse existing NVDIMM ACPI code to build ACPI tables for pc-nvdimm
> > > > devices. The resulting tables are then copied into Xen guest domain so
> > > > that they can later be loaded by Xen hvmloader.
> > > > 
> > > > Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
> > > 
> > > How much work would it be to generate the nvdimm acpi tables from the
> > > Xen toolstack?
> > 
> > Why duplicate the code? QEMU already generates the NFIT table and its sub-tables.
> >
> >
> > > Getting the tables from QEMU doesn't seem like a good idea to me, unless
> > > we start getting the whole set of ACPI tables that way.
> > 
> > There is also the ACPI DSDT code - which requires a memory region
> > to be reserved for the AML code to drop the parameters so that QEMU
> > can scan the NVDIMM for failures. The region (and size) should be
> > determined by QEMU since it works on this data.
> 
> QEMU can generate the whole set of ACPI tables. Why should we take only
> the nvdimm tables and not the others?
>
The NVDIMM tables are the only tables required to support vNVDIMM in this
patch series, and they are self-contained and do not conflict with the
other existing tables built by hvmloader. For the other tables built by
QEMU, I have no idea whether they would work with Xen, so I only take the
NVDIMM tables from QEMU.

> I don't think it is wise to have two components which both think they are in
> control of generating ACPI tables, hvmloader (soon to be the toolstack
> with Anthony's work) and QEMU. From an architectural perspective, it
> doesn't look robust to me.
>
Do you mean that whenever the nvdimm code in QEMU is changed, we would
have to make more effort to ensure it still works with Xen?

> Could we take this opportunity to switch to QEMU generating the whole
> set of ACPI tables?
>
Not quite sure how much effort this change would take. CCed the
hvmloader maintainers for their comments.

Thanks,
Haozhong
Konrad Rzeszutek Wilk Jan. 6, 2016, 2:50 p.m. UTC | #6
On Tue, Jan 05, 2016 at 10:01:26PM +0800, Haozhong Zhang wrote:
> On 01/05/16 11:00, Stefano Stabellini wrote:
> > On Mon, 4 Jan 2016, Konrad Rzeszutek Wilk wrote:
> > > On Mon, Jan 04, 2016 at 04:01:08PM +0000, Stefano Stabellini wrote:
> > > > CC'ing the Xen tools maintainers and Anthony.
> > > > 
> > > > On Tue, 29 Dec 2015, Haozhong Zhang wrote:
> > > > > Reuse existing NVDIMM ACPI code to build ACPI tables for pc-nvdimm
> > > > > devices. The resulting tables are then copied into Xen guest domain so
> > > > > that they can later be loaded by Xen hvmloader.
> > > > > 
> > > > > Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
> > > > 
> > > > How much work would it be to generate the nvdimm acpi tables from the
> > > > Xen toolstack?
> > > 
> > > Why duplicate the code? QEMU already generates the NFIT table and its sub-tables.
> > >
> > >
> > > > Getting the tables from QEMU doesn't seem like a good idea to me, unless
> > > > we start getting the whole set of ACPI tables that way.
> > > 
> > > There is also the ACPI DSDT code - which requires a memory region
> > > to be reserved for the AML code to drop the parameters so that QEMU
> > > can scan the NVDIMM for failures. The region (and size) should be
> > > determined by QEMU since it works on this data.
> > 
> > QEMU can generate the whole set of ACPI tables. Why should we take only
> > the nvdimm tables and not the others?
> >
> NVDIMM tables are the only tables required to support vNVDIMM in this
> patch series, and they are self-contained and do not conflict with other
> existing tables built by hvmloader. For other tables built by QEMU, I
> have no idea whether they could work with Xen, so I only take nvdimm
> tables from QEMU.

But you also have to deal with the SSDT code, right? And I was under the
impression that if you use hvmloader, it loads only the ACPI DSDT which
it had compiled at build time - but not the SSDT?

In other words - just having the ACPI NFIT won't give you the whole
picture - unless you also jam in the ACPI _DSM (ACPI0012) methods as
part of the SSDT, right?

Or does the ACPI table generation also include an ACPI SSDT with the
ACPI _DSM methods as part of it? I only see 'nvdimm_build_acpi' and
NFIT comments, nothing about the SSDT?

> 
> > I don't think it is wise to have two components which both think they are in
> > control of generating ACPI tables, hvmloader (soon to be the toolstack
> > with Anthony's work) and QEMU. From an architectural perspective, it
> > doesn't look robust to me.
> >
> Do you mean whenever nvdimm code in QEMU is changed, we would have to
> make more efforts to ensure it still works with Xen?

Not sure I follow that. How is it different from the efforts to ensure
that the patches here (which only add one ACPI MADT table)
provide the proper support? Wouldn't you do the same type of checking
every time you modify the nvdimm_build_acpi code?


> 
> > Could we take this opportunity to switch to QEMU generating the whole
> > set of ACPI tables?
> >
> Not quite sure how much effort would be taken on this change. CCed
> hvmloader maintainers for their comments.
> 
> Thanks,
> Haozhong
Haozhong Zhang Jan. 6, 2016, 3:24 p.m. UTC | #7
On 01/06/16 09:50, Konrad Rzeszutek Wilk wrote:
> On Tue, Jan 05, 2016 at 10:01:26PM +0800, Haozhong Zhang wrote:
> > On 01/05/16 11:00, Stefano Stabellini wrote:
> > > On Mon, 4 Jan 2016, Konrad Rzeszutek Wilk wrote:
> > > > On Mon, Jan 04, 2016 at 04:01:08PM +0000, Stefano Stabellini wrote:
> > > > > CC'ing the Xen tools maintainers and Anthony.
> > > > > 
> > > > > On Tue, 29 Dec 2015, Haozhong Zhang wrote:
> > > > > > Reuse existing NVDIMM ACPI code to build ACPI tables for pc-nvdimm
> > > > > > devices. The resulting tables are then copied into Xen guest domain so
> > > > > > that they can later be loaded by Xen hvmloader.
> > > > > > 
> > > > > > Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
> > > > > 
> > > > > How much work would it be to generate the nvdimm acpi tables from the
> > > > > Xen toolstack?
> > > > 
> > > > Why duplicate the code? QEMU already generates the NFIT table and its sub-tables.
> > > >
> > > >
> > > > > Getting the tables from QEMU doesn't seem like a good idea to me, unless
> > > > > we start getting the whole set of ACPI tables that way.
> > > > 
> > > > There is also the ACPI DSDT code - which requires a memory region
> > > > to be reserved for the AML code to drop the parameters so that QEMU
> > > > can scan the NVDIMM for failures. The region (and size) should be
> > > > determined by QEMU since it works on this data.
> > > 
> > > QEMU can generate the whole set of ACPI tables. Why should we take only
> > > the nvdimm tables and not the others?
> > >
> > NVDIMM tables are the only tables required to support vNVDIMM in this
> > patch series, and they are self-contained and do not conflict with other
> > existing tables built by hvmloader. For other tables built by QEMU, I
> > have no idea whether they could work with Xen, so I only take nvdimm
> > tables from QEMU.
> 
> But you also have to deal with the SSDT code right?
The NVDIMM SSDT, including the _DSM methods, is also copied from QEMU.

> And I was under the
> impression that if you use hvmloader - it loads the ACPI DSDT which
> it had compiled at build-time - only - but not the SSDT?
>

(Maybe I missed something.) Why do we bother with the DSDT? It does
not contain any NVDIMM information or methods.

> In other words - just having the ACPI NFIT won't give you the whole
> picture - unless you also jam in ACPI _DSM (ACPI0012) methods as
> part of the SSDT right?
>

Right, the _DSM methods are also in the SSDT and are copied from QEMU.

> Or does the ACPI tables generation also include an ACPI SSDT with the
> ACPI _DSM methods as part of it? I only see 'nvdimm_build_acpi' and
> NFIT comments, nothing about SSDT?
>

Not sure if we are looking at the same code. As of QEMU commit 38a762f,
nvdimm_build_acpi() calls nvdimm_build_ssdt() to build the NVDIMM SSDT.
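
Roughly (paraphrased, not the verbatim QEMU source; argument lists are
approximate), the shape of that function - it emits both the NFIT and the
SSDT that carries the _DSM methods:

    void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
                           GArray *linker)
    {
        GSList *device_list = nvdimm_get_plugged_device_list();

        if (!device_list) {
            return; /* no NVDIMM plugged, nothing to build */
        }
        nvdimm_build_nfit(device_list, table_offsets, table_data, linker);
        nvdimm_build_ssdt(device_list, table_offsets, table_data, linker);
        g_slist_free(device_list);
    }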

> > 
> > > I don't think it is wise to have two components which both think they are in
> > > control of generating ACPI tables, hvmloader (soon to be the toolstack
> > > with Anthony's work) and QEMU. From an architectural perspective, it
> > > doesn't look robust to me.
> > >
> > Do you mean whenever nvdimm code in QEMU is changed, we would have to
> > make more efforts to ensure it still works with Xen?
> 
> Not sure I follow that. How is it different from the efforts to ensure
> that the patches here (which only add one ACPI MADT table)
> provide the proper support? Wouldn't you do the same type of checking
> every time you modify the nvdimm_build_acpi code?
>

Yes, I do have to check. I was just not clear about what 'robust' meant
here, so I asked.

Haozhong

> 
> > 
> > > Could we take this opportunity to switch to QEMU generating the whole
> > > set of ACPI tables?
> > >
> > Not quite sure how much effort would be taken on this change. CCed
> > hvmloader maintainers for their comments.
> > 
> > Thanks,
> > Haozhong
> 

Patch

diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c
index df1b176..7c4b931 100644
--- a/hw/acpi/nvdimm.c
+++ b/hw/acpi/nvdimm.c
@@ -29,12 +29,15 @@ 
 #include "hw/acpi/acpi.h"
 #include "hw/acpi/aml-build.h"
 #include "hw/mem/nvdimm.h"
+#include "hw/mem/pc-nvdimm.h"
+#include "hw/xen/xen.h"
 
 static int nvdimm_plugged_device_list(Object *obj, void *opaque)
 {
     GSList **list = opaque;
+    const char *type_name = xen_enabled() ? TYPE_PC_NVDIMM : TYPE_NVDIMM;
 
-    if (object_dynamic_cast(obj, TYPE_NVDIMM)) {
+    if (object_dynamic_cast(obj, type_name)) {
         DeviceState *dev = DEVICE(obj);
 
         if (dev->realized) { /* only realized NVDIMMs matter */
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 459260b..fadacf5 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -1186,7 +1186,11 @@  void pc_guest_info_machine_done(Notifier *notifier, void *data)
         }
     }
 
-    acpi_setup(&guest_info_state->info);
+    if (!xen_enabled()) {
+        acpi_setup(&guest_info_state->info);
+    } else if (xen_hvm_acpi_setup(PC_MACHINE(qdev_get_machine()))) {
+        error_report("Warning: failed to initialize Xen HVM ACPI tables");
+    }
 }
 
 PcGuestInfo *pc_guest_info_init(PCMachineState *pcms)
diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h
index e90931a..8b705e1 100644
--- a/include/hw/xen/xen.h
+++ b/include/hw/xen/xen.h
@@ -51,4 +51,6 @@  void xen_register_framebuffer(struct MemoryRegion *mr);
 #  define HVM_MAX_VCPUS 32
 #endif
 
+int xen_hvm_acpi_setup(PCMachineState *pcms);
+
 #endif /* QEMU_HW_XEN_H */
diff --git a/xen-hvm.c b/xen-hvm.c
index 6ebf43f..f1f5e77 100644
--- a/xen-hvm.c
+++ b/xen-hvm.c
@@ -26,6 +26,13 @@ 
 #include <xen/hvm/params.h>
 #include <xen/hvm/e820.h>
 
+#include "qemu/error-report.h"
+#include "hw/acpi/acpi.h"
+#include "hw/acpi/aml-build.h"
+#include "hw/acpi/bios-linker-loader.h"
+#include "hw/mem/nvdimm.h"
+#include "hw/mem/pc-nvdimm.h"
+
 //#define DEBUG_XEN_HVM
 
 #ifdef DEBUG_XEN_HVM
@@ -1330,6 +1337,70 @@  int xen_hvm_init(PCMachineState *pcms,
     return 0;
 }
 
+int xen_hvm_acpi_setup(PCMachineState *pcms)
+{
+    AcpiBuildTables *hvm_acpi_tables;
+    GArray *tables_blob, *table_offsets;
+
+    ram_addr_t acpi_tables_addr, acpi_tables_size;
+    void *host;
+
+    struct xs_handle *xs = NULL;
+    char path[80], value[17];
+
+    if (!pcms->nvdimm) {
+        return 0;
+    }
+
+    hvm_acpi_tables = g_malloc0(sizeof(AcpiBuildTables));
+    if (!hvm_acpi_tables) {
+        return -1;
+    }
+    acpi_build_tables_init(hvm_acpi_tables);
+    tables_blob = hvm_acpi_tables->table_data;
+    table_offsets = g_array_new(false, true, sizeof(uint32_t));
+    bios_linker_loader_alloc(hvm_acpi_tables->linker,
+                             ACPI_BUILD_TABLE_FILE, 64, false);
+
+    /* build NFIT tables */
+    nvdimm_build_acpi(table_offsets, tables_blob, hvm_acpi_tables->linker);
+    g_array_free(table_offsets, true);
+
+    /* copy ACPI tables into VM */
+    acpi_tables_size = tables_blob->len;
+    acpi_tables_addr =
+        (pcms->below_4g_mem_size - acpi_tables_size) & XC_PAGE_MASK;
+    host = xc_map_foreign_range(xen_xc, xen_domid,
+                                ROUND_UP(acpi_tables_size, XC_PAGE_SIZE),
+                                PROT_READ | PROT_WRITE,
+                                acpi_tables_addr >> XC_PAGE_SHIFT);
+    memcpy(host, tables_blob->data, acpi_tables_size);
+    munmap(host, ROUND_UP(acpi_tables_size, XC_PAGE_SIZE));
+
+    /* write address and size of ACPI tables to xenstore */
+    xs = xs_open(0);
+    if (xs == NULL) {
+        error_report("could not contact XenStore\n");
+        return -1;
+    }
+    snprintf(path, sizeof(path),
+             "/local/domain/%d/hvmloader/dm-acpi/address", xen_domid);
+    snprintf(value, sizeof(value), "%"PRIu64, (uint64_t) acpi_tables_addr);
+    if (!xs_write(xs, 0, path, value, strlen(value))) {
+        error_report("failed to write NFIT base address to xenstore\n");
+        return -1;
+    }
+    snprintf(path, sizeof(path),
+             "/local/domain/%d/hvmloader/dm-acpi/length", xen_domid);
+    snprintf(value, sizeof(value), "%"PRIu64, (uint64_t) acpi_tables_size);
+    if (!xs_write(xs, 0, path, value, strlen(value))) {
+        error_report("failed to write NFIT size to xenstore\n");
+        return -1;
+    }
+
+    return 0;
+}
+
 void destroy_hvm_domain(bool reboot)
 {
     XenXC xc_handle;
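
For illustration only (not part of the patch): a rough sketch of the consumer
side of the xenstore handoff above, using libxenstore from a toolstack-style
helper. hvmloader itself uses its own xenbus helpers, so read_dm_acpi() and
its error handling are purely hypothetical. The values are parsed as decimal
because the patch writes them with PRIu64.

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <xenstore.h>

    static int read_dm_acpi(int domid, uint64_t *addr, uint64_t *len)
    {
        struct xs_handle *xs = xs_open(XS_OPEN_READONLY);
        char path[80];
        char *val;
        unsigned int vlen;

        if (!xs) {
            return -1;
        }
        snprintf(path, sizeof(path),
                 "/local/domain/%d/hvmloader/dm-acpi/address", domid);
        val = xs_read(xs, 0, path, &vlen);
        if (!val) {
            xs_close(xs);
            return -1;
        }
        *addr = strtoull(val, NULL, 10);  /* written as decimal by QEMU */
        free(val);

        snprintf(path, sizeof(path),
                 "/local/domain/%d/hvmloader/dm-acpi/length", domid);
        val = xs_read(xs, 0, path, &vlen);
        if (!val) {
            xs_close(xs);
            return -1;
        }
        *len = strtoull(val, NULL, 10);
        free(val);

        xs_close(xs);
        return 0;
    }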