[v15,07/12] numa: Calculate hmat latency and bandwidth entry list

Message ID 20191107074511.14304-8-tao3.xu@intel.com (mailing list archive)
State New, archived
Series Build ACPI Heterogeneous Memory Attribute Table (HMAT)

Commit Message

Tao Xu Nov. 7, 2019, 7:45 a.m. UTC
Compress the HMAT latency and bandwidth raw data into uint16_t values,
which can be stored in the HMAT table.

Suggested-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Tao Xu <tao3.xu@intel.com>
---

No changes in v15.

Changes in v14:
    - Convert latency from ns to ps, because the ACPI 6.3 HMAT table
      uses ps as the minimum unit
---
 hw/core/numa.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 58 insertions(+), 1 deletion(-)
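
To make the compression concrete, here is a sketch with made-up values
(not taken from the patch): raw latencies of 5, 10 and 20 ns share the
common base of 5 ns, so the stored uint16_t entries become 1, 2 and 4,
and the base itself ends up in the table in picoseconds.

    /* Illustrative values only; the scheme follows the patch:
     * entry = rawdata / base, with the base later scaled to the
     * table's unit (ps for latency, MiB/s for bandwidth).
     */
    uint64_t raw_ns[] = { 5, 10, 20 };  /* user-supplied latencies */
    uint64_t base = 5;                  /* common divisor, in ns */
    uint16_t entry[3];
    int i;

    for (i = 0; i < 3; i++) {
        entry[i] = raw_ns[i] / base;    /* -> 1, 2, 4 */
    }
    base *= 1000;                       /* 5000 ps, as stored in HMAT */
    /* A consumer recovers each latency as entry * base (in ps). */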

Comments

Igor Mammedov Nov. 8, 2019, 12:12 p.m. UTC | #1
On Thu,  7 Nov 2019 15:45:06 +0800
Tao Xu <tao3.xu@intel.com> wrote:

> Compress the HMAT latency and bandwidth raw data into uint16_t values,
> which can be stored in the HMAT table.
> 
> Suggested-by: Igor Mammedov <imammedo@redhat.com>
> Signed-off-by: Tao Xu <tao3.xu@intel.com>
> ---
> 
> No changes in v15.
> 
> Changes in v14:
>     - Convert latency from ns to ps, because the ACPI 6.3 HMAT table
>       uses ps as the minimum unit
> ---
>  hw/core/numa.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 58 insertions(+), 1 deletion(-)
> 
> diff --git a/hw/core/numa.c b/hw/core/numa.c
> index f391760c20..523dd80822 100644
> --- a/hw/core/numa.c
> +++ b/hw/core/numa.c
> @@ -483,6 +483,47 @@ static void complete_init_numa_distance(MachineState *ms)
>      }
>  }
>  
> +static void calculate_hmat_entry_list(HMAT_LB_Info *hmat_lb, int num_nodes)
I'd call it verify_hmat_entry_list()
and I'd only do checks here, without post-processing the data
into something else, provided there are checks that require
all hmat-lb options to be parsed first.

> +{
> +    int i, index;
> +    uint16_t *entry_list;
> +    uint64_t base;
> +    GArray *lb_data_list;
> +    HMAT_LB_Data *lb_data;
> +
> +    if (hmat_lb->data_type <= HMAT_LB_DATA_WRITE_LATENCY) {
> +        base = hmat_lb->base_latency;
> +        lb_data_list = hmat_lb->latency;
> +    } else {
> +        base = hmat_lb->base_bandwidth;
> +        lb_data_list = hmat_lb->bandwidth;
> +    }
> +
> +    entry_list = g_malloc0(lb_data_list->len * sizeof(uint16_t));
> +    for (i = 0; i < lb_data_list->len; i++) {
> +        lb_data = &g_array_index(lb_data_list, HMAT_LB_Data, i);
> +        index = lb_data->initiator * num_nodes + lb_data->target;
> +        if (entry_list[index]) {
> +            error_report("Duplicate configuration of the latency for "
> +                "initiator=%d and target=%d.", lb_data->initiator,
> +                lb_data->target);
Is it possible to detect duplicates during hmat-lb option parsing?

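As a sketch of what parse-time detection could look like, assuming the
hmat-lb option handler appends each parsed pair to the per-type GArray
(the hmat_lb_has_pair() helper and the node/errp names are hypothetical):

    /* Hypothetical helper: true if an entry for this initiator/target
     * pair was already parsed into the list.
     */
    static bool hmat_lb_has_pair(GArray *lb_data_list, int initiator,
                                 int target)
    {
        guint i;

        for (i = 0; i < lb_data_list->len; i++) {
            HMAT_LB_Data *d = &g_array_index(lb_data_list, HMAT_LB_Data, i);

            if (d->initiator == initiator && d->target == target) {
                return true;
            }
        }
        return false;
    }

    /* At parse time, before appending the new entry: */
    if (hmat_lb_has_pair(lb_data_list, node->initiator, node->target)) {
        error_setg(errp, "Duplicate configuration for initiator=%d and "
                   "target=%d", node->initiator, node->target);
        return;
    }

This is O(n) per option, but it reports the error at the exact option
the user got wrong instead of failing later at machine init.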

> +            exit(1);
> +        }

> +        entry_list[index] = (uint16_t)(lb_data->rawdata / base);
> +    }
> +    if (hmat_lb->data_type <= HMAT_LB_DATA_WRITE_LATENCY) {
> +        /* Convert latency base from nanoseconds to picoseconds */
> +        hmat_lb->base_latency = base * 1000;
> +        hmat_lb->entry_latency = entry_list;
> +    } else {
> +        /* Convert bandwidth base from bytes to megabytes */
> +        hmat_lb->base_bandwidth = base / MiB;
> +        hmat_lb->entry_bandwidth = entry_list;
> +    }
I suggest moving this hunk to 10/12 and dropping the entry_foo fields,
as build_hmat_lb() can walk over lb_data_list and normalize the values
put into the ACPI table on its own.

The job of the generic numa code is to check that user-provided values
won't under- or overflow the ACPI table values once divided/multiplied,
but to leave the actual packing of data into the table to the ACPI code.

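A sketch of such a check staying in generic numa code (a fragment from
inside the per-entry loop; the error wording is mine, UINT16_MAX being
the limit because the HMAT entry field is 16 bits wide):

    /* Validate only; the actual packing stays in the ACPI code. */
    uint64_t entry = lb_data->rawdata / base;

    if (entry > UINT16_MAX) {
        error_report("initiator=%d target=%d: value overflows the "
                     "16-bit HMAT entry once divided by the base",
                     lb_data->initiator, lb_data->target);
        exit(1);
    }
    if (entry * base != lb_data->rawdata) {
        error_report("initiator=%d target=%d: value is not a multiple "
                     "of the base and would lose precision",
                     lb_data->initiator, lb_data->target);
        exit(1);
    }
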
> +}
> +
>  void numa_legacy_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
>                                   int nb_nodes, ram_addr_t size)
>  {
> @@ -521,9 +562,10 @@ void numa_default_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
>  
>  void numa_complete_configuration(MachineState *ms)
>  {
> -    int i;
> +    int i, hierarchy, type;
>      MachineClass *mc = MACHINE_GET_CLASS(ms);
>      NodeInfo *numa_info = ms->numa_state->nodes;
> +    HMAT_LB_Info *numa_hmat_lb;
>  
>      /*
>       * If memory hotplug is enabled (slots > 0) but without '-numa'
> @@ -620,6 +662,21 @@ void numa_complete_configuration(MachineState *ms)
>              /* Validation succeeded, now fill in any missing distances. */
>              complete_init_numa_distance(ms);
>          }
> +
> +        if (ms->numa_state->hmat_enabled) {
> +            for (hierarchy = HMAT_LB_MEM_MEMORY;
> +                 hierarchy <= HMAT_LB_MEM_CACHE_3RD_LEVEL; hierarchy++) {
> +                for (type = HMAT_LB_DATA_ACCESS_LATENCY;
> +                    type <= HMAT_LB_DATA_WRITE_BANDWIDTH; type++) {
> +                    numa_hmat_lb = ms->numa_state->hmat_lb[hierarchy][type];
> +
> +                    if (numa_hmat_lb) {
> +                        calculate_hmat_entry_list(numa_hmat_lb,
> +                                                  ms->numa_state->num_nodes);
> +                    }
> +                }
> +            }
> +        }
>      }
>  }
>

Patch

diff --git a/hw/core/numa.c b/hw/core/numa.c
index f391760c20..523dd80822 100644
--- a/hw/core/numa.c
+++ b/hw/core/numa.c
@@ -483,6 +483,47 @@ static void complete_init_numa_distance(MachineState *ms)
     }
 }
 
+static void calculate_hmat_entry_list(HMAT_LB_Info *hmat_lb, int num_nodes)
+{
+    int i, index;
+    uint16_t *entry_list;
+    uint64_t base;
+    GArray *lb_data_list;
+    HMAT_LB_Data *lb_data;
+
+    if (hmat_lb->data_type <= HMAT_LB_DATA_WRITE_LATENCY) {
+        base = hmat_lb->base_latency;
+        lb_data_list = hmat_lb->latency;
+    } else {
+        base = hmat_lb->base_bandwidth;
+        lb_data_list = hmat_lb->bandwidth;
+    }
+
+    entry_list = g_malloc0(lb_data_list->len * sizeof(uint16_t));
+    for (i = 0; i < lb_data_list->len; i++) {
+        lb_data = &g_array_index(lb_data_list, HMAT_LB_Data, i);
+        index = lb_data->initiator * num_nodes + lb_data->target;
+        if (entry_list[index]) {
+            error_report("Duplicate configuration of the latency for "
+                "initiator=%d and target=%d.", lb_data->initiator,
+                lb_data->target);
+            exit(1);
+        }
+
+        entry_list[index] = (uint16_t)(lb_data->rawdata / base);
+    }
+
+    if (hmat_lb->data_type <= HMAT_LB_DATA_WRITE_LATENCY) {
+        /* Convert latency base from nanoseconds to picoseconds */
+        hmat_lb->base_latency = base * 1000;
+        hmat_lb->entry_latency = entry_list;
+    } else {
+        /* Convert bandwidth base from bytes to megabytes */
+        hmat_lb->base_bandwidth = base / MiB;
+        hmat_lb->entry_bandwidth = entry_list;
+    }
+}
+
 void numa_legacy_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
                                  int nb_nodes, ram_addr_t size)
 {
@@ -521,9 +562,10 @@ void numa_default_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
 
 void numa_complete_configuration(MachineState *ms)
 {
-    int i;
+    int i, hierarchy, type;
     MachineClass *mc = MACHINE_GET_CLASS(ms);
     NodeInfo *numa_info = ms->numa_state->nodes;
+    HMAT_LB_Info *numa_hmat_lb;
 
     /*
      * If memory hotplug is enabled (slots > 0) but without '-numa'
@@ -620,6 +662,21 @@ void numa_complete_configuration(MachineState *ms)
             /* Validation succeeded, now fill in any missing distances. */
             complete_init_numa_distance(ms);
         }
+
+        if (ms->numa_state->hmat_enabled) {
+            for (hierarchy = HMAT_LB_MEM_MEMORY;
+                 hierarchy <= HMAT_LB_MEM_CACHE_3RD_LEVEL; hierarchy++) {
+                for (type = HMAT_LB_DATA_ACCESS_LATENCY;
+                    type <= HMAT_LB_DATA_WRITE_BANDWIDTH; type++) {
+                    numa_hmat_lb = ms->numa_state->hmat_lb[hierarchy][type];
+
+                    if (numa_hmat_lb) {
+                        calculate_hmat_entry_list(numa_hmat_lb,
+                                                  ms->numa_state->num_nodes);
+                    }
+                }
+            }
+        }
     }
 }
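
For context, the kind of command line that populates hmat_lb and
reaches this loop looks roughly like the following (syntax as
documented for the merged series; the exact option keys in v15 may
differ):

    -machine hmat=on \
    -smp 2 -m 2G \
    -numa node,nodeid=0,cpus=0-1 -numa node,nodeid=1 \
    -numa hmat-lb,initiator=0,target=0,hierarchy=memory,data-type=access-latency,latency=5 \
    -numa hmat-lb,initiator=0,target=0,hierarchy=memory,data-type=access-bandwidth,bandwidth=200M

Here latency is given in nanoseconds and bandwidth in bytes per second
(with the usual size suffixes), which is exactly the raw data this
patch divides down by the common base.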