hw/arm/virt: Prevent CPUs in one socket from spanning multiple NUMA nodes

Message ID 20230221085352.212938-1-gshan@redhat.com (mailing list archive)
State New, archived
Series hw/arm/virt: Prevent CPUs in one socket from spanning multiple NUMA nodes

Commit Message

Gavin Shan Feb. 21, 2023, 8:53 a.m. UTC
The Linux guest kernel reports a warning when two CPUs in one socket
are associated with different NUMA nodes. With the following command
lines, each socket holds 3 cores, so socket 0 spans nodes 0 and 1 and
socket 1 spans nodes 1 and 2.

  -smp 6,maxcpus=6,sockets=2,clusters=1,cores=3,threads=1 \
  -numa node,nodeid=0,cpus=0-1,memdev=ram0                \
  -numa node,nodeid=1,cpus=2-3,memdev=ram1                \
  -numa node,nodeid=2,cpus=4-5,memdev=ram2                \

  ------------[ cut here ]------------
  WARNING: CPU: 0 PID: 1 at kernel/sched/topology.c:2271 build_sched_domains+0x284/0x910
  Modules linked in:
  CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.14.0-268.el9.aarch64 #1
  pstate: 00400005 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
  pc : build_sched_domains+0x284/0x910
  lr : build_sched_domains+0x184/0x910
  sp : ffff80000804bd50
  x29: ffff80000804bd50 x28: 0000000000000002 x27: 0000000000000000
  x26: ffff800009cf9a80 x25: 0000000000000000 x24: ffff800009cbf840
  x23: ffff000080325000 x22: ffff0000005df800 x21: ffff80000a4ce508
  x20: 0000000000000000 x19: ffff000080324440 x18: 0000000000000014
  x17: 00000000388925c0 x16: 000000005386a066 x15: 000000009c10cc2e
  x14: 00000000000001c0 x13: 0000000000000001 x12: ffff00007fffb1a0
  x11: ffff00007fffb180 x10: ffff80000a4ce508 x9 : 0000000000000041
  x8 : ffff80000a4ce500 x7 : ffff80000a4cf920 x6 : 0000000000000001
  x5 : 0000000000000001 x4 : 0000000000000007 x3 : 0000000000000002
  x2 : 0000000000001000 x1 : ffff80000a4cf928 x0 : 0000000000000001
  Call trace:
   build_sched_domains+0x284/0x910
   sched_init_domains+0xac/0xe0
   sched_init_smp+0x48/0xc8
   kernel_init_freeable+0x140/0x1ac
   kernel_init+0x28/0x140
   ret_from_fork+0x10/0x20

Fix it by preventing multiple CPUs in one socket from being associated
with different NUMA nodes.

Reported-by: Yihuang Yu <yihyu@redhat.com>
Signed-off-by: Gavin Shan <gshan@redhat.com>
---
 hw/arm/virt.c | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

Comments

Philippe Mathieu-Daudé Feb. 21, 2023, 9:15 a.m. UTC | #1
On 21/2/23 09:53, Gavin Shan wrote:
> The Linux guest kernel reports a warning when two CPUs in one socket
> are associated with different NUMA nodes.
> [...]
> @@ -2040,6 +2073,10 @@ static void machvirt_init(MachineState *machine)
>           exit(1);
>       }
>   
> +    if (!numa_state_valid(machine)) {
> +        exit(1);
> +    }

Why restrict to the virt machine?
Gavin Shan Feb. 21, 2023, 9:21 a.m. UTC | #2
On 2/21/23 8:15 PM, Philippe Mathieu-Daudé wrote:
> On 21/2/23 09:53, Gavin Shan wrote:
>> [...]
> 
> Why restrict to the virt machine?
> 

We tried x86 machines and the virt machine, but the issue isn't reproducible on x86
machines. So I think it's a machine- or architecture-specific issue. However, I believe
RISC-V has a similar issue because linux/drivers/base/arch_topology.c is shared by
ARM64 and RISC-V. x86 doesn't use that driver to populate its CPU topology.

Thanks,
Gavin
Philippe Mathieu-Daudé Feb. 21, 2023, 10:21 a.m. UTC | #3
On 21/2/23 10:21, Gavin Shan wrote:
> On 2/21/23 8:15 PM, Philippe Mathieu-Daudé wrote:
>> [...]
>> Why restrict to the virt machine?
>
> We tried x86 machines and the virt machine, but the issue isn't
> reproducible on x86 machines. So I think it's a machine- or
> architecture-specific issue. However, I believe RISC-V has a similar
> issue because linux/drivers/base/arch_topology.c is shared by ARM64
> and RISC-V. x86 doesn't use that driver to populate its CPU topology.

Oh, I hadn't thought about the other archs; I meant this seems to be a
generic issue which affects all (ARM) machines, so why restrict it to
the (ARM) virt machine?
Gavin Shan Feb. 21, 2023, 11:12 p.m. UTC | #4
On 2/21/23 9:21 PM, Philippe Mathieu-Daudé wrote:
> [...]
> Oh, I hadn't thought about the other archs; I meant this seems to be a
> generic issue which affects all (ARM) machines, so why restrict it to
> the (ARM) virt machine?
> 

[Ccing Igor for comments]

Well, the virt machine is the only concern for us for now. You're right that all ARM64 and
ARM machines need this check and limitation, so the check needs to be done in the generic
path. The best way I can figure out is something like the below. The idea is to introduce
a switch in 'struct NumaState' and do the check in the generic path; the switch is turned
on by individual machines. Please let me know if you have better ideas.

- Add a 'bool has_strict_socket_mapping' field to 'struct NumaState', which is 'false' by
   default until a machine-specific initialization function calls the helper
   set_numa_strict_socket_mapping(), for example in hw/arm/virt.c::virt_instance_init().

- In numa_complete_configuration(), do the check to make sure a socket doesn't cross a
   NUMA node boundary if 'NumaState::has_strict_socket_mapping' is true.
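
A rough sketch of the idea, with purely illustrative names (not a final API):

  /* include/sysemu/numa.h: flag in the generic NUMA state */
  struct NumaState {
      int num_nodes;                    /* existing: number of NUMA nodes */
      /* ... other existing fields ... */
      bool has_strict_socket_mapping;   /* new: no socket may span nodes */
  };

  /* generic helper for machines that want the restriction */
  static inline void set_numa_strict_socket_mapping(MachineState *ms)
  {
      ms->numa_state->has_strict_socket_mapping = true;
  }

  /* hw/core/numa.c::numa_complete_configuration(): reuse the same
   * socket/node check as in this patch, but only when requested */
  if (ms->numa_state->has_strict_socket_mapping &&
      !numa_state_valid(ms)) {
      exit(1);
  }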

Thanks,
Gavin
Philippe Mathieu-Daudé Feb. 21, 2023, 11:31 p.m. UTC | #5
On 22/2/23 00:12, Gavin Shan wrote:
> [...]
> Well, the virt machine is the only concern for us for now. You're right
> that all ARM64 and ARM machines need this check and limitation, so the
> check needs to be done in the generic path. The best way I can figure
> out is something like the below. The idea is to introduce a switch in
> 'struct NumaState' and do the check in the generic path; the switch is
> turned on by individual machines. Please let me know if you have better
> ideas.

Can't this be done generically in machine_numa_finish_cpu_init()
-> numa_validate_initiator()?

> [...]
Gavin Shan Feb. 21, 2023, 11:38 p.m. UTC | #6
On 2/22/23 10:31 AM, Philippe Mathieu-Daudé wrote:
> [...]
> Can't this be done generically in machine_numa_finish_cpu_init()
> -> numa_validate_initiator()?
> 

Yes, machine_numa_finish_cpu_init() is a better place, before numa_validate_initiator().
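
Roughly, as a sketch (numa_socket_mapping_valid() is an illustrative name
for the socket/node check from this patch, moved into generic code):

  /* hw/core/machine.c (sketch) */
  void machine_numa_finish_cpu_init(MachineState *machine)
  {
      /* ... existing default node-ID assignment for possible CPUs ... */

      /* refuse configurations where one socket spans NUMA nodes */
      if (!numa_socket_mapping_valid(machine)) {
          exit(1);
      }

      /* ... existing numa_validate_initiator() call follows ... */
  }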

Thanks,
Gavin
Patch

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index ac626b3bef..e0af267c77 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -230,6 +230,39 @@ static bool cpu_type_valid(const char *cpu)
     return false;
 }
 
+static bool numa_state_valid(MachineState *ms)
+{
+    MachineClass *mc = MACHINE_GET_CLASS(ms);
+    NumaState *state = ms->numa_state;
+    const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);
+    const CPUArchId *cpus = possible_cpus->cpus;
+    int len = possible_cpus->len, i, j;
+
+    if (!state || state->num_nodes <= 1 || len <= 1) {
+        return true;
+    }
+
+    for (i = 0; i < len; i++) {
+        for (j = i + 1; j < len; j++) {
+            if (cpus[i].props.has_socket_id &&
+                cpus[i].props.has_node_id &&
+                cpus[j].props.has_socket_id &&
+                cpus[j].props.has_node_id &&
+                cpus[i].props.socket_id == cpus[j].props.socket_id &&
+                cpus[i].props.node_id != cpus[j].props.node_id) {
+                error_report("CPU-%d and CPU-%d in socket-%ld have been "
+                             "associated with node-%ld and node-%ld",
+                             i, j, cpus[i].props.socket_id,
+                             cpus[i].props.node_id,
+                             cpus[j].props.node_id);
+                return false;
+            }
+        }
+    }
+
+    return true;
+}
+
 static void create_randomness(MachineState *ms, const char *node)
 {
     struct {
@@ -2040,6 +2073,10 @@ static void machvirt_init(MachineState *machine)
         exit(1);
     }
 
+    if (!numa_state_valid(machine)) {
+        exit(1);
+    }
+
     possible_cpus = mc->possible_cpu_arch_ids(machine);
 
     /*