
riscv: set max_pfn to the PFN of the last page

Message ID 1587630565-29325-1-git-send-email-vincent.chen@sifive.com (mailing list archive)
State New, archived
Series: riscv: set max_pfn to the PFN of the last page

Commit Message

Vincent Chen April 23, 2020, 8:29 a.m. UTC
On riscv, max_pfn currently equals zero because the port never sets it. I found that, as of the v5.6 kernel, this prevents users from reading some page information through /proc, such as /proc/kpagecount, because the new sanity checks there reject every PFN. The following messages are displayed by the stress-ng test suite when running "stress-ng --verbose --physpage 1 -t 1" on a HiFive Unleashed board.

 # stress-ng --verbose --physpage 1 -t 1
 stress-ng: debug: [109] 4 processors online, 4 processors configured
 stress-ng: info: [109] dispatching hogs: 1 physpage
 stress-ng: debug: [109] cache allocate: reducing cache level from L3 (too high) to L0
 stress-ng: debug: [109] get_cpu_cache: invalid cache_level: 0
 stress-ng: info: [109] cache allocate: using built-in defaults as no suitable cache found
 stress-ng: debug: [109] cache allocate: default cache size: 2048K
 stress-ng: debug: [109] starting stressors
 stress-ng: debug: [109] 1 stressor spawned
 stress-ng: debug: [110] stress-ng-physpage: started [110] (instance 0)
 stress-ng: error: [110] stress-ng-physpage: cannot read page count for address 0x3fd34de000 in /proc/kpagecount, errno=0 (Success)
 stress-ng: error: [110] stress-ng-physpage: cannot read page count for address 0x3fd32db078 in /proc/kpagecount, errno=0 (Success)
 ...
 stress-ng: error: [110] stress-ng-physpage: cannot read page count for address 0x3fd32db078 in /proc/kpagecount, errno=0 (Success)
 stress-ng: debug: [110] stress-ng-physpage: exited [110] (instance 0)
 stress-ng: debug: [109] process [110] terminated
 stress-ng: info: [109] successful run completed in 1.00s
 #

After applying this patch, the kernel can pass the test.

 # stress-ng --verbose --physpage 1 -t 1
 stress-ng: debug: [104] 4 processors online, 4 processors configured
 stress-ng: info: [104] dispatching hogs: 1 physpage
 stress-ng: info: [104] cache allocate: using defaults, can't determine cache details from sysfs
 stress-ng: debug: [104] cache allocate: default cache size: 2048K
 stress-ng: debug: [104] starting stressors
 stress-ng: debug: [104] 1 stressor spawned
 stress-ng: debug: [105] stress-ng-physpage: started [105] (instance 0)
 stress-ng: debug: [105] stress-ng-physpage: exited [105] (instance 0)
 stress-ng: debug: [104] process [105] terminated
 stress-ng: info: [104] successful run completed in 1.01s
 #
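
For context on the failure mode: the sanity checks added in v5.6 make the /proc/kpage* readers refuse to read at or beyond the last dumpable PFN, which is derived from max_pfn. With max_pfn stuck at zero, every request is out of bounds, so read() returns 0 bytes without setting errno, which is exactly the "errno=0 (Success)" errors above. A minimal userspace sketch of that behavior (illustrative only; the names and shape are assumptions, not the exact fs/proc/page.c code):

 #include <stdio.h>

 static unsigned long max_pfn;  /* the riscv port never set this, so it stayed 0 */

 /* Illustrative model of the v5.6 bound check in the /proc/kpage* readers. */
 static long read_page_count(unsigned long pfn)
 {
         if (pfn >= max_pfn)     /* with max_pfn == 0, every PFN fails this */
                 return 0;       /* short read: userspace sees EOF, errno untouched */
         return 1;               /* pretend one 8-byte count was copied out */
 }

 int main(void)
 {
         /* PFN of the failing address 0x3fd34de000 from the log: addr >> 12 */
         unsigned long pfn = 0x3fd34de000UL >> 12;
         printf("read for pfn 0x%lx returned %ld\n", pfn, read_page_count(pfn));
         return 0;
 }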

Signed-off-by: Vincent Chen <vincent.chen@sifive.com>
---
 arch/riscv/mm/init.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

Comments

Anup Patel April 23, 2020, 8:32 a.m. UTC | #1
On Thu, Apr 23, 2020 at 1:59 PM Vincent Chen <vincent.chen@sifive.com> wrote:
>
> [...]
>

Looks good to me.

Reviewed-by: Anup Patel <anup@brainfault.ort>

If possible, add a "Fixes:" line and also Cc the stable kernel.

Regards,
Anup
Yash Shah April 24, 2020, 4:23 a.m. UTC | #2
> -----Original Message-----
> From: linux-riscv <linux-riscv-bounces@lists.infradead.org> On Behalf Of Vincent Chen
> Subject: [PATCH] riscv: set max_pfn to the PFN of the last page
>
> [...]
>
The patch looks good to me. I have also verified it on HiFive Unleashed with Linux v5.7-rc2, and it works fine.

Tested-by: Yash Shah <yash.shah@sifive.com>
Reviewed-by: Yash Shah <yash.shah@sifive.com>

- Yash
Vincent Chen April 27, 2020, 5:37 a.m. UTC | #3
On Thu, Apr 23, 2020 at 4:33 PM Anup Patel <anup@brainfault.org> wrote:
>
> On Thu, Apr 23, 2020 at 1:59 PM Vincent Chen <vincent.chen@sifive.com> wrote:
> >
> > [...]
> >
>
> Looks good to me.
>
> Reviewed-by: Anup Patel <anup@brainfault.ort>
>
> If possible, add a "Fixes:" line and also Cc the stable kernel.
>

OK, I will add a "Fixes:" line and Cc the stable kernel in the next version of the patch.
Thanks for your feedback.
Vincent Chen April 27, 2020, 5:38 a.m. UTC | #4
On Fri, Apr 24, 2020 at 12:23 PM Yash Shah <yash.shah@sifive.com> wrote:
>
> > -----Original Message-----
> > Subject: [PATCH] riscv: set max_pfn to the PFN of the last page
> >
> > [...]
>
> The patch looks good to me. I have also verified it on HiFive Unleashed with Linux v5.7-rc2, and it works fine.
>
> Tested-by: Yash Shah <yash.shah@sifive.com>
> Reviewed-by: Yash Shah <yash.shah@sifive.com>
>
> - Yash
>

Thank you for reviewing and testing.
Anup Patel April 27, 2020, 6:14 a.m. UTC | #5
On Mon, Apr 27, 2020 at 11:07 AM Vincent Chen <vincent.chen@sifive.com> wrote:
>
> On Thu, Apr 23, 2020 at 4:33 PM Anup Patel <anup@brainfault.org> wrote:
> >
> > On Thu, Apr 23, 2020 at 1:59 PM Vincent Chen <vincent.chen@sifive.com> wrote:
> > >
> > > [...]
> > >
> >
> > Looks good to me.
> >
> > Reviewed-by: Anup Patel <anup@brainfault.ort>
> >
> > If possible, add a "Fixes:" line and also Cc the stable kernel.
> >
>
> OK, I will add a "Fixes:" line and Cc the stable kernel in the next version of the patch.
> Thanks for your feedback.

There is a typo in the Reviewed-by email address above, so here is the corrected tag:

Reviewed-by: Anup Patel <anup@brainfault.org>

Thanks,
Anup
Palmer Dabbelt May 4, 2020, 8:01 p.m. UTC | #6
On Thu, 23 Apr 2020 01:29:25 PDT (-0700), vincent.chen@sifive.com wrote:
> [...]

Thanks. This is on the fixes branch, with a stable tag. I couldn't find a good Fixes tag because it appears this has been broken for a long time.
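
For reference, the tags requested above take the standard kernel form shown below; the hash is a placeholder, since as noted no suitable Fixes target was identified:

 Fixes: 123456789abc ("subject of the commit that introduced the bug")
 Cc: stable@vger.kernel.org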

Patch

diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index fab855963c73..157924baa191 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -149,7 +149,8 @@ void __init setup_bootmem(void)
 	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
 
 	set_max_mapnr(PFN_DOWN(mem_size));
-	max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
+	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+	max_low_pfn = max_pfn;
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	setup_initrd();
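
A note on the arithmetic of the fix: memblock_end_of_DRAM() returns the physical address one byte past the end of memory, and PFN_DOWN() shifts that right by PAGE_SHIFT, so max_pfn becomes the exclusive upper bound on valid PFNs, which is what the generic /proc checks expect. A standalone sketch with assumed values (8 GiB of DRAM at physical 0x80000000, roughly a HiFive Unleashed layout; the numbers are illustrative, not taken from the patch):

 #include <stdio.h>

 #define PAGE_SHIFT 12
 #define PFN_DOWN(addr) ((addr) >> PAGE_SHIFT)

 int main(void)
 {
         /* Assumed memory layout for illustration. */
         unsigned long long dram_base = 0x80000000ULL;
         unsigned long long dram_size = 8ULL << 30;      /* 8 GiB */
         /* Stand-in for memblock_end_of_DRAM(): one byte past the end. */
         unsigned long long end_of_dram = dram_base + dram_size;

         /* max_pfn = PFN_DOWN(memblock_end_of_DRAM()), as in the patch. */
         unsigned long long max_pfn = PFN_DOWN(end_of_dram);
         printf("max_pfn = 0x%llx\n", max_pfn);          /* prints 0x280000 */
         return 0;
 }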