
[mm] mm/page_alloc: Avoid second trylock of zone->lock

Message ID 20250331002809.94758-1-alexei.starovoitov@gmail.com (mailing list archive)
State New
Delegated to: BPF

Checks

Context Check Description
netdev/tree_selection success Not a local patch
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-0 to -51 success All 52 CI jobs passed: lint, unit tests, matrix setup, and the build, build-release, veristat, and test_maps/test_progs/test_verifier runs on x86_64 (gcc, llvm-17, llvm-18), aarch64 (gcc), and s390x (gcc)

Commit Message

Alexei Starovoitov March 31, 2025, 12:28 a.m. UTC
From: Alexei Starovoitov <ast@kernel.org>

spin_trylock followed by spin_lock will cause an extra write access
to the lock's cache line. If the lock is contended, this may cause
unnecessary cache line bouncing and will also execute a redundant
irq restore/save pair. Therefore, check alloc/fpi_flags first and
use either spin_trylock or spin_lock.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Fixes: 97769a53f117 ("mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 mm/page_alloc.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
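
The reordering is easier to see outside the diff context. Below is a
minimal userspace analogue of the before/after control flow, using POSIX
spinlocks as a stand-in for spin_trylock_irqsave()/spin_lock_irqsave().
This is a sketch for illustration only: the kernel's irq save/restore and
the FPI_TRYLOCK/ALLOC_TRYLOCK flags have no direct userspace equivalent,
and the function names below are made up for the example.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_spinlock_t lock;

/* Before the patch: always trylock first. On contention the failed
 * trylock dirties the lock's cache line once and the unconditional
 * lock below dirties it again; in the kernel, the failed
 * spin_trylock_irqsave() also restores irqs only for
 * spin_lock_irqsave() to save/disable them again. */
static bool lock_before(bool trylock_only)
{
	if (pthread_spin_trylock(&lock) != 0) {
		if (trylock_only)
			return false;	/* caller must not spin */
		pthread_spin_lock(&lock);
	}
	return true;
}

/* After the patch: branch on the flag first, so each path touches
 * the lock exactly once. */
static bool lock_after(bool trylock_only)
{
	if (trylock_only) {
		if (pthread_spin_trylock(&lock) != 0)
			return false;
	} else {
		pthread_spin_lock(&lock);
	}
	return true;
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	if (lock_after(false)) {
		printf("locked\n");
		pthread_spin_unlock(&lock);
	}
	pthread_spin_destroy(&lock);
	return 0;
}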

Comments

Sebastian Andrzej Siewior March 31, 2025, 8:11 a.m. UTC | #1
On 2025-03-30 17:28:09 [-0700], Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@kernel.org>
> 
> spin_trylock followed by spin_lock will cause an extra write access
> to the lock's cache line. If the lock is contended, this may cause
> unnecessary cache line bouncing and will also execute a redundant
> irq restore/save pair. Therefore, check alloc/fpi_flags first and
> use either spin_trylock or spin_lock.
> 
> Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
> Fixes: 97769a53f117 ("mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation")
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

Sebastian
Michal Hocko March 31, 2025, 10:52 a.m. UTC | #2
On Sun 30-03-25 17:28:09, Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@kernel.org>
> 
> spin_trylock followed by spin_lock will cause an extra write access
> to the lock's cache line. If the lock is contended, this may cause
> unnecessary cache line bouncing and will also execute a redundant
> irq restore/save pair. Therefore, check alloc/fpi_flags first and
> use either spin_trylock or spin_lock.
> 
> Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
> Fixes: 97769a53f117 ("mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation")
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Makes sense. Fixes tag is probably overreaching but whatever.
Acked-by: Michal Hocko <mhocko@suse.com>

Thanks!

> ---
>  mm/page_alloc.c | 15 +++++++++------
>  1 file changed, 9 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index e3ea5bf5c459..ffbb5678bc2f 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -1268,11 +1268,12 @@ static void free_one_page(struct zone *zone, struct page *page,
>  	struct llist_head *llhead;
>  	unsigned long flags;
>  
> -	if (!spin_trylock_irqsave(&zone->lock, flags)) {
> -		if (unlikely(fpi_flags & FPI_TRYLOCK)) {
> +	if (unlikely(fpi_flags & FPI_TRYLOCK)) {
> +		if (!spin_trylock_irqsave(&zone->lock, flags)) {
>  			add_page_to_zone_llist(zone, page, order);
>  			return;
>  		}
> +	} else {
>  		spin_lock_irqsave(&zone->lock, flags);
>  	}
>  
> @@ -2341,9 +2342,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
>  	unsigned long flags;
>  	int i;
>  
> -	if (!spin_trylock_irqsave(&zone->lock, flags)) {
> -		if (unlikely(alloc_flags & ALLOC_TRYLOCK))
> +	if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
> +		if (!spin_trylock_irqsave(&zone->lock, flags))
>  			return 0;
> +	} else {
>  		spin_lock_irqsave(&zone->lock, flags);
>  	}
>  	for (i = 0; i < count; ++i) {
> @@ -2964,9 +2966,10 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
>  
>  	do {
>  		page = NULL;
> -		if (!spin_trylock_irqsave(&zone->lock, flags)) {
> -			if (unlikely(alloc_flags & ALLOC_TRYLOCK))
> +		if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
> +			if (!spin_trylock_irqsave(&zone->lock, flags))
>  				return NULL;
> +		} else {
>  			spin_lock_irqsave(&zone->lock, flags);
>  		}
>  		if (alloc_flags & ALLOC_HIGHATOMIC)
> -- 
> 2.47.1
Vlastimil Babka March 31, 2025, 12:17 p.m. UTC | #3
On 3/31/25 12:52, Michal Hocko wrote:
> On Sun 30-03-25 17:28:09, Alexei Starovoitov wrote:
>> From: Alexei Starovoitov <ast@kernel.org>
>> 
>> spin_trylock followed by spin_lock will cause an extra write access
>> to the lock's cache line. If the lock is contended, this may cause
>> unnecessary cache line bouncing

Right.

> and will also execute a redundant irq restore/save pair.

Maybe that part matters less if we're likely to have to spin anyway - it
doesn't affect other CPUs, at least, unlike the bouncing.

>> Therefore, check alloc/fpi_flags first and use either spin_trylock
>> or spin_lock.

Yeah, this should still be OK for the zone lock, as the fast paths use
pcplists, so we still shouldn't be making the page allocator slower due
to the try_alloc addition.

>> Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
>> Fixes: 97769a53f117 ("mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation")
>> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> 
> Makes sense. Fixes tag is probably overreaching but whatever.

It's fixing 6.15-rc1 code so no possible stable implications anyway.

> Acked-by: Michal Hocko <mhocko@suse.com>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

> Thanks!
> 
>> ---
>>  mm/page_alloc.c | 15 +++++++++------
>>  1 file changed, 9 insertions(+), 6 deletions(-)
>> 
>> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
>> index e3ea5bf5c459..ffbb5678bc2f 100644
>> --- a/mm/page_alloc.c
>> +++ b/mm/page_alloc.c
>> @@ -1268,11 +1268,12 @@ static void free_one_page(struct zone *zone, struct page *page,
>>  	struct llist_head *llhead;
>>  	unsigned long flags;
>>  
>> -	if (!spin_trylock_irqsave(&zone->lock, flags)) {
>> -		if (unlikely(fpi_flags & FPI_TRYLOCK)) {
>> +	if (unlikely(fpi_flags & FPI_TRYLOCK)) {
>> +		if (!spin_trylock_irqsave(&zone->lock, flags)) {
>>  			add_page_to_zone_llist(zone, page, order);
>>  			return;
>>  		}
>> +	} else {
>>  		spin_lock_irqsave(&zone->lock, flags);
>>  	}
>>  
>> @@ -2341,9 +2342,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
>>  	unsigned long flags;
>>  	int i;
>>  
>> -	if (!spin_trylock_irqsave(&zone->lock, flags)) {
>> -		if (unlikely(alloc_flags & ALLOC_TRYLOCK))
>> +	if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
>> +		if (!spin_trylock_irqsave(&zone->lock, flags))
>>  			return 0;
>> +	} else {
>>  		spin_lock_irqsave(&zone->lock, flags);
>>  	}
>>  	for (i = 0; i < count; ++i) {
>> @@ -2964,9 +2966,10 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
>>  
>>  	do {
>>  		page = NULL;
>> -		if (!spin_trylock_irqsave(&zone->lock, flags)) {
>> -			if (unlikely(alloc_flags & ALLOC_TRYLOCK))
>> +		if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
>> +			if (!spin_trylock_irqsave(&zone->lock, flags))
>>  				return NULL;
>> +		} else {
>>  			spin_lock_irqsave(&zone->lock, flags);
>>  		}
>>  		if (alloc_flags & ALLOC_HIGHATOMIC)
>> -- 
>> 2.47.1
>
Alexei Starovoitov March 31, 2025, 4:59 p.m. UTC | #4
On Mon, Mar 31, 2025 at 5:17 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> >> Fixes: 97769a53f117 ("mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation")
> >> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> >
> > Makes sense. Fixes tag is probably overreaching but whatever.
>
> It's fixing 6.15-rc1 code so no possible stable implications anyway.

All true. I added the Fixes tag only because if I didn't then
somebody would question why the tag is missing :)

I often look at "Fixes:" as "Strongly-related-to:".
We might backport these patches to older kernels way before 6.15
is released, so having a documented way to strongly connect patches
is a good thing.

Thanks for the reviews everyone.
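
For background on why the TRYLOCK flags exist: try_alloc_pages() must be
callable from contexts that can never spin on zone->lock, e.g. BPF
programs running in NMI or tracing context. Below is a rough sketch of
the intended calling pattern, assuming the 6.15-rc1 API added by commit
97769a53f117, i.e. try_alloc_pages(nid, order) paired with
free_pages_nolock(page, order). The helper names are hypothetical and
the snippet is illustrative, not a verbatim kernel example.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: grab one page without ever spinning.
 * try_alloc_pages() passes ALLOC_TRYLOCK, so with this patch the
 * allocator goes straight to spin_trylock_irqsave() and never falls
 * into the contended spin_lock_irqsave() path. */
static struct page *grab_page_any_context(int nid)
{
	/* NULL means zone->lock was contended or reserves were low;
	 * the caller must be able to cope with failure. */
	return try_alloc_pages(nid, 0);
}

/* Hypothetical helper: the matching free side. free_pages_nolock()
 * uses FPI_TRYLOCK; if zone->lock is busy, the page is deferred to a
 * per-zone llist (see add_page_to_zone_llist() in the hunk above). */
static void drop_page_any_context(struct page *page)
{
	free_pages_nolock(page, 0);
}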
Harry Yoo April 1, 2025, 3:28 a.m. UTC | #5
On Sun, Mar 30, 2025 at 05:28:09PM -0700, Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@kernel.org>
> 
> spin_trylock followed by spin_lock will cause an extra write access
> to the lock's cache line. If the lock is contended, this may cause
> unnecessary cache line bouncing and will also execute a redundant
> irq restore/save pair. Therefore, check alloc/fpi_flags first and
> use either spin_trylock or spin_lock.
> 
> Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
> Fixes: 97769a53f117 ("mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation")
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> ---

Looks good to me,
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>

>  mm/page_alloc.c | 15 +++++++++------
>  1 file changed, 9 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index e3ea5bf5c459..ffbb5678bc2f 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -1268,11 +1268,12 @@ static void free_one_page(struct zone *zone, struct page *page,
>  	struct llist_head *llhead;
>  	unsigned long flags;
>  
> -	if (!spin_trylock_irqsave(&zone->lock, flags)) {
> -		if (unlikely(fpi_flags & FPI_TRYLOCK)) {
> +	if (unlikely(fpi_flags & FPI_TRYLOCK)) {
> +		if (!spin_trylock_irqsave(&zone->lock, flags)) {
>  			add_page_to_zone_llist(zone, page, order);
>  			return;
>  		}
> +	} else {
>  		spin_lock_irqsave(&zone->lock, flags);
>  	}
>  
> @@ -2341,9 +2342,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
>  	unsigned long flags;
>  	int i;
>  
> -	if (!spin_trylock_irqsave(&zone->lock, flags)) {
> -		if (unlikely(alloc_flags & ALLOC_TRYLOCK))
> +	if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
> +		if (!spin_trylock_irqsave(&zone->lock, flags))
>  			return 0;
> +	} else {
>  		spin_lock_irqsave(&zone->lock, flags);
>  	}
>  	for (i = 0; i < count; ++i) {
> @@ -2964,9 +2966,10 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
>  
>  	do {
>  		page = NULL;
> -		if (!spin_trylock_irqsave(&zone->lock, flags)) {
> -			if (unlikely(alloc_flags & ALLOC_TRYLOCK))
> +		if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
> +			if (!spin_trylock_irqsave(&zone->lock, flags))
>  				return NULL;
> +		} else {
>  			spin_lock_irqsave(&zone->lock, flags);
>  		}
>  		if (alloc_flags & ALLOC_HIGHATOMIC)
> -- 
> 2.47.1
> 
>

Patch

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e3ea5bf5c459..ffbb5678bc2f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1268,11 +1268,12 @@  static void free_one_page(struct zone *zone, struct page *page,
 	struct llist_head *llhead;
 	unsigned long flags;
 
-	if (!spin_trylock_irqsave(&zone->lock, flags)) {
-		if (unlikely(fpi_flags & FPI_TRYLOCK)) {
+	if (unlikely(fpi_flags & FPI_TRYLOCK)) {
+		if (!spin_trylock_irqsave(&zone->lock, flags)) {
 			add_page_to_zone_llist(zone, page, order);
 			return;
 		}
+	} else {
 		spin_lock_irqsave(&zone->lock, flags);
 	}
 
@@ -2341,9 +2342,10 @@  static int rmqueue_bulk(struct zone *zone, unsigned int order,
 	unsigned long flags;
 	int i;
 
-	if (!spin_trylock_irqsave(&zone->lock, flags)) {
-		if (unlikely(alloc_flags & ALLOC_TRYLOCK))
+	if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
+		if (!spin_trylock_irqsave(&zone->lock, flags))
 			return 0;
+	} else {
 		spin_lock_irqsave(&zone->lock, flags);
 	}
 	for (i = 0; i < count; ++i) {
@@ -2964,9 +2966,10 @@  struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
 
 	do {
 		page = NULL;
-		if (!spin_trylock_irqsave(&zone->lock, flags)) {
-			if (unlikely(alloc_flags & ALLOC_TRYLOCK))
+		if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
+			if (!spin_trylock_irqsave(&zone->lock, flags))
 				return NULL;
+		} else {
 			spin_lock_irqsave(&zone->lock, flags);
 		}
 		if (alloc_flags & ALLOC_HIGHATOMIC)