
[v2] riscv: Ensure only ASIDLEN is used for sfence.vma

Message ID 20220330214358.3409766-1-alistair.francis@opensource.wdc.com (mailing list archive)
State New, archived
Series [v2] riscv: Ensure only ASIDLEN is used for sfence.vma

Commit Message

Alistair Francis March 30, 2022, 9:43 p.m. UTC
From: Alistair Francis <alistair.francis@wdc.com>

When we set the value of context.id we encode both the asid and the
current_version in the return statement of __new_context():

    return asid | ver;

This means that when local_flush_tlb_all_asid() is called with the asid
taken from context.id we can write an incorrect value that still has the
version bits set.

We get away with this because the hardware ignores the extra bits, as the
RISC-V specification states:

"bits SXLEN-1:ASIDMAX of the value held in rs2 are reserved for future
standard use. Until their use is defined by a standard extension, they
should be zeroed by software and ignored by current implementations."

but it is still a bug and worth addressing as we are incorrectly setting
extra bits.

This patch uses asid_mask when calling sfence.vma to ensure the asid is
always the correct length (ASIDLEN). This is similar to what we do in
arch/riscv/mm/context.c.

Fixes: 3f1e782998cd ("riscv: add ASID-based tlbflushing methods")
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 arch/riscv/mm/context.c  | 2 +-
 arch/riscv/mm/tlbflush.c | 2 +-
 include/linux/mm_types.h | 2 ++
 3 files changed, 4 insertions(+), 2 deletions(-)
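
For reference, a minimal sketch of the masking described above, assuming an
example ASIDLEN of 16 bits; all names here are illustrative and not taken
from the kernel sources:

    /* Illustrative sketch only: the kernel probes ASIDLEN from satp at boot
     * and derives asid_mask from it in arch/riscv/mm/context.c. */
    #define EXAMPLE_ASID_BITS 16                      /* stand-in for ASIDLEN */
    #define EXAMPLE_ASID_MASK ((1UL << EXAMPLE_ASID_BITS) - 1)

    /* __new_context() effectively returns asid | ver, with the version held
     * in the bits above ASIDLEN, so context.id is not a bare hardware ASID. */
    static unsigned long example_context_id(unsigned long asid, unsigned long ver)
    {
            return asid | ver;
    }

    /* Stripping the version bits before the value reaches sfence.vma (or the
     * SBI *_asid calls) is what this patch adds. */
    static unsigned long example_hw_asid(unsigned long context_id)
    {
            return context_id & EXAMPLE_ASID_MASK;
    }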

Comments

Damien Le Moal March 30, 2022, 10:33 p.m. UTC | #1
On 3/31/22 06:43, Alistair Francis wrote:
> From: Alistair Francis <alistair.francis@wdc.com>
> 
> When we set the value of context.id using __new_context() we set both
> the asid and the current_version with this return statement in
> __new_context():
> 
>     return asid | ver;
> 
> This means that when local_flush_tlb_all_asid() is called with the asid
> specified from context.id we can write the incorrect value.
> 
> We get away with this as hardware ignores the extra bits, as the RISC-V
> specification states:
> 
> "bits SXLEN-1:ASIDMAX of the value held in rs2 are reserved for future
> standard use. Until their use is defined by a standard extension, they
> should be zeroed by software and ignored by current implementations."
> 
> but it is still a bug and worth addressing as we are incorrectly setting
> extra bits.
> 
> This patch uses asid_mask when calling sfence.vma to ensure the asid is
> always the correct len (ASIDLEN). This is similar to what we do in
> arch/riscv/mm/context.c.
> 
> Fixes: 3f1e782998cd ("riscv: add ASID-based tlbflushing methods")
> Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
> ---
>  arch/riscv/mm/context.c  | 2 +-
>  arch/riscv/mm/tlbflush.c | 2 +-
>  include/linux/mm_types.h | 2 ++
>  3 files changed, 4 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
> index 7acbfbd14557..4329fe54176b 100644
> --- a/arch/riscv/mm/context.c
> +++ b/arch/riscv/mm/context.c
> @@ -22,7 +22,7 @@ DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
>  
>  static unsigned long asid_bits;
>  static unsigned long num_asids;
> -static unsigned long asid_mask;
> +unsigned long asid_mask;
>  
>  static atomic_long_t current_version;
>  
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> index 37ed760d007c..ef701fa83f36 100644
> --- a/arch/riscv/mm/tlbflush.c
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -42,7 +42,7 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
>  	/* check if the tlbflush needs to be sent to other CPUs */
>  	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
>  	if (static_branch_unlikely(&use_asid_allocator)) {
> -		unsigned long asid = atomic_long_read(&mm->context.id);
> +		unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;

It would be a lot nicer and less error-prone to have a small helper
function for this, no?
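
One possible shape for such a helper, sketched purely for illustration (the
name get_mm_asid() and where it would live are assumptions, not part of the
posted patch):

    static inline unsigned long get_mm_asid(struct mm_struct *mm)
    {
            /* Read the context id and strip the allocator's version bits,
             * leaving only the ASIDLEN-wide hardware ASID. */
            return atomic_long_read(&mm->context.id) & asid_mask;
    }

__sbi_tlb_flush_range() could then call get_mm_asid(mm) instead of
open-coding the mask.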

>  
>  		if (broadcast) {
>  			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 8834e38c06a4..5fa7cc0af853 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -666,6 +666,8 @@ struct mm_struct {
>  
>  extern struct mm_struct init_mm;
>  
> +extern unsigned long asid_mask;
> +
>  /* Pointer magic because the dynamic array size confuses some compilers. */
>  static inline void mm_init_cpumask(struct mm_struct *mm)
>  {

Patch

diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 7acbfbd14557..4329fe54176b 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -22,7 +22,7 @@  DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
 
 static unsigned long asid_bits;
 static unsigned long num_asids;
-static unsigned long asid_mask;
+unsigned long asid_mask;
 
 static atomic_long_t current_version;
 
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 37ed760d007c..ef701fa83f36 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -42,7 +42,7 @@  static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 	/* check if the tlbflush needs to be sent to other CPUs */
 	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
 	if (static_branch_unlikely(&use_asid_allocator)) {
-		unsigned long asid = atomic_long_read(&mm->context.id);
+		unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
 
 		if (broadcast) {
 			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 8834e38c06a4..5fa7cc0af853 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -666,6 +666,8 @@  struct mm_struct {
 
 extern struct mm_struct init_mm;
 
+extern unsigned long asid_mask;
+
 /* Pointer magic because the dynamic array size confuses some compilers. */
 static inline void mm_init_cpumask(struct mm_struct *mm)
 {