
[1/2] arm64: ptrauth: add pointer authentication Armv8.6 enhanced feature

Message ID 1582117240-15330-2-git-send-email-amit.kachhap@arm.com (mailing list archive)
State New, archived
Series arm64: add Armv8.6 pointer authentication

Commit Message

Amit Daniel Kachhap Feb. 19, 2020, 1 p.m. UTC
This patch adds changes for the Pointer Authentication enhanced
features mandatory for Armv8.6. These features are:

* An enhanced PAC generation logic which makes it harder to find the
  correct PAC value of an authenticated pointer. No code change is
  required for this.

* A fault is now generated when a ptrauth authentication instruction
  fails to authenticate the PAC present in an address. This differs
  from the earlier behaviour, where such a failure merely wrote an
  error code into the top bits of the pointer and relied on a
  subsequent load/store to abort. The ptrauth instructions which may
  cause this fault include autiasp, retaa, etc.

The above features are represented by additional values of the
Address Authentication cpufeature fields. These fields are now
treated as FTR_EXACT instead of FTR_LOWER_SAFE, since the different
values imply different behaviour.
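
For illustration, a simplified sketch of how the core cpufeature code
reconciles a field that differs between CPUs (modelled loosely on
arm64_ftr_safe_value() in cpufeature.c; abbreviated, not the exact
kernel code):

static s64 ftr_safe_value(const struct arm64_ftr_bits *ftrp,
			  s64 new, s64 cur)
{
	switch (ftrp->type) {
	case FTR_LOWER_SAFE:
		/* Any lower value is compatible: keep the minimum. */
		return new < cur ? new : cur;
	case FTR_EXACT:
		/*
		 * Values are not ordered by compatibility, so any
		 * mismatch falls back to the safe value (0 here),
		 * i.e. the field reads as "feature not present".
		 */
		return ftrp->safe_val;
	default:
		return cur;
	}
}

With FTR_EXACT, a system mixing e.g. APA = 0x1 and APA = 0x4 cores
resolves the field to 0 for the whole system.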

The fault taken in the kernel due to FPAC is treated as an illegal
instruction, so SIGILL is injected with ILL_ILLOPN as the signal
code. Note that this differs from the earlier Armv8.3 ptrauth
behaviour, where pointer authentication failures resulted in SIGSEGV.
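
As a userspace-side illustration (not part of this patch), a process
could observe the new signal and code roughly as follows; the handler
body is only a demo:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void ill_handler(int sig, siginfo_t *info, void *ctx)
{
	/*
	 * On FPAC-capable hardware a failed AUT* lands here with
	 * si_code == ILL_ILLOPN; printf is not async-signal-safe but
	 * is acceptable for a one-shot demo.
	 */
	printf("SIGILL: si_code=%d at %p\n", info->si_code, info->si_addr);
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = ill_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGILL, &sa, NULL);

	/* ... run code that fails an AUT* instruction here ... */
	return 0;
}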

Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
---
 arch/arm64/include/asm/esr.h       |  4 +++-
 arch/arm64/include/asm/exception.h |  1 +
 arch/arm64/include/asm/sysreg.h    | 24 ++++++++++++++++--------
 arch/arm64/kernel/cpufeature.c     |  4 ++--
 arch/arm64/kernel/entry-common.c   | 25 +++++++++++++++++++++++++
 arch/arm64/kernel/traps.c          | 18 ++++++++++++++++++
 6 files changed, 65 insertions(+), 11 deletions(-)

Comments

Will Deacon Feb. 28, 2020, 11:57 a.m. UTC | #1
On Wed, Feb 19, 2020 at 06:30:39PM +0530, Amit Daniel Kachhap wrote:
> This patch adds changes for the Pointer Authentication enhanced
> features mandatory for Armv8.6. These features are:
> 
> * An enhanced PAC generation logic which makes it harder to find the
>   correct PAC value of an authenticated pointer. No code change is
>   required for this.
> 
> * A fault is now generated when a ptrauth authentication instruction
>   fails to authenticate the PAC present in an address. This differs
>   from the earlier behaviour, where such a failure merely wrote an
>   error code into the top bits of the pointer and relied on a
>   subsequent load/store to abort. The ptrauth instructions which may
>   cause this fault include autiasp, retaa, etc.
> 
> The above features are represented by additional values of the
> Address Authentication cpufeature fields. These fields are now
> treated as FTR_EXACT instead of FTR_LOWER_SAFE, since the different
> values imply different behaviour.
> 
> The fault taken in the kernel due to FPAC is treated as an illegal
> instruction, so SIGILL is injected with ILL_ILLOPN as the signal
> code. Note that this differs from the earlier Armv8.3 ptrauth
> behaviour, where pointer authentication failures resulted in SIGSEGV.
> 
> Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
> ---
>  arch/arm64/include/asm/esr.h       |  4 +++-
>  arch/arm64/include/asm/exception.h |  1 +
>  arch/arm64/include/asm/sysreg.h    | 24 ++++++++++++++++--------
>  arch/arm64/kernel/cpufeature.c     |  4 ++--
>  arch/arm64/kernel/entry-common.c   | 25 +++++++++++++++++++++++++
>  arch/arm64/kernel/traps.c          | 18 ++++++++++++++++++
>  6 files changed, 65 insertions(+), 11 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
> index cb29253..5a1406f 100644
> --- a/arch/arm64/include/asm/esr.h
> +++ b/arch/arm64/include/asm/esr.h
> @@ -35,7 +35,9 @@
>  #define ESR_ELx_EC_SYS64	(0x18)
>  #define ESR_ELx_EC_SVE		(0x19)
>  #define ESR_ELx_EC_ERET		(0x1a)	/* EL2 only */
> -/* Unallocated EC: 0x1b - 0x1E */
> +/* Unallocated EC: 0x1B */
> +#define ESR_ELx_EC_FPAC		(0x1C)	/* EL1 and above */
> +/* Unallocated EC: 0x1D - 0x1E */
>  #define ESR_ELx_EC_IMP_DEF	(0x1f)	/* EL3 only */
>  #define ESR_ELx_EC_IABT_LOW	(0x20)
>  #define ESR_ELx_EC_IABT_CUR	(0x21)
> diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
> index 7a6e81ca..de76772 100644
> --- a/arch/arm64/include/asm/exception.h
> +++ b/arch/arm64/include/asm/exception.h
> @@ -46,4 +46,5 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
>  void do_cp15instr(unsigned int esr, struct pt_regs *regs);
>  void do_el0_svc(struct pt_regs *regs);
>  void do_el0_svc_compat(struct pt_regs *regs);
> +void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr);
>  #endif	/* __ASM_EXCEPTION_H */
> diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
> index b91570f..77728f5 100644
> --- a/arch/arm64/include/asm/sysreg.h
> +++ b/arch/arm64/include/asm/sysreg.h
> @@ -585,14 +585,22 @@
>  #define ID_AA64ISAR1_APA_SHIFT		4
>  #define ID_AA64ISAR1_DPB_SHIFT		0
>  
> -#define ID_AA64ISAR1_APA_NI		0x0
> -#define ID_AA64ISAR1_APA_ARCHITECTED	0x1
> -#define ID_AA64ISAR1_API_NI		0x0
> -#define ID_AA64ISAR1_API_IMP_DEF	0x1
> -#define ID_AA64ISAR1_GPA_NI		0x0
> -#define ID_AA64ISAR1_GPA_ARCHITECTED	0x1
> -#define ID_AA64ISAR1_GPI_NI		0x0
> -#define ID_AA64ISAR1_GPI_IMP_DEF	0x1
> +#define ID_AA64ISAR1_APA_NI			0x0
> +#define ID_AA64ISAR1_APA_ARCHITECTED		0x1
> +#define ID_AA64ISAR1_APA_ARCH_EPAC		0x2
> +#define ID_AA64ISAR1_APA_ARCH_EPAC2		0x3
> +#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC	0x4
> +#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB	0x5
> +#define ID_AA64ISAR1_API_NI			0x0
> +#define ID_AA64ISAR1_API_IMP_DEF		0x1
> +#define ID_AA64ISAR1_API_IMP_DEF_EPAC		0x2
> +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2		0x3
> +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC	0x4
> +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB	0x5
> +#define ID_AA64ISAR1_GPA_NI			0x0
> +#define ID_AA64ISAR1_GPA_ARCHITECTED		0x1
> +#define ID_AA64ISAR1_GPI_NI			0x0
> +#define ID_AA64ISAR1_GPI_IMP_DEF		0x1
>  
>  /* id_aa64pfr0 */
>  #define ID_AA64PFR0_CSV3_SHIFT		60
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index 8d1c979..a4f8adb 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -154,9 +154,9 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
>  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
>  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
>  	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
> -		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
> +		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
>  	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
> -		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
> +		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
>  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
>  	ARM64_FTR_END,

Hmm. This is a user-visible change and should probably be in its own patch.
It also means we will no longer advertise PAC on systems where not all of
the cores have "Enhanced PAC"; is that really necessary?

Generally we rely on incremental updates to unsigned ID register fields
being a superset (i.e. compatible with) the old behaviour. If that's not
the case here, then older kernels are broken and we may need new HWCAPs.

Will
Mark Rutland Feb. 28, 2020, 12:03 p.m. UTC | #2
On Fri, Feb 28, 2020 at 11:57:37AM +0000, Will Deacon wrote:
> On Wed, Feb 19, 2020 at 06:30:39PM +0530, Amit Daniel Kachhap wrote:
> > This patch adds changes for the Pointer Authentication enhanced
> > features mandatory for Armv8.6. These features are:
> > 
> > * An enhanced PAC generation logic which makes it harder to find the
> >   correct PAC value of an authenticated pointer. No code change is
> >   required for this.
> > 
> > * A fault is now generated when a ptrauth authentication instruction
> >   fails to authenticate the PAC present in an address. This differs
> >   from the earlier behaviour, where such a failure merely wrote an
> >   error code into the top bits of the pointer and relied on a
> >   subsequent load/store to abort. The ptrauth instructions which may
> >   cause this fault include autiasp, retaa, etc.
> > 
> > The above features are represented by additional values of the
> > Address Authentication cpufeature fields. These fields are now
> > treated as FTR_EXACT instead of FTR_LOWER_SAFE, since the different
> > values imply different behaviour.
> > 
> > The fault taken in the kernel due to FPAC is treated as an illegal
> > instruction, so SIGILL is injected with ILL_ILLOPN as the signal
> > code. Note that this differs from the earlier Armv8.3 ptrauth
> > behaviour, where pointer authentication failures resulted in SIGSEGV.
> > 
> > Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
> > ---
> >  arch/arm64/include/asm/esr.h       |  4 +++-
> >  arch/arm64/include/asm/exception.h |  1 +
> >  arch/arm64/include/asm/sysreg.h    | 24 ++++++++++++++++--------
> >  arch/arm64/kernel/cpufeature.c     |  4 ++--
> >  arch/arm64/kernel/entry-common.c   | 25 +++++++++++++++++++++++++
> >  arch/arm64/kernel/traps.c          | 18 ++++++++++++++++++
> >  6 files changed, 65 insertions(+), 11 deletions(-)
> > 
> > diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
> > index cb29253..5a1406f 100644
> > --- a/arch/arm64/include/asm/esr.h
> > +++ b/arch/arm64/include/asm/esr.h
> > @@ -35,7 +35,9 @@
> >  #define ESR_ELx_EC_SYS64	(0x18)
> >  #define ESR_ELx_EC_SVE		(0x19)
> >  #define ESR_ELx_EC_ERET		(0x1a)	/* EL2 only */
> > -/* Unallocated EC: 0x1b - 0x1E */
> > +/* Unallocated EC: 0x1B */
> > +#define ESR_ELx_EC_FPAC		(0x1C)	/* EL1 and above */
> > +/* Unallocated EC: 0x1D - 0x1E */
> >  #define ESR_ELx_EC_IMP_DEF	(0x1f)	/* EL3 only */
> >  #define ESR_ELx_EC_IABT_LOW	(0x20)
> >  #define ESR_ELx_EC_IABT_CUR	(0x21)
> > diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
> > index 7a6e81ca..de76772 100644
> > --- a/arch/arm64/include/asm/exception.h
> > +++ b/arch/arm64/include/asm/exception.h
> > @@ -46,4 +46,5 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
> >  void do_cp15instr(unsigned int esr, struct pt_regs *regs);
> >  void do_el0_svc(struct pt_regs *regs);
> >  void do_el0_svc_compat(struct pt_regs *regs);
> > +void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr);
> >  #endif	/* __ASM_EXCEPTION_H */
> > diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
> > index b91570f..77728f5 100644
> > --- a/arch/arm64/include/asm/sysreg.h
> > +++ b/arch/arm64/include/asm/sysreg.h
> > @@ -585,14 +585,22 @@
> >  #define ID_AA64ISAR1_APA_SHIFT		4
> >  #define ID_AA64ISAR1_DPB_SHIFT		0
> >  
> > -#define ID_AA64ISAR1_APA_NI		0x0
> > -#define ID_AA64ISAR1_APA_ARCHITECTED	0x1
> > -#define ID_AA64ISAR1_API_NI		0x0
> > -#define ID_AA64ISAR1_API_IMP_DEF	0x1
> > -#define ID_AA64ISAR1_GPA_NI		0x0
> > -#define ID_AA64ISAR1_GPA_ARCHITECTED	0x1
> > -#define ID_AA64ISAR1_GPI_NI		0x0
> > -#define ID_AA64ISAR1_GPI_IMP_DEF	0x1
> > +#define ID_AA64ISAR1_APA_NI			0x0
> > +#define ID_AA64ISAR1_APA_ARCHITECTED		0x1
> > +#define ID_AA64ISAR1_APA_ARCH_EPAC		0x2
> > +#define ID_AA64ISAR1_APA_ARCH_EPAC2		0x3
> > +#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC	0x4
> > +#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB	0x5
> > +#define ID_AA64ISAR1_API_NI			0x0
> > +#define ID_AA64ISAR1_API_IMP_DEF		0x1
> > +#define ID_AA64ISAR1_API_IMP_DEF_EPAC		0x2
> > +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2		0x3
> > +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC	0x4
> > +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB	0x5
> > +#define ID_AA64ISAR1_GPA_NI			0x0
> > +#define ID_AA64ISAR1_GPA_ARCHITECTED		0x1
> > +#define ID_AA64ISAR1_GPI_NI			0x0
> > +#define ID_AA64ISAR1_GPI_IMP_DEF		0x1
> >  
> >  /* id_aa64pfr0 */
> >  #define ID_AA64PFR0_CSV3_SHIFT		60
> > diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> > index 8d1c979..a4f8adb 100644
> > --- a/arch/arm64/kernel/cpufeature.c
> > +++ b/arch/arm64/kernel/cpufeature.c
> > @@ -154,9 +154,9 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
> >  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
> >  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
> >  	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
> > -		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
> > +		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
> >  	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
> > -		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
> > +		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
> >  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
> >  	ARM64_FTR_END,
> 
> Hmm. This is a user-visible change and should probably be in its own patch.
> It also means we will no longer advertise PAC on systems where not all of
> the cores have "Enhanced PAC"; is that really necessary?

It matters for KVM, since a guest won't expect the enhanced PAC trap if
the ID registers say it does not have it.

For userspace, the difference is it'll get a SIGILL on the AUT*
instruction rather than a SIGSEGV when using the result of the AUT*
instruction.
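
To make that concrete, a minimal sketch of such a failing sequence
(assuming a compiler targeting armv8.3-a or later and that the kernel
has enabled the IA key for the task; illustrative only):

#include <stdint.h>

void fail_aut(void)
{
	void *p = (void *)fail_aut;

	/* Sign p with the A instruction key and a zero modifier. */
	asm volatile("paciza %0" : "+r"(p));

	/* Flip a bit in the PAC field so authentication must fail. */
	p = (void *)((uintptr_t)p ^ (1UL << 53));

	/*
	 * Pre-Armv8.6: autiza writes an error code into the top bits
	 * and only a later dereference of p raises SIGSEGV.
	 * With FPAC: the autiza itself traps and the kernel delivers
	 * SIGILL at this instruction.
	 */
	asm volatile("autiza %0" : "+r"(p));
}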

> Generally we rely on incremental updates to unsigned ID register fields
> being a superset (i.e. compatible with) the old behaviour. If that's not
> the case here, then older kernels are broken and we may need new HWCAPs.

In this case, the behaviour isn't a strict superset. Enhanced PAC
unconditionally changed the behaviour of AUT* (i.e. there's no opt-in
with a control bit), but it's not clear to me how much this matters for
userspace.

Thanks,
Mark.
Will Deacon Feb. 28, 2020, 12:23 p.m. UTC | #3
On Fri, Feb 28, 2020 at 12:03:14PM +0000, Mark Rutland wrote:
> On Fri, Feb 28, 2020 at 11:57:37AM +0000, Will Deacon wrote:
> > On Wed, Feb 19, 2020 at 06:30:39PM +0530, Amit Daniel Kachhap wrote:
> > > diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> > > index 8d1c979..a4f8adb 100644
> > > --- a/arch/arm64/kernel/cpufeature.c
> > > +++ b/arch/arm64/kernel/cpufeature.c
> > > @@ -154,9 +154,9 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
> > >  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
> > >  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
> > >  	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
> > > -		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
> > > +		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
> > >  	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
> > > -		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
> > > +		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
> > >  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
> > >  	ARM64_FTR_END,
> > 
> > Hmm. This is a user-visible change and should probably be in its own patch.
> > It also means we will no longer advertise PAC on systems where not all of
> > the cores have "Enhanced PAC"; is that really necessary?
> 
> It matters for KVM, since a guest won't expect the enhanced PAC trap if
> the ID registers say it does not have it.
> 
> For userspace, the difference is it'll get a SIGILL on the AUT*
> instruction rather than a SIGSEGV when using the result of the AUT*
> instruction.

Yes, if PAC is enabled.

> > Generally we rely on incremental updates to unsigned ID register fields
> > being a superset (i.e. compatible with) the old behaviour. If that's not
> > the case here, then older kernels are broken and we may need new HWCAPs.
> 
> In this case, the behaviour isn't a strict superset. Enhanced PAC
> unconditionally changed the behaviour of AUT* (i.e. there's no opt-in
> with a control bit), but it's not clear to me how much this matters for
> userspace.

Doesn't that violate D13.1.3 "Principles of the ID scheme for fields in
ID registers"?

The part I dislike is that older kernels will happily advertise PAC to
userspace on a system with mismatched legacy/enhanced PAC, and so the
change above doesn't make sense for userspace because the HWCAPs are
already unreliable.
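
For reference, the existing Armv8.3-era HWCAP check from userspace
looks roughly like this (HWCAP_PACA only says "address auth present",
not which variant; sketch only):

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_PACA
#define HWCAP_PACA	(1UL << 30)
#endif

int main(void)
{
	unsigned long hwcaps = getauxval(AT_HWCAP);

	printf("PACA: %s\n", (hwcaps & HWCAP_PACA) ? "yes" : "no");
	return 0;
}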

Will
Amit Daniel Kachhap March 2, 2020, 12:48 p.m. UTC | #4
Hi Will,

On 2/28/20 5:53 PM, Will Deacon wrote:
> On Fri, Feb 28, 2020 at 12:03:14PM +0000, Mark Rutland wrote:
>> On Fri, Feb 28, 2020 at 11:57:37AM +0000, Will Deacon wrote:
>>> On Wed, Feb 19, 2020 at 06:30:39PM +0530, Amit Daniel Kachhap wrote:
>>>> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
>>>> index 8d1c979..a4f8adb 100644
>>>> --- a/arch/arm64/kernel/cpufeature.c
>>>> +++ b/arch/arm64/kernel/cpufeature.c
>>>> @@ -154,9 +154,9 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
>>>>   	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
>>>>   	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
>>>>   	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
>>>> -		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
>>>> +		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
>>>>   	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
>>>> -		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
>>>> +		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
>>>>   	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
>>>>   	ARM64_FTR_END,
>>>
>>> Hmm. This is a user-visible change and should probably be in its own patch.
>>> It also means we will no longer advertise PAC on systems where not all of
>>> the cores have "Enhanced PAC"; is that really necessary?
>>
>> It matters for KVM, since a guest won't expect the enhanced PAC trap if
>> the ID registers say it does not have it.
>>
>> For userspace, the difference is it'll get a SIGILL on the AUT*
>> instruction rather than a SIGSEGV when using the result of the AUT*
>> instruction.
> 
> Yes, if PAC is enabled.
> 
>>> Generally we rely on incremental updates to unsigned ID register fields
>>> being a superset (i.e. compatible with) the old behaviour. If that's not
>>> the case here, then older kernels are broken and we may need new HWCAPs.
>>
>> In this case, the behaviour isn't a strict superset. Enhanced PAC
>> unconditionally changed the behaviour of AUT* (i.e. there's no opt-in
>> with a control bit), but it's not clear to me how much this matters for
>> userspace.
> 
> Doesn't that violate D13.1.3 "Principles of the ID scheme for fields in
> ID registers"?
> 
> The part I dislike is that older kernels will happily advertise PAC to
> userspace on a system with mismatched legacy/enhanced PAC, and so the
> change above doesn't make sense for userspace because the HWCAPs are
> already unreliable.

How should I go about it? Shall I send this part as a separate fix
patch mentioning the reason for doing it?

//Amit
> 
> Will
>
Will Deacon March 2, 2020, 4:29 p.m. UTC | #5
On Mon, Mar 02, 2020 at 06:18:17PM +0530, Amit Kachhap wrote:
> On 2/28/20 5:53 PM, Will Deacon wrote:
> > On Fri, Feb 28, 2020 at 12:03:14PM +0000, Mark Rutland wrote:
> > > On Fri, Feb 28, 2020 at 11:57:37AM +0000, Will Deacon wrote:
> > > > On Wed, Feb 19, 2020 at 06:30:39PM +0530, Amit Daniel Kachhap wrote:
> > > > > diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> > > > > index 8d1c979..a4f8adb 100644
> > > > > --- a/arch/arm64/kernel/cpufeature.c
> > > > > +++ b/arch/arm64/kernel/cpufeature.c
> > > > > @@ -154,9 +154,9 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
> > > > >   	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
> > > > >   	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
> > > > >   	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
> > > > > -		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
> > > > > +		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
> > > > >   	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
> > > > > -		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
> > > > > +		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
> > > > >   	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
> > > > >   	ARM64_FTR_END,
> > > > 
> > > > Hmm. This is a user-visible change and should probably be in its own patch.
> > > > It also means we will no longer advertise PAC on systems where not all of
> > > > the cores have "Enhanced PAC"; is that really necessary?
> > > 
> > > It matters for KVM, since a guest won't expect the enhanced PAC trap if
> > > the ID registers say it does not have it.
> > > 
> > > For userspace, the difference is it'll get a SIGILL on the AUT*
> > > instruction rather than a SIGSEGV when using the result of the AUT*
> > > instruction.
> > 
> > Yes, if PAC is enabled.
> > 
> > > > Generally we rely on incremental updates to unsigned ID register fields
> > > > being a superset (i.e. compatible with) the old behaviour. If that's not
> > > > the case here, then older kernels are broken and we may need new HWCAPs.
> > > 
> > > In this case, the behaviour isn't a strict superset. Enhanced PAC
> > > unconditionally changed the behaviour of AUT* (i.e. there's no opt-in
> > > with a control bit), but it's not clear to me how much this matters for
> > > userspace.
> > 
> > Doesn't that violate D13.1.3 "Principles of the ID scheme for fields in
> > ID registers"?
> > 
> > The part I dislike is that older kernels will happily advertise PAC to
> > userspace on a system with mismatched legacy/enhanced PAC, and so the
> > change above doesn't make sense for userspace because the HWCAPs are
> > already unreliable.
> 
> How should I go about it? Shall I send this part as a separate fix
> patch mentioning the reason for doing it?

I didn't see a reply from Mark, but yes, I think this should be a separate
patch. Further, I think it should only affect KVM and not userspace.

Will

Patch

diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index cb29253..5a1406f 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -35,7 +35,9 @@ 
 #define ESR_ELx_EC_SYS64	(0x18)
 #define ESR_ELx_EC_SVE		(0x19)
 #define ESR_ELx_EC_ERET		(0x1a)	/* EL2 only */
-/* Unallocated EC: 0x1b - 0x1E */
+/* Unallocated EC: 0x1B */
+#define ESR_ELx_EC_FPAC		(0x1C)	/* EL1 and above */
+/* Unallocated EC: 0x1D - 0x1E */
 #define ESR_ELx_EC_IMP_DEF	(0x1f)	/* EL3 only */
 #define ESR_ELx_EC_IABT_LOW	(0x20)
 #define ESR_ELx_EC_IABT_CUR	(0x21)
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 7a6e81ca..de76772 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -46,4 +46,5 @@  void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
 void do_cp15instr(unsigned int esr, struct pt_regs *regs);
 void do_el0_svc(struct pt_regs *regs);
 void do_el0_svc_compat(struct pt_regs *regs);
+void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr);
 #endif	/* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index b91570f..77728f5 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -585,14 +585,22 @@ 
 #define ID_AA64ISAR1_APA_SHIFT		4
 #define ID_AA64ISAR1_DPB_SHIFT		0
 
-#define ID_AA64ISAR1_APA_NI		0x0
-#define ID_AA64ISAR1_APA_ARCHITECTED	0x1
-#define ID_AA64ISAR1_API_NI		0x0
-#define ID_AA64ISAR1_API_IMP_DEF	0x1
-#define ID_AA64ISAR1_GPA_NI		0x0
-#define ID_AA64ISAR1_GPA_ARCHITECTED	0x1
-#define ID_AA64ISAR1_GPI_NI		0x0
-#define ID_AA64ISAR1_GPI_IMP_DEF	0x1
+#define ID_AA64ISAR1_APA_NI			0x0
+#define ID_AA64ISAR1_APA_ARCHITECTED		0x1
+#define ID_AA64ISAR1_APA_ARCH_EPAC		0x2
+#define ID_AA64ISAR1_APA_ARCH_EPAC2		0x3
+#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC	0x4
+#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB	0x5
+#define ID_AA64ISAR1_API_NI			0x0
+#define ID_AA64ISAR1_API_IMP_DEF		0x1
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC		0x2
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC2		0x3
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC	0x4
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB	0x5
+#define ID_AA64ISAR1_GPA_NI			0x0
+#define ID_AA64ISAR1_GPA_ARCHITECTED		0x1
+#define ID_AA64ISAR1_GPI_NI			0x0
+#define ID_AA64ISAR1_GPI_IMP_DEF		0x1
 
 /* id_aa64pfr0 */
 #define ID_AA64PFR0_CSV3_SHIFT		60
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 8d1c979..a4f8adb 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -154,9 +154,9 @@  static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
-		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
+		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
-		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
+		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index fde5998..2ef5bf5 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -15,6 +15,7 @@ 
 #include <asm/exception.h>
 #include <asm/kprobes.h>
 #include <asm/mmu.h>
+#include <asm/pointer_auth.h>
 #include <asm/sysreg.h>
 
 static void notrace el1_abort(struct pt_regs *regs, unsigned long esr)
@@ -66,6 +67,13 @@  static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
 }
 NOKPROBE_SYMBOL(el1_dbg);
 
+static void notrace el1_fpac(struct pt_regs *regs, unsigned long esr)
+{
+	local_daif_inherit(regs);
+	do_ptrauth_fault(regs, esr);
+}
+NOKPROBE_SYMBOL(el1_fpac);
+
 asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
 {
 	unsigned long esr = read_sysreg(esr_el1);
@@ -92,6 +100,9 @@  asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
 	case ESR_ELx_EC_BRK64:
 		el1_dbg(regs, esr);
 		break;
+	case ESR_ELx_EC_FPAC:
+		el1_fpac(regs, esr);
+		break;
 	default:
 		el1_inv(regs, esr);
 	};
@@ -219,6 +230,14 @@  static void notrace el0_svc(struct pt_regs *regs)
 }
 NOKPROBE_SYMBOL(el0_svc);
 
+static void notrace el0_fpac(struct pt_regs *regs, unsigned long esr)
+{
+	user_exit_irqoff();
+	local_daif_restore(DAIF_PROCCTX);
+	do_ptrauth_fault(regs, esr);
+}
+NOKPROBE_SYMBOL(el0_fpac);
+
 asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
 {
 	unsigned long esr = read_sysreg(esr_el1);
@@ -261,6 +280,9 @@  asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
 	case ESR_ELx_EC_BRK64:
 		el0_dbg(regs, esr);
 		break;
+	case ESR_ELx_EC_FPAC:
+		el0_fpac(regs, esr);
+		break;
 	default:
 		el0_inv(regs, esr);
 	}
@@ -324,6 +346,9 @@  asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
 	case ESR_ELx_EC_BKPT32:
 		el0_dbg(regs, esr);
 		break;
+	case ESR_ELx_EC_FPAC:
+		el0_fpac(regs, esr);
+		break;
 	default:
 		el0_inv(regs, esr);
 	}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index cf402be..0ef9e98 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -411,6 +411,23 @@  void do_undefinstr(struct pt_regs *regs)
 }
 NOKPROBE_SYMBOL(do_undefinstr);
 
+void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr)
+{
+	const char *desc;
+
+	BUG_ON(!user_mode(regs));
+
+	/* Even if we chose not to use PTRAUTH, the hardware might still trap */
+	if (unlikely(!(system_supports_address_auth()))) {
+		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+		return;
+	}
+
+	desc = "pointer authentication fault";
+	arm64_notify_die(desc, regs, SIGILL, ILL_ILLOPN, (void __user *)regs->pc, esr);
+}
+NOKPROBE_SYMBOL(do_ptrauth_fault);
+
 #define __user_cache_maint(insn, address, res)			\
 	if (address >= user_addr_max()) {			\
 		res = -EFAULT;					\
@@ -763,6 +780,7 @@  static const char *esr_class_str[] = {
 	[ESR_ELx_EC_SYS64]		= "MSR/MRS (AArch64)",
 	[ESR_ELx_EC_SVE]		= "SVE",
 	[ESR_ELx_EC_ERET]		= "ERET/ERETAA/ERETAB",
+	[ESR_ELx_EC_FPAC]		= "FPAC",
 	[ESR_ELx_EC_IMP_DEF]		= "EL3 IMP DEF",
 	[ESR_ELx_EC_IABT_LOW]		= "IABT (lower EL)",
 	[ESR_ELx_EC_IABT_CUR]		= "IABT (current EL)",