
[v2,26/39] x86/cet/shstk: Introduce routines modifying shstk

Message ID 20220929222936.14584-27-rick.p.edgecombe@intel.com
State New
Series Shadowstacks for userspace

Commit Message

Rick Edgecombe Sept. 29, 2022, 10:29 p.m. UTC
From: Yu-cheng Yu <yu-cheng.yu@intel.com>

Shadow stacks are normally written to via CALL/RET or specific CET
instructions like RSTORSSP/SAVEPREVSSP. However, during some Linux
operations the kernel will need to write to them directly using the
ring-0 only WRUSS instruction.

A shadow stack restore token marks a restore point of the shadow stack, and
the address in a token must point directly above the token, which is within
the same shadow stack. This is distinctly different from other pointers
on the shadow stack, since those pointers point to the executable code area.
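
Illustratively, the token that create_rstor_token() (added below) writes
looks like this (a sketch, higher addresses on top):

  +------------------------+
  |          ...           | <- ssp recorded in the token
  +------------------------+
  |      ssp | BIT(0)      | <- token written at ssp - 8; BIT(0) marks
  +------------------------+    the token 64-bit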

Introduce token setup and verify routines. Also introduce WRUSS, which is
a kernel-mode instruction but writes directly to user shadow stack.

In future patches that enable shadow stack to work with signals, the kernel
will need something to denote the point in the stack where sigreturn may be
called. This will prevent attackers from calling sigreturn at arbitrary
places in the stack, in order to help prevent SROP attacks.

To do this, something that can only be written by the kernel needs to be
placed on the shadow stack. This can be accomplished by setting bit 63 in
the frame written to the shadow stack. Userspace return addresses can't
have this bit set as it is in the kernel range. It also can't be a
valid restore token.
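
As a sketch (mirroring the put_shstk_data()/get_shstk_data() helpers
added below), the bit is set on write and checked/stripped on read:

	/* Write: set bit 63 so the value can't be a user return address */
	write_user_shstk_64(addr, data | BIT(63));

	/* Read: reject anything the kernel didn't write, then strip the mark */
	if (!(ldata & BIT(63)))
		return -EINVAL;
	*data = ldata & ~BIT(63);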

Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
Co-developed-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Cc: Kees Cook <keescook@chromium.org>

---

v2:
 - Add data helpers for writing to shadow stack.

v1:
 - Use xsave helpers.

Yu-cheng v30:
 - Update commit log, remove description about signals.
 - Update various comments.
 - Remove variable 'ssp' init and adjust return value accordingly.
 - Check get_user_shstk_addr() return value.
 - Replace 'ia32' with 'proc32'.

Yu-cheng v29:
 - Update comments for the use of get_xsave_addr().

 arch/x86/include/asm/special_insns.h |  13 ++++
 arch/x86/kernel/shstk.c              | 108 +++++++++++++++++++++++++++
 2 files changed, 121 insertions(+)

Comments

Kees Cook Oct. 3, 2022, 8:44 p.m. UTC | #1
On Thu, Sep 29, 2022 at 03:29:23PM -0700, Rick Edgecombe wrote:
> From: Yu-cheng Yu <yu-cheng.yu@intel.com>
> 
> Shadow stacks are normally written to via CALL/RET or specific CET
> instructions like RSTORSSP/SAVEPREVSSP. However, during some Linux
> operations the kernel will need to write to them directly using the
> ring-0 only WRUSS instruction.
> 
> A shadow stack restore token marks a restore point of the shadow stack, and
> the address in a token must point directly above the token, which is within
> the same shadow stack. This is distinctly different from other pointers
> on the shadow stack, since those pointers point to the executable code area.
> 
> Introduce token setup and verify routines. Also introduce WRUSS, which is
> a kernel-mode instruction but writes directly to user shadow stack.
> 
> In future patches that enable shadow stack to work with signals, the kernel
> will need something to denote the point in the stack where sigreturn may be
> called. This will prevent attackers from calling sigreturn at arbitrary
> places in the stack, in order to help prevent SROP attacks.
> 
> To do this, something that can only be written by the kernel needs to be
> placed on the shadow stack. This can be accomplished by setting bit 63 in
> the frame written to the shadow stack. Userspace return addresses can't
> have this bit set as it is in the kernel range. It also can't be a
> valid restore token.
> 
> Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
> Co-developed-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
> Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
> Cc: Kees Cook <keescook@chromium.org>
> 
> ---
> 
> v2:
>  - Add data helpers for writing to shadow stack.
> 
> v1:
>  - Use xsave helpers.
> 
> Yu-cheng v30:
>  - Update commit log, remove description about signals.
>  - Update various comments.
>  - Remove variable 'ssp' init and adjust return value accordingly.
>  - Check get_user_shstk_addr() return value.
>  - Replace 'ia32' with 'proc32'.
> 
> Yu-cheng v29:
>  - Update comments for the use of get_xsave_addr().
> 
>  arch/x86/include/asm/special_insns.h |  13 ++++
>  arch/x86/kernel/shstk.c              | 108 +++++++++++++++++++++++++++
>  2 files changed, 121 insertions(+)
> 
> diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
> index 35f709f619fb..f096f52bd059 100644
> --- a/arch/x86/include/asm/special_insns.h
> +++ b/arch/x86/include/asm/special_insns.h
> @@ -223,6 +223,19 @@ static inline void clwb(volatile void *__p)
>  		: [pax] "a" (p));
>  }
>  
> +#ifdef CONFIG_X86_SHADOW_STACK
> +static inline int write_user_shstk_64(u64 __user *addr, u64 val)
> +{
> +	asm_volatile_goto("1: wrussq %[val], (%[addr])\n"
> +			  _ASM_EXTABLE(1b, %l[fail])
> +			  :: [addr] "r" (addr), [val] "r" (val)
> +			  :: fail);
> +	return 0;
> +fail:
> +	return -EFAULT;
> +}
> +#endif /* CONFIG_X86_SHADOW_STACK */
> +
>  #define nop() asm volatile ("nop")
>  
>  static inline void serialize(void)
> diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
> index db4e53f9fdaf..8904aef487bf 100644
> --- a/arch/x86/kernel/shstk.c
> +++ b/arch/x86/kernel/shstk.c
> @@ -25,6 +25,8 @@
>  #include <asm/fpu/api.h>
>  #include <asm/prctl.h>
>  
> +#define SS_FRAME_SIZE 8
> +
>  static bool feature_enabled(unsigned long features)
>  {
>  	return current->thread.features & features;
> @@ -40,6 +42,31 @@ static void feature_clr(unsigned long features)
>  	current->thread.features &= ~features;
>  }
>  
> +/*
> + * Create a restore token on the shadow stack.  A token is always 8-byte
> + * and aligned to 8.
> + */
> +static int create_rstor_token(unsigned long ssp, unsigned long *token_addr)
> +{
> +	unsigned long addr;
> +
> +	/* Token must be aligned */
> +	if (!IS_ALIGNED(ssp, 8))
> +		return -EINVAL;
> +
> +	addr = ssp - SS_FRAME_SIZE;
> +
> +	/* Mark the token 64-bit */
> +	ssp |= BIT(0);

Wow, that confused me for a moment. :) SDE says:

- Bit 63:2 – Value of shadow stack pointer when this restore point was created.
- Bit 1 – Reserved. Must be zero.
- Bit 0 – Mode bit. If 0, the token is a compatibility/legacy mode
          “shadow stack restore” token. If 1, then this shadow stack restore
          token can be used with a RSTORSSP instruction in 64-bit mode.

So shouldn't this actually be:

	ssp &= ~BIT(1);	/* Reserved */
	ssp |=  BIT(0); /* RSTORSSP instruction in 64-bit mode */

> +
> +	if (write_user_shstk_64((u64 __user *)addr, (u64)ssp))
> +		return -EFAULT;
> +
> +	*token_addr = addr;
> +
> +	return 0;
> +}
> +
>  static unsigned long alloc_shstk(unsigned long size)
>  {
>  	int flags = MAP_ANONYMOUS | MAP_PRIVATE;
> @@ -158,6 +185,87 @@ int shstk_alloc_thread_stack(struct task_struct *tsk, unsigned long clone_flags,
>  	return 0;
>  }
>  
> +static unsigned long get_user_shstk_addr(void)
> +{
> +	unsigned long long ssp;
> +
> +	fpu_lock_and_load();
> +
> +	rdmsrl(MSR_IA32_PL3_SSP, ssp);
> +
> +	fpregs_unlock();
> +
> +	return ssp;
> +}
> +
> +static int put_shstk_data(u64 __user *addr, u64 data)
> +{
> +	WARN_ON(data & BIT(63));

Let's make this a bit more defensive:

	if (WARN_ON_ONCE(data & BIT(63)))
		return -EFAULT;

> +
> +	/*
> +	 * Mark the high bit so that the sigframe can't be processed as a
> +	 * return address.
> +	 */
> +	if (write_user_shstk_64(addr, data | BIT(63)))
> +		return -EFAULT;
> +	return 0;
> +}
> +
> +static int get_shstk_data(unsigned long *data, unsigned long __user *addr)
> +{
> +	unsigned long ldata;
> +
> +	if (unlikely(get_user(ldata, addr)))
> +		return -EFAULT;
> +
> +	if (!(ldata & BIT(63)))
> +		return -EINVAL;
> +
> +	*data = ldata & ~BIT(63);
> +
> +	return 0;
> +}
> +
> +/*
> + * Verify the user shadow stack has a valid token on it, and then set
> + * *new_ssp according to the token.
> + */
> +static int shstk_check_rstor_token(unsigned long *new_ssp)
> +{
> +	unsigned long token_addr;
> +	unsigned long token;
> +
> +	token_addr = get_user_shstk_addr();
> +	if (!token_addr)
> +		return -EINVAL;
> +
> +	if (get_user(token, (unsigned long __user *)token_addr))
> +		return -EFAULT;
> +
> +	/* Is mode flag correct? */
> +	if (!(token & BIT(0)))
> +		return -EINVAL;
> +
> +	/* Is busy flag set? */

"Busy"? Not "Reserved"?

> +	if (token & BIT(1))
> +		return -EINVAL;
> +
> +	/* Mask out flags */
> +	token &= ~3UL;
> +
> +	/* Restore address aligned? */
> +	if (!IS_ALIGNED(token, 8))
> +		return -EINVAL;
> +
> +	/* Token placed properly? */
> +	if (((ALIGN_DOWN(token, 8) - 8) != token_addr) || token >= TASK_SIZE_MAX)
> +		return -EINVAL;
> +
> +	*new_ssp = token;
> +
> +	return 0;
> +}
> +
>  void shstk_free(struct task_struct *tsk)
>  {
>  	struct thread_shstk *shstk = &tsk->thread.shstk;
> -- 
> 2.17.1
>
Rick Edgecombe Oct. 4, 2022, 10:13 p.m. UTC | #2
On Mon, 2022-10-03 at 13:44 -0700, Kees Cook wrote:
> On Thu, Sep 29, 2022 at 03:29:23PM -0700, Rick Edgecombe wrote:
> > From: Yu-cheng Yu <yu-cheng.yu@intel.com>
> > 
> > Shadow stacks are normally written to via CALL/RET or specific CET
> > instructions like RSTORSSP/SAVEPREVSSP. However, during some Linux
> > operations the kernel will need to write to them directly using the
> > ring-0 only WRUSS instruction.
> > 
> > A shadow stack restore token marks a restore point of the shadow stack,
> > and the address in a token must point directly above the token, which is
> > within the same shadow stack. This is distinctly different from other
> > pointers on the shadow stack, since those pointers point to the
> > executable code area.
> > 
> > Introduce token setup and verify routines. Also introduce WRUSS,
> > which is
> > a kernel-mode instruction but writes directly to user shadow stack.
> > 
> > In future patches that enable shadow stack to work with signals, the
> > kernel will need something to denote the point in the stack where
> > sigreturn may be called. This will prevent attackers from calling
> > sigreturn at arbitrary places in the stack, in order to help prevent
> > SROP attacks.
> > 
> > To do this, something that can only be written by the kernel needs to be
> > placed on the shadow stack. This can be accomplished by setting bit 63 in
> > the frame written to the shadow stack. Userspace return addresses can't
> > have this bit set as it is in the kernel range. It also can't be a
> > valid restore token.
> > 
> > Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
> > Co-developed-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
> > Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
> > Cc: Kees Cook <keescook@chromium.org>
> > 
> > ---
> > 
> > v2:
> >  - Add data helpers for writing to shadow stack.
> > 
> > v1:
> >  - Use xsave helpers.
> > 
> > Yu-cheng v30:
> >  - Update commit log, remove description about signals.
> >  - Update various comments.
> >  - Remove variable 'ssp' init and adjust return value accordingly.
> >  - Check get_user_shstk_addr() return value.
> >  - Replace 'ia32' with 'proc32'.
> > 
> > Yu-cheng v29:
> >  - Update comments for the use of get_xsave_addr().
> > 
> >  arch/x86/include/asm/special_insns.h |  13 ++++
> >  arch/x86/kernel/shstk.c              | 108 +++++++++++++++++++++++++++
> >  2 files changed, 121 insertions(+)
> > 
> > diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
> > index 35f709f619fb..f096f52bd059 100644
> > --- a/arch/x86/include/asm/special_insns.h
> > +++ b/arch/x86/include/asm/special_insns.h
> > @@ -223,6 +223,19 @@ static inline void clwb(volatile void *__p)
> >  		: [pax] "a" (p));
> >  }
> >  
> > +#ifdef CONFIG_X86_SHADOW_STACK
> > +static inline int write_user_shstk_64(u64 __user *addr, u64 val)
> > +{
> > +	asm_volatile_goto("1: wrussq %[val], (%[addr])\n"
> > +			  _ASM_EXTABLE(1b, %l[fail])
> > +			  :: [addr] "r" (addr), [val] "r" (val)
> > +			  :: fail);
> > +	return 0;
> > +fail:
> > +	return -EFAULT;
> > +}
> > +#endif /* CONFIG_X86_SHADOW_STACK */
> > +
> >  #define nop() asm volatile ("nop")
> >  
> >  static inline void serialize(void)
> > diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
> > index db4e53f9fdaf..8904aef487bf 100644
> > --- a/arch/x86/kernel/shstk.c
> > +++ b/arch/x86/kernel/shstk.c
> > @@ -25,6 +25,8 @@
> >  #include <asm/fpu/api.h>
> >  #include <asm/prctl.h>
> >  
> > +#define SS_FRAME_SIZE 8
> > +
> >  static bool feature_enabled(unsigned long features)
> >  {
> >  	return current->thread.features & features;
> > @@ -40,6 +42,31 @@ static void feature_clr(unsigned long features)
> >  	current->thread.features &= ~features;
> >  }
> >  
> > +/*
> > + * Create a restore token on the shadow stack.  A token is always 8-byte
> > + * and aligned to 8.
> > + */
> > +static int create_rstor_token(unsigned long ssp, unsigned long *token_addr)
> > +{
> > +	unsigned long addr;
> > +
> > +	/* Token must be aligned */
> > +	if (!IS_ALIGNED(ssp, 8))
> > +		return -EINVAL;
> > +
> > +	addr = ssp - SS_FRAME_SIZE;
> > +
> > +	/* Mark the token 64-bit */
> > +	ssp |= BIT(0);
> 
> Wow, that confused me for a moment. :) SDE says:
> 
> - Bit 63:2 – Value of shadow stack pointer when this restore point was created.
> - Bit 1 – Reserved. Must be zero.
> - Bit 0 – Mode bit. If 0, the token is a compatibility/legacy mode
>           “shadow stack restore” token. If 1, then this shadow stack restore
>           token can be used with a RSTORSSP instruction in 64-bit mode.
> 
> So shouldn't this actually be:
> 
> 	ssp &= ~BIT(1);	/* Reserved */
> 	ssp |=  BIT(0); /* RSTORSSP instruction in 64-bit mode */

Since SSP is aligned, it should not have bits 0 to 2 set. I'll add a
comment.
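
Something like this sketch:

	/*
	 * SSP is aligned, so reserved bits and the mode bit are all zero.
	 * Mark the token 64-bit.
	 */
	ssp |= BIT(0);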

> 
> > +
> > +	if (write_user_shstk_64((u64 __user *)addr, (u64)ssp))
> > +		return -EFAULT;
> > +
> > +	*token_addr = addr;
> > +
> > +	return 0;
> > +}
> > +
> >  static unsigned long alloc_shstk(unsigned long size)
> >  {
> >  	int flags = MAP_ANONYMOUS | MAP_PRIVATE;
> > @@ -158,6 +185,87 @@ int shstk_alloc_thread_stack(struct task_struct *tsk, unsigned long clone_flags,
> >  	return 0;
> >  }
> >  
> > +static unsigned long get_user_shstk_addr(void)
> > +{
> > +	unsigned long long ssp;
> > +
> > +	fpu_lock_and_load();
> > +
> > +	rdmsrl(MSR_IA32_PL3_SSP, ssp);
> > +
> > +	fpregs_unlock();
> > +
> > +	return ssp;
> > +}
> > +
> > +static int put_shstk_data(u64 __user *addr, u64 data)
> > +{
> > +	WARN_ON(data & BIT(63));
> 
> Let's make this a bit more defensive:
> 
> 	if (WARN_ON_ONCE(data & BIT(63)))
> 		return -EFAULT;

Hmm, sure. I'm thinking EINVAL since the failure has nothing to do with
faulting.
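
I.e. something like this sketch:

	if (WARN_ON_ONCE(data & BIT(63)))
		return -EINVAL;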

> 
> > +
> > +	/*
> > +	 * Mark the high bit so that the sigframe can't be processed as a
> > +	 * return address.
> > +	 */
> > +	if (write_user_shstk_64(addr, data | BIT(63)))
> > +		return -EFAULT;
> > +	return 0;
> > +}
> > +
> > +static int get_shstk_data(unsigned long *data, unsigned long __user *addr)
> > +{
> > +	unsigned long ldata;
> > +
> > +	if (unlikely(get_user(ldata, addr)))
> > +		return -EFAULT;
> > +
> > +	if (!(ldata & BIT(63)))
> > +		return -EINVAL;
> > +
> > +	*data = ldata & ~BIT(63);
> > +
> > +	return 0;
> > +}
> > +
> > +/*
> > + * Verify the user shadow stack has a valid token on it, and then set
> > + * *new_ssp according to the token.
> > + */
> > +static int shstk_check_rstor_token(unsigned long *new_ssp)
> > +{
> > +	unsigned long token_addr;
> > +	unsigned long token;
> > +
> > +	token_addr = get_user_shstk_addr();
> > +	if (!token_addr)
> > +		return -EINVAL;
> > +
> > +	if (get_user(token, (unsigned long __user *)token_addr))
> > +		return -EFAULT;
> > +
> > +	/* Is mode flag correct? */
> > +	if (!(token & BIT(0)))
> > +		return -EINVAL;
> > +
> > +	/* Is busy flag set? */
> 
> "Busy"? Not "Reserved"?

Yes, reserved is more accurate; I'll change it. In a previous-ssp token
it is 1, so it's kind of busy-like. That is probably where the comment
came from.
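
I.e., roughly:

	/* Is the reserved flag set? */
	if (token & BIT(1))
		return -EINVAL;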

> 
> > +	if (token & BIT(1))
> > +		return -EINVAL;
> > +
> > +	/* Mask out flags */
> > +	token &= ~3UL;
> > +
> > +	/* Restore address aligned? */
> > +	if (!IS_ALIGNED(token, 8))
> > +		return -EINVAL;
> > +
> > +	/* Token placed properly? */
> > +	if (((ALIGN_DOWN(token, 8) - 8) != token_addr) || token >= TASK_SIZE_MAX)
> > +		return -EINVAL;
> > +
> > +	*new_ssp = token;
> > +
> > +	return 0;
> > +}
> > +
> >  void shstk_free(struct task_struct *tsk)
> >  {
> >  	struct thread_shstk *shstk = &tsk->thread.shstk;
> > -- 
> > 2.17.1
> > 
> 
>
Andrew Cooper Oct. 5, 2022, 2:43 a.m. UTC | #3
On 29/09/2022 23:29, Rick Edgecombe wrote:
> diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
> index 35f709f619fb..f096f52bd059 100644
> --- a/arch/x86/include/asm/special_insns.h
> +++ b/arch/x86/include/asm/special_insns.h
> @@ -223,6 +223,19 @@ static inline void clwb(volatile void *__p)
>  		: [pax] "a" (p));
>  }
>  
> +#ifdef CONFIG_X86_SHADOW_STACK
> +static inline int write_user_shstk_64(u64 __user *addr, u64 val)
> +{
> +	asm_volatile_goto("1: wrussq %[val], (%[addr])\n"
> +			  _ASM_EXTABLE(1b, %l[fail])
> +			  :: [addr] "r" (addr), [val] "r" (val)
> +			  :: fail);

"1: wrssq %[val], %[addr]\n"
_ASM_EXTABLE(1b, %l[fail])
: [addr] "+m" (*addr)
: [val] "r" (val)
:: fail

Otherwise you've failed to tell the compiler that you wrote to *addr.

With that fixed, it's not volatile because there are no unexpressed side
effects.

~Andrew
Rick Edgecombe Oct. 5, 2022, 10:47 p.m. UTC | #4
On Wed, 2022-10-05 at 02:43 +0000, Andrew Cooper wrote:
> On 29/09/2022 23:29, Rick Edgecombe wrote:
> > diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
> > index 35f709f619fb..f096f52bd059 100644
> > --- a/arch/x86/include/asm/special_insns.h
> > +++ b/arch/x86/include/asm/special_insns.h
> > @@ -223,6 +223,19 @@ static inline void clwb(volatile void *__p)
> >                : [pax] "a" (p));
> >   }
> >   
> > +#ifdef CONFIG_X86_SHADOW_STACK
> > +static inline int write_user_shstk_64(u64 __user *addr, u64 val)
> > +{
> > +     asm_volatile_goto("1: wrussq %[val], (%[addr])\n"
> > +                       _ASM_EXTABLE(1b, %l[fail])
> > +                       :: [addr] "r" (addr), [val] "r" (val)
> > +                       :: fail);
> 
> "1: wrssq %[val], %[addr]\n"
> _ASM_EXTABLE(1b, %l[fail])
> : [addr] "+m" (*addr)
> : [val] "r" (val)
> :: fail
> 
> Otherwise you've failed to tell the compiler that you wrote to *addr.
> 
> > With that fixed, it's not volatile because there are no unexpressed
> > side effects.

Ok, thanks!
Andrew Cooper Oct. 5, 2022, 10:58 p.m. UTC | #5
On 05/10/2022 23:47, Edgecombe, Rick P wrote:
> On Wed, 2022-10-05 at 02:43 +0000, Andrew Cooper wrote:
>> On 29/09/2022 23:29, Rick Edgecombe wrote:
>>> diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
>>> index 35f709f619fb..f096f52bd059 100644
>>> --- a/arch/x86/include/asm/special_insns.h
>>> +++ b/arch/x86/include/asm/special_insns.h
>>> @@ -223,6 +223,19 @@ static inline void clwb(volatile void *__p)
>>>                : [pax] "a" (p));
>>>   }
>>>   
>>> +#ifdef CONFIG_X86_SHADOW_STACK
>>> +static inline int write_user_shstk_64(u64 __user *addr, u64 val)
>>> +{
>>> +     asm_volatile_goto("1: wrussq %[val], (%[addr])\n"
>>> +                       _ASM_EXTABLE(1b, %l[fail])
>>> +                       :: [addr] "r" (addr), [val] "r" (val)
>>> +                       :: fail);
>> "1: wrssq %[val], %[addr]\n"
>> _ASM_EXTABLE(1b, %l[fail])
>> : [addr] "+m" (*addr)
>> : [val] "r" (val)
>> :: fail
>>
>> Otherwise you've failed to tell the compiler that you wrote to *addr.
>>
>> With that fixed, it's not volatile because there are no unexpressed
>> side effects.
> Ok, thanks!

On further consideration, it should be "=m" not "+m", which is even less
constrained, and even easier for an enterprising optimiser to try and do
something useful with.

~Andrew
Rick Edgecombe Oct. 20, 2022, 9:51 p.m. UTC | #6
On Wed, 2022-10-05 at 22:58 +0000, Andrew Cooper wrote:
> On 05/10/2022 23:47, Edgecombe, Rick P wrote:
> > On Wed, 2022-10-05 at 02:43 +0000, Andrew Cooper wrote:
> > > On 29/09/2022 23:29, Rick Edgecombe wrote:
> > > > diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
> > > > index 35f709f619fb..f096f52bd059 100644
> > > > --- a/arch/x86/include/asm/special_insns.h
> > > > +++ b/arch/x86/include/asm/special_insns.h
> > > > @@ -223,6 +223,19 @@ static inline void clwb(volatile void *__p)
> > > >                 : [pax] "a" (p));
> > > >    }
> > > >    
> > > > +#ifdef CONFIG_X86_SHADOW_STACK
> > > > +static inline int write_user_shstk_64(u64 __user *addr, u64 val)
> > > > +{
> > > > +     asm_volatile_goto("1: wrussq %[val], (%[addr])\n"
> > > > +                       _ASM_EXTABLE(1b, %l[fail])
> > > > +                       :: [addr] "r" (addr), [val] "r" (val)
> > > > +                       :: fail);
> > > 
> > > "1: wrssq %[val], %[addr]\n"
> > > _ASM_EXTABLE(1b, %l[fail])
> > > : [addr] "+m" (*addr)
> > > : [val] "r" (val)
> > > :: fail
> > > 
> > > Otherwise you've failed to tell the compiler that you wrote to
> > > *addr.
> > > 
> > > With that fixed, it's not volatile because there are no unexpressed
> > > side effects.
> > 
> > Ok, thanks!
> 
> On further consideration, it should be "=m" not "+m", which is even less
> constrained, and even easier for an enterprising optimiser to try and do
> something useful with.

AFAICT this won't work on all gccs. Asm gotos used to not support
having outputs. They are also implicitly volatile anyway. So I think
I'll have to leave it. But I can change the wrss one in the selftest to
"=m".
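
For reference, the selftest's wrss helper could then look something like
this (a sketch only; the actual selftest code may differ):

	static inline void write_shstk(unsigned long *addr, unsigned long val)
	{
		/* WRSS: write to our own (writeable) shadow stack from user mode */
		asm volatile("wrssq %[val], %[addr]"
			     : [addr] "=m" (*addr)
			     : [val] "r" (val));
	}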

Patch

diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 35f709f619fb..f096f52bd059 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -223,6 +223,19 @@  static inline void clwb(volatile void *__p)
 		: [pax] "a" (p));
 }
 
+#ifdef CONFIG_X86_SHADOW_STACK
+static inline int write_user_shstk_64(u64 __user *addr, u64 val)
+{
+	asm_volatile_goto("1: wrussq %[val], (%[addr])\n"
+			  _ASM_EXTABLE(1b, %l[fail])
+			  :: [addr] "r" (addr), [val] "r" (val)
+			  :: fail);
+	return 0;
+fail:
+	return -EFAULT;
+}
+#endif /* CONFIG_X86_SHADOW_STACK */
+
 #define nop() asm volatile ("nop")
 
 static inline void serialize(void)
diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
index db4e53f9fdaf..8904aef487bf 100644
--- a/arch/x86/kernel/shstk.c
+++ b/arch/x86/kernel/shstk.c
@@ -25,6 +25,8 @@ 
 #include <asm/fpu/api.h>
 #include <asm/prctl.h>
 
+#define SS_FRAME_SIZE 8
+
 static bool feature_enabled(unsigned long features)
 {
 	return current->thread.features & features;
@@ -40,6 +42,31 @@  static void feature_clr(unsigned long features)
 	current->thread.features &= ~features;
 }
 
+/*
+ * Create a restore token on the shadow stack.  A token is always 8-byte
+ * and aligned to 8.
+ */
+static int create_rstor_token(unsigned long ssp, unsigned long *token_addr)
+{
+	unsigned long addr;
+
+	/* Token must be aligned */
+	if (!IS_ALIGNED(ssp, 8))
+		return -EINVAL;
+
+	addr = ssp - SS_FRAME_SIZE;
+
+	/* Mark the token 64-bit */
+	ssp |= BIT(0);
+
+	if (write_user_shstk_64((u64 __user *)addr, (u64)ssp))
+		return -EFAULT;
+
+	*token_addr = addr;
+
+	return 0;
+}
+
 static unsigned long alloc_shstk(unsigned long size)
 {
 	int flags = MAP_ANONYMOUS | MAP_PRIVATE;
@@ -158,6 +185,87 @@  int shstk_alloc_thread_stack(struct task_struct *tsk, unsigned long clone_flags,
 	return 0;
 }
 
+static unsigned long get_user_shstk_addr(void)
+{
+	unsigned long long ssp;
+
+	fpu_lock_and_load();
+
+	rdmsrl(MSR_IA32_PL3_SSP, ssp);
+
+	fpregs_unlock();
+
+	return ssp;
+}
+
+static int put_shstk_data(u64 __user *addr, u64 data)
+{
+	WARN_ON(data & BIT(63));
+
+	/*
+	 * Mark the high bit so that the sigframe can't be processed as a
+	 * return address.
+	 */
+	if (write_user_shstk_64(addr, data | BIT(63)))
+		return -EFAULT;
+	return 0;
+}
+
+static int get_shstk_data(unsigned long *data, unsigned long __user *addr)
+{
+	unsigned long ldata;
+
+	if (unlikely(get_user(ldata, addr)))
+		return -EFAULT;
+
+	if (!(ldata & BIT(63)))
+		return -EINVAL;
+
+	*data = ldata & ~BIT(63);
+
+	return 0;
+}
+
+/*
+ * Verify the user shadow stack has a valid token on it, and then set
+ * *new_ssp according to the token.
+ */
+static int shstk_check_rstor_token(unsigned long *new_ssp)
+{
+	unsigned long token_addr;
+	unsigned long token;
+
+	token_addr = get_user_shstk_addr();
+	if (!token_addr)
+		return -EINVAL;
+
+	if (get_user(token, (unsigned long __user *)token_addr))
+		return -EFAULT;
+
+	/* Is mode flag correct? */
+	if (!(token & BIT(0)))
+		return -EINVAL;
+
+	/* Is busy flag set? */
+	if (token & BIT(1))
+		return -EINVAL;
+
+	/* Mask out flags */
+	token &= ~3UL;
+
+	/* Restore address aligned? */
+	if (!IS_ALIGNED(token, 8))
+		return -EINVAL;
+
+	/* Token placed properly? */
+	if (((ALIGN_DOWN(token, 8) - 8) != token_addr) || token >= TASK_SIZE_MAX)
+		return -EINVAL;
+
+	*new_ssp = token;
+
+	return 0;
+}
+
 void shstk_free(struct task_struct *tsk)
 {
 	struct thread_shstk *shstk = &tsk->thread.shstk;