
[v2,12/21] KVM:x86: Add fault checks for guest CR4.CET setting

Message ID 20230421134615.62539-13-weijiang.yang@intel.com (mailing list archive)
State New, archived
Series Enable CET Virtualization

Commit Message

Yang, Weijiang April 21, 2023, 1:46 p.m. UTC
Check potential faults for CR4.CET setting per Intel SDM.
CR4.CET is the master control bit for CET features (SHSTK and IBT).
In addition to basic support checks, CET can be enabled if and only
if CR0.WP==1, i.e. setting CR4.CET=1 faults if CR0.WP==0, and setting
CR0.WP=0 faults if CR4.CET==1.

Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
---
 arch/x86/kvm/x86.c | 6 ++++++
 arch/x86/kvm/x86.h | 3 +++
 2 files changed, 9 insertions(+)
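
The kvm_set_cr0() and kvm_set_cr4() hunks below enforce a single invariant; a minimal sketch of that invariant as a standalone predicate (hypothetical helper, not part of the patch):

static bool cet_cr_combination_valid(unsigned long cr0, unsigned long cr4)
{
	/* Per the SDM, CR4.CET may only be 1 while CR0.WP is 1. */
	return !(cr4 & X86_CR4_CET) || (cr0 & X86_CR0_WP);
}

Each setter returns 1 (so the caller injects #GP) when the incoming value would violate this predicate against the other, currently-active control register.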

Comments

Binbin Wu May 5, 2023, 5:01 a.m. UTC | #1
On 4/21/2023 9:46 PM, Yang Weijiang wrote:
> Check potential faults for CR4.CET setting per Intel SDM.
> CR4.CET is the master control bit for CET features (SHSTK and IBT).
> In addition to basic support checks, CET can be enabled if and only
> if CR0.WP==1, i.e. setting CR4.CET=1 faults if CR0.WP==0, and setting
> CR0.WP=0 faults if CR4.CET==1.
>
> Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
> ---
>   arch/x86/kvm/x86.c | 6 ++++++
>   arch/x86/kvm/x86.h | 3 +++
>   2 files changed, 9 insertions(+)
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index a768cbf3fbb7..7cd7f6755acd 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -995,6 +995,9 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
>   	    (is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)))
>   		return 1;
>   
> +	if (!(cr0 & X86_CR0_WP) && kvm_read_cr4_bits(vcpu, X86_CR4_CET))
You can use kvm_is_cr4_bit_set() instead of kvm_read_cr4_bits()

> +		return 1;
> +
>   	static_call(kvm_x86_set_cr0)(vcpu, cr0);
>   
>   	kvm_post_set_cr0(vcpu, old_cr0, cr0);
> @@ -1210,6 +1213,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
>   			return 1;
>   	}
>   
> +	if ((cr4 & X86_CR4_CET) && !(kvm_read_cr0(vcpu) & X86_CR0_WP))
You can use kvm_is_cr0_bit_set() to check X86_CR0_WP

> +		return 1;
> +
>   	static_call(kvm_x86_set_cr4)(vcpu, cr4);
>   
>   	kvm_post_set_cr4(vcpu, old_cr4, cr4);
> diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> index 2ba7c7fc4846..daadd5330dae 100644
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -536,6 +536,9 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
>   		__reserved_bits |= X86_CR4_VMXE;        \
>   	if (!__cpu_has(__c, X86_FEATURE_PCID))          \
>   		__reserved_bits |= X86_CR4_PCIDE;       \
> +	if (!__cpu_has(__c, X86_FEATURE_SHSTK) &&	\
> +	    !__cpu_has(__c, X86_FEATURE_IBT))		\
> +		__reserved_bits |= X86_CR4_CET;		\
IMO, it is a bit weird to split this part from the change of
CR4_RESERVED_BITS.


>   	__reserved_bits;                                \
>   })
>
Yang, Weijiang May 5, 2023, 7:24 a.m. UTC | #2
On 5/5/2023 1:01 PM, Binbin Wu wrote:
>
>
> On 4/21/2023 9:46 PM, Yang Weijiang wrote: 
[...]
>> @@ -995,6 +995,9 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
>>           (is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)))
>>           return 1;
>>
>> +    if (!(cr0 & X86_CR0_WP) && kvm_read_cr4_bits(vcpu, X86_CR4_CET))
> You can use kvm_is_cr4_bit_set() instead of kvm_read_cr4_bits()

Good suggestion, thanks!

>
>> +        return 1;
>> +
>>       static_call(kvm_x86_set_cr0)(vcpu, cr0);
>>
>>       kvm_post_set_cr0(vcpu, old_cr0, cr0);
>> @@ -1210,6 +1213,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
>>               return 1;
>>       }
>>
>> +    if ((cr4 & X86_CR4_CET) && !(kvm_read_cr0(vcpu) & X86_CR0_WP))
> You can use kvm_is_cr0_bit_set() to check X86_CR0_WP

OK.

>
>> +        return 1;
>> +
>>
[...]
>> @@ -536,6 +536,9 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
>>           __reserved_bits |= X86_CR4_VMXE;        \
>>       if (!__cpu_has(__c, X86_FEATURE_PCID))          \
>>           __reserved_bits |= X86_CR4_PCIDE;       \
>> +    if (!__cpu_has(__c, X86_FEATURE_SHSTK) &&    \
>> +        !__cpu_has(__c, X86_FEATURE_IBT))        \
>> +        __reserved_bits |= X86_CR4_CET;        \
> IMO, it is a bit weird to split this part from the change of
> CR4_RESERVED_BITS.

Makes sense, will move these lines to another patch.

>
>
>> __reserved_bits;                                \
>>   })
>
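
Taking both of Binbin's suggestions together, the two checks could end up looking roughly like this (a sketch only, assuming the kvm_is_cr0_bit_set()/kvm_is_cr4_bit_set() helpers are available at this point in the series; the actual v3 code may differ):

	/* In kvm_set_cr0(): refuse to clear CR0.WP while CR4.CET is set. */
	if (!(cr0 & X86_CR0_WP) && kvm_is_cr4_bit_set(vcpu, X86_CR4_CET))
		return 1;

	/* In kvm_set_cr4(): refuse to set CR4.CET while CR0.WP is clear. */
	if ((cr4 & X86_CR4_CET) && !kvm_is_cr0_bit_set(vcpu, X86_CR0_WP))
		return 1;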

Patch

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a768cbf3fbb7..7cd7f6755acd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -995,6 +995,9 @@  int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	    (is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)))
 		return 1;
 
+	if (!(cr0 & X86_CR0_WP) && kvm_read_cr4_bits(vcpu, X86_CR4_CET))
+		return 1;
+
 	static_call(kvm_x86_set_cr0)(vcpu, cr0);
 
 	kvm_post_set_cr0(vcpu, old_cr0, cr0);
@@ -1210,6 +1213,9 @@  int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 			return 1;
 	}
 
+	if ((cr4 & X86_CR4_CET) && !(kvm_read_cr0(vcpu) & X86_CR0_WP))
+		return 1;
+
 	static_call(kvm_x86_set_cr4)(vcpu, cr4);
 
 	kvm_post_set_cr4(vcpu, old_cr4, cr4);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 2ba7c7fc4846..daadd5330dae 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -536,6 +536,9 @@  bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
 		__reserved_bits |= X86_CR4_VMXE;        \
 	if (!__cpu_has(__c, X86_FEATURE_PCID))          \
 		__reserved_bits |= X86_CR4_PCIDE;       \
+	if (!__cpu_has(__c, X86_FEATURE_SHSTK) &&	\
+	    !__cpu_has(__c, X86_FEATURE_IBT))		\
+		__reserved_bits |= X86_CR4_CET;		\
 	__reserved_bits;                                \
 })
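
For context on the x86.h hunk: __cr4_reserved_bits() builds the per-guest reserved-bit mask, so with this change CR4.CET is treated as reserved whenever neither SHSTK nor IBT is exposed to the guest. A simplified sketch of how that mask is consumed when CR4 is validated (modelled on the existing __kvm_is_valid_cr4() logic, not code added by this patch):

	/* Reject any CR4 value that sets a bit reserved for this guest. */
	if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
		return false;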