diff mbox series

[kvm-unit-tests,v4,4/4] x86: Add test case for INVVPID with LAM

Message ID 20230504084751.968-5-binbin.wu@linux.intel.com (mailing list archive)
State New, archived
Headers show
Series x86: Add test cases for LAM | expand

Commit Message

Binbin Wu May 4, 2023, 8:47 a.m. UTC
When LAM is on, the linear address of INVVPID operand can contain
metadata, and the linear address in the INVVPID descriptor can
contain metadata.

The added cases use a tagged descriptor address and/or a tagged target
invalidation address to verify that the behaviors are as expected when
LAM is on.
Also, INVVPID cases can be used as the common test cases for VMX
instruction VMExits.

Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
Reviewed-by: Chao Gao <chao.gao@intel.com>
---
 x86/vmx_tests.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 51 insertions(+), 1 deletion(-)

Comments

Binbin Wu May 9, 2023, 1:38 a.m. UTC | #1
On 5/4/2023 4:47 PM, Binbin Wu wrote:
> When LAM is on, the linear address of INVVPID operand can contain
> metadata, and the linear address in the INVVPID descriptor can
> contain metadata.
>
> The added cases use tagged descriptor address or/and tagged target
> invalidation address to make sure the behaviors are expected when
> LAM is on.
> Also, INVVPID cases can be used as the common test cases for VMX
> instruction VMExits.
>
> Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
> Reviewed-by: Chao Gao <chao.gao@intel.com>
> ---
>   x86/vmx_tests.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++-
>   1 file changed, 51 insertions(+), 1 deletion(-)
>
> diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
> index 217befe..678c9ec 100644
> --- a/x86/vmx_tests.c
> +++ b/x86/vmx_tests.c
> @@ -3225,6 +3225,54 @@ static void invvpid_test_not_in_vmx_operation(void)
>   	TEST_ASSERT(!vmx_on());
>   }
>   
> +/* LAM applies to the target address inside the descriptor of invvpid */
> +static void invvpid_test_lam(void)
> +{
> +	void *vaddr;
> +	struct invvpid_operand *operand;
> +	u64 lam_mask = LAM48_MASK;
> +	bool fault;
> +
> +	if (!this_cpu_has(X86_FEATURE_LAM)) {
> +		report_skip("LAM is not supported, skip INVVPID with LAM");
> +		return;
> +	}
> +	write_cr4_safe(read_cr4() | X86_CR4_LAM_SUP);
> +
> +	if (this_cpu_has(X86_FEATURE_LA57) && read_cr4() & X86_CR4_LA57)
> +		lam_mask = LAM57_MASK;
> +
> +	vaddr = alloc_vpage();
> +	install_page(current_page_table(), virt_to_phys(alloc_page()), vaddr);
> +	/*
> +	 * Since the stack memory address in KUT doesn't follow kernel address
> +	 * space partition rule, reuse the memory address for descriptor and
> +	 * the target address in the descriptor of invvpid.
> +	 */
> +	operand = (struct invvpid_operand *)vaddr;
> +	operand->vpid = 0xffff;
> +	operand->gla = (u64)vaddr;
> +
> +	operand = (struct invvpid_operand *)vaddr;
> +	operand->gla = set_la_non_canonical(operand->gla, lam_mask);
> +	fault = test_for_exception(GP_VECTOR, ds_invvpid, operand);
> +	report(!fault, "INVVPID (LAM on): untagged pointer + tagged addr");
> +
> +	operand = (struct invvpid_operand *)set_la_non_canonical((u64)operand,
> +								 lam_mask);
> +	operand->gla = (u64)vaddr;
> +	fault = test_for_exception(GP_VECTOR, ds_invvpid, operand);
> +	report(!fault, "INVVPID (LAM on): tagged pointer + untagged addr");
> +
> +	operand = (struct invvpid_operand *)set_la_non_canonical((u64)operand,
> +								 lam_mask);
> +	operand->gla = set_la_non_canonical(operand->gla, lam_mask);
> +	fault = test_for_exception(GP_VECTOR, ds_invvpid, operand);
> +	report(!fault, "INVVPID (LAM on): tagged pointer + tagged addr");
The test cases designed for invvpid with LAM are not right.

Will use two test cases to test invvpid when LAM is activated:
One tests with a tagged operand, expecting no #GP.
The other tests with a tagged target address inside the descriptor,
expecting the instruction to fail with
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID set in the VMX_INST_ERROR
field of the VMCS.

The new test code proposed as below:

     ....
     operand = (struct invvpid_operand *)vaddr;
     operand->vpid = 0xffff;
     operand->gla = (u64)vaddr;
     operand = (struct invvpid_operand *)set_la_non_canonical((u64)operand,
                                  lam_mask);
     fault = test_for_exception(GP_VECTOR, ds_invvpid, operand);
     report(!fault, "INVVPID (LAM on): tagged operand");

     /*
      * LAM doesn't apply to the address inside the descriptor, expected
      * failure and VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID set in
      * VMX_INST_ERROR.
      */
     try_invvpid(INVVPID_ADDR, 0xffff, NONCANONICAL);


> +
> +	write_cr4_safe(read_cr4() & ~X86_CR4_LAM_SUP);
> +}
> +
>   /*
>    * This does not test real-address mode, virtual-8086 mode, protected mode,
>    * or CPL > 0.
> @@ -3274,8 +3322,10 @@ static void invvpid_test(void)
>   	/*
>   	 * The gla operand is only validated for single-address INVVPID.
>   	 */
> -	if (types & (1u << INVVPID_ADDR))
> +	if (types & (1u << INVVPID_ADDR)) {
>   		try_invvpid(INVVPID_ADDR, 0xffff, NONCANONICAL);
> +		invvpid_test_lam();
> +	}
>   
>   	invvpid_test_gp();
>   	invvpid_test_ss();
diff mbox series

Patch

diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index 217befe..678c9ec 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -3225,6 +3225,54 @@  static void invvpid_test_not_in_vmx_operation(void)
 	TEST_ASSERT(!vmx_on());
 }
 
+/* LAM applies to the target address inside the descriptor of invvpid */
+static void invvpid_test_lam(void)
+{
+	void *vaddr;
+	struct invvpid_operand *operand;
+	u64 lam_mask = LAM48_MASK;
+	bool fault;
+
+	if (!this_cpu_has(X86_FEATURE_LAM)) {
+		report_skip("LAM is not supported, skip INVVPID with LAM");
+		return;
+	}
+	write_cr4_safe(read_cr4() | X86_CR4_LAM_SUP);
+
+	if (this_cpu_has(X86_FEATURE_LA57) && read_cr4() & X86_CR4_LA57)
+		lam_mask = LAM57_MASK;
+
+	vaddr = alloc_vpage();
+	install_page(current_page_table(), virt_to_phys(alloc_page()), vaddr);
+	/*
+	 * Since the stack memory address in KUT doesn't follow kernel address
+	 * space partition rule, reuse the memory address for descriptor and
+	 * the target address in the descriptor of invvpid.
+	 */
+	operand = (struct invvpid_operand *)vaddr;
+	operand->vpid = 0xffff;
+	operand->gla = (u64)vaddr;
+
+	operand = (struct invvpid_operand *)vaddr;
+	operand->gla = set_la_non_canonical(operand->gla, lam_mask);
+	fault = test_for_exception(GP_VECTOR, ds_invvpid, operand);
+	report(!fault, "INVVPID (LAM on): untagged pointer + tagged addr");
+
+	operand = (struct invvpid_operand *)set_la_non_canonical((u64)operand,
+								 lam_mask);
+	operand->gla = (u64)vaddr;
+	fault = test_for_exception(GP_VECTOR, ds_invvpid, operand);
+	report(!fault, "INVVPID (LAM on): tagged pointer + untagged addr");
+
+	operand = (struct invvpid_operand *)set_la_non_canonical((u64)operand,
+								 lam_mask);
+	operand->gla = set_la_non_canonical(operand->gla, lam_mask);
+	fault = test_for_exception(GP_VECTOR, ds_invvpid, operand);
+	report(!fault, "INVVPID (LAM on): tagged pointer + tagged addr");
+
+	write_cr4_safe(read_cr4() & ~X86_CR4_LAM_SUP);
+}
+
 /*
  * This does not test real-address mode, virtual-8086 mode, protected mode,
  * or CPL > 0.
@@ -3274,8 +3322,10 @@  static void invvpid_test(void)
 	/*
 	 * The gla operand is only validated for single-address INVVPID.
 	 */
-	if (types & (1u << INVVPID_ADDR))
+	if (types & (1u << INVVPID_ADDR)) {
 		try_invvpid(INVVPID_ADDR, 0xffff, NONCANONICAL);
+		invvpid_test_lam();
+	}
 
 	invvpid_test_gp();
 	invvpid_test_ss();