diff mbox series

[v2,2/4] x86: Add test case for LAM_SUP

Message ID 20230319082225.14302-3-binbin.wu@linux.intel.com (mailing list archive)
State New, archived
Headers show
Series x86: Add test cases for LAM | expand

Commit Message

Binbin Wu March 19, 2023, 8:22 a.m. UTC
From: Robert Hoo <robert.hu@linux.intel.com>

This unit test covers:
1. CR4.LAM_SUP toggle has expected behavior according to LAM status.
2. Memory access (strcpy() is used here as the test example) with a supervisor
   mode address containing LAM metadata behaves as expected per LAM status.
3. MMIO memory access with a supervisor mode address containing LAM metadata
   behaves as expected per LAM status.
4. The INVLPG memory operand doesn't contain LAM metadata; if the address is
   in non-canonical form, then the INVLPG is the same as a NOP (no #GP).
5. INVPCID memory operand (descriptor pointer) could contain LAM meta data,
   however, the address in the descriptor should be canonical.

In x86/unittests.cfg, add 2 test cases/guest conf, with and without LAM.

LAM feature spec: https://cdrdv2.intel.com/v1/dl/getContent/671368, Chap 7
LINEAR ADDRESS MASKING (LAM)

Signed-off-by: Robert Hoo <robert.hu@linux.intel.com>
Co-developed-by: Binbin Wu <binbin.wu@linux.intel.com>
Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
---
 lib/x86/processor.h |   3 +
 x86/Makefile.x86_64 |   1 +
 x86/lam.c           | 296 ++++++++++++++++++++++++++++++++++++++++++++
 x86/unittests.cfg   |  10 ++
 4 files changed, 310 insertions(+)
 create mode 100644 x86/lam.c

Comments

Chao Gao April 6, 2023, 3:50 a.m. UTC | #1
On Sun, Mar 19, 2023 at 04:22:23PM +0800, Binbin Wu wrote:
>+#define LAM57_BITS 6
>+#define LAM48_BITS 15
>+#define LAM57_MASK	GENMASK_ULL(62, 57)
>+#define LAM48_MASK	GENMASK_ULL(62, 48)
>+
>+struct invpcid_desc {
>+    u64 pcid : 12;
>+    u64 rsv  : 52;
>+    u64 addr : 64;

u64 addr;

>+
>+};
>+static int get_sup_lam_bits(void)
>+{
>+	if (this_cpu_has(X86_FEATURE_LA57) && read_cr4() & X86_CR4_LA57)
>+		return LAM57_BITS;
>+	else
>+		return LAM48_BITS;
>+}
>+
>+/* According to LAM mode, set metadata in high bits */
>+static u64 set_metadata(u64 src, unsigned long lam)
>+{
>+	u64 metadata;
>+
>+	switch (lam) {
>+	case LAM57_BITS: /* Set metadata in bits 62:57 */
>+		metadata = (NONCANONICAL & ((1UL << LAM57_BITS) - 1)) << 57;
>+		metadata |= (src & ~(LAM57_MASK));

this can be simplified to

	return (src & ~LAM47_MASK) | (NONCANONICAL & LAM47_MASK);

and you can pass a mask to set_metadata() and set mask to 0 if LAM isn't
enabled. Then set_metadata() can be further simplified to

	return (src & ~mask) | (NONCANONICAL & mask);

>+		break;
>+	case LAM48_BITS: /* Set metadata in bits 62:48 */
>+		metadata = (NONCANONICAL & ((1UL << LAM48_BITS) - 1)) << 48;
>+		metadata |= (src & ~(LAM48_MASK));
>+		break;
>+	default:
>+		metadata = src;
>+		break;
>+	}
>+
>+	return metadata;
>+}
>+
>+static void cr4_set_lam_sup(void *data)
>+{
>+	unsigned long cr4;
>+
>+	cr4 = read_cr4();
>+	write_cr4_safe(cr4 | X86_CR4_LAM_SUP);
>+}
>+
>+static void cr4_clear_lam_sup(void *data)
>+{
>+	unsigned long cr4;
>+
>+	cr4 = read_cr4();
>+	write_cr4_safe(cr4 & ~X86_CR4_LAM_SUP);
>+}
>+
>+static void test_cr4_lam_set_clear(bool lam_enumerated)
>+{
>+	bool fault;
>+
>+	fault = test_for_exception(GP_VECTOR, &cr4_set_lam_sup, NULL);
>+	if (lam_enumerated)
>+		report(!fault && (read_cr4() & X86_CR4_LAM_SUP),
>+		       "Set CR4.LAM_SUP");
>+	else
>+		report(fault, "Set CR4.LAM_SUP causes #GP");
>+
>+	fault = test_for_exception(GP_VECTOR, &cr4_clear_lam_sup, NULL);
>+	report(!fault, "Clear CR4.LAM_SUP");
>+}
>+
>+static void do_strcpy(void *mem)
>+{
>+	strcpy((char *)mem, "LAM SUP Test string.");
>+}
>+
>+static inline uint64_t test_tagged_ptr(uint64_t arg1, uint64_t arg2,
>+	uint64_t arg3, uint64_t arg4)
>+{
>+	bool lam_enumerated = !!arg1;
>+	int lam_bits = (int)arg2;
>+	u64 *ptr = (u64 *)arg3;
>+	bool la_57 = !!arg4;
>+	bool fault;
>+
>+	fault = test_for_exception(GP_VECTOR, do_strcpy, ptr);
>+	report(!fault, "strcpy to untagged addr");
>+
>+	ptr = (u64 *)set_metadata((u64)ptr, lam_bits);
>+	fault = test_for_exception(GP_VECTOR, do_strcpy, ptr);
>+	if (lam_enumerated)
>+		report(!fault, "strcpy to tagged addr");
>+	else
>+		report(fault, "strcpy to tagged addr causes #GP");

...

>+
>+	if (lam_enumerated && (lam_bits==LAM57_BITS) && !la_57) {
>+		ptr = (u64 *)set_metadata((u64)ptr, LAM48_BITS);
>+		fault = test_for_exception(GP_VECTOR, do_strcpy, ptr);
>+		report(fault, "strcpy to non-LAM-canonical addr causes #GP");
>+	}
>+
>+	return 0;
>+}
>+
>+/* Refer to emulator.c */
>+static void do_mov_mmio(void *mem)
>+{
>+	unsigned long t1, t2;
>+
>+	// test mov reg, r/m and mov r/m, reg
>+	t1 = 0x123456789abcdefull & -1ul;
>+	asm volatile("mov %[t1], (%[mem])\n\t"
>+		     "mov (%[mem]), %[t2]"
>+		     : [t2]"=r"(t2)
>+		     : [t1]"r"(t1), [mem]"r"(mem)
>+		     : "memory");
>+}
>+
>+static inline uint64_t test_tagged_mmio_ptr(uint64_t arg1, uint64_t arg2,
>+	uint64_t arg3, uint64_t arg4)
>+{
>+	bool lam_enumerated = !!arg1;
>+	int lam_bits = (int)arg2;
>+	u64 *ptr = (u64 *)arg3;
>+	bool la_57 = !!arg4;
>+	bool fault;
>+
>+	fault = test_for_exception(GP_VECTOR, do_mov_mmio, ptr);
>+	report(!fault, "Access MMIO with untagged addr");
>+
>+	ptr = (u64 *)set_metadata((u64)ptr, lam_bits);
>+	fault = test_for_exception(GP_VECTOR, do_mov_mmio, ptr);
>+	if (lam_enumerated)
>+		report(!fault,  "Access MMIO with tagged addr");
>+	else
>+		report(fault,  "Access MMIO with tagged addr causes #GP");

Maybe make this (and other similar changes) more dense, e.g.,

	report(fault != lam_enumerated, "Access MMIO with tagged addr")

>+	if (lam_enumerated && (lam_bits==LAM57_BITS) && !la_57) {
>+		ptr = (u64 *)set_metadata((u64)ptr, LAM48_BITS);
>+		fault = test_for_exception(GP_VECTOR, do_mov_mmio, ptr);
>+		report(fault,  "Access MMIO with non-LAM-canonical addr"
>+		               " causes #GP");

don't break long strings.

>+	}

please add a comment to explain the intention of this test.

>+
>+	return 0;
>+}
>+
>+static void do_invlpg(void *mem)
>+{
>+	invlpg(mem);
>+}
>+
>+static void do_invlpg_fep(void *mem)
>+{
>+	asm volatile(KVM_FEP "invlpg (%0)" ::"r" (mem) : "memory");
>+}
>+
>+/* invlpg with tagged address is same as NOP, no #GP */
>+static void test_invlpg(void *va, bool fep)
>+{
>+	bool fault;
>+	u64 *ptr;
>+
>+	ptr = (u64 *)set_metadata((u64)va, get_sup_lam_bits());
>+	if (fep)
>+		fault = test_for_exception(GP_VECTOR, do_invlpg_fep, ptr);
>+	else
>+		fault = test_for_exception(GP_VECTOR, do_invlpg, ptr);
>+
>+	report(!fault, "%sINVLPG with tagged addr", fep?"fep: ":"");
>+}
>+
>+static void do_invpcid(void *desc)
>+{
>+	unsigned long type = 0;
>+	struct invpcid_desc *desc_ptr = (struct invpcid_desc *)desc;
>+
>+	asm volatile("invpcid %0, %1" :
>+	                              : "m" (*desc_ptr), "r" (type)
>+	                              : "memory");
>+}
>+
>+static void test_invpcid(bool lam_enumerated, void *data)
>+{
>+	struct invpcid_desc *desc_ptr = (struct invpcid_desc *) data;
>+	int lam_bits = get_sup_lam_bits();
>+	bool fault;
>+
>+	if (!this_cpu_has(X86_FEATURE_PCID) ||
>+	    !this_cpu_has(X86_FEATURE_INVPCID)) {
>+		report_skip("INVPCID not supported");
>+		return;
>+	}
>+
>+	memset(desc_ptr, 0, sizeof(struct invpcid_desc));
>+	desc_ptr->addr = (u64)data + 16;

why "+16"? looks you try to avoid invalidating mapping for the descriptor itself.

how about using a local invpcid_desc?

	struct invpcid_desc desc = { .addr = data };

>+
>+	fault = test_for_exception(GP_VECTOR, do_invpcid, desc_ptr);
>+	report(!fault, "INVPCID: untagged pointer + untagged addr");
>+
>+	desc_ptr->addr = set_metadata(desc_ptr->addr, lam_bits);
>+	fault = test_for_exception(GP_VECTOR, do_invpcid, desc_ptr);
>+	report(fault, "INVPCID: untagged pointer + tagged addr causes #GP");
>+
>+	desc_ptr->addr = (u64)data + 16;
>+	desc_ptr = (struct invpcid_desc *)set_metadata((u64)desc_ptr, lam_bits);
>+	fault = test_for_exception(GP_VECTOR, do_invpcid, desc_ptr);
>+	if (lam_enumerated && (read_cr4() & X86_CR4_LAM_SUP))
>+		report(!fault, "INVPCID: tagged pointer + untagged addr");
>+	else
>+		report(fault, "INVPCID: tagged pointer + untagged addr"
>+		              " causes #GP");
>+
>+	desc_ptr = (struct invpcid_desc *)data;
>+	desc_ptr->addr = (u64)data + 16;
>+	desc_ptr->addr = set_metadata(desc_ptr->addr, lam_bits);
>+	desc_ptr = (struct invpcid_desc *)set_metadata((u64)desc_ptr, lam_bits);
>+	fault = test_for_exception(GP_VECTOR, do_invpcid, desc_ptr);
>+	report(fault, "INVPCID: tagged pointer + tagged addr causes #GP");
>+}
>+
>+static void test_lam_sup(bool lam_enumerated, bool fep_available)
>+{
>+	void *vaddr, *vaddr_mmio;
>+	phys_addr_t paddr;
>+	bool fault;
>+	bool la_57 = read_cr4() & X86_CR4_LA57;
>+	int lam_bits = get_sup_lam_bits();
>+
>+	vaddr = alloc_vpage();
>+	vaddr_mmio = alloc_vpage();
>+	paddr = virt_to_phys(alloc_page());
>+	install_page(current_page_table(), paddr, vaddr);
>+	install_page(current_page_table(), IORAM_BASE_PHYS, vaddr_mmio);
>+
>+	test_cr4_lam_set_clear(lam_enumerated);
>+
>+	/* Set for the following LAM_SUP tests */
>+	if (lam_enumerated) {
>+		fault = test_for_exception(GP_VECTOR, &cr4_set_lam_sup, NULL);
>+		report(!fault && (read_cr4() & X86_CR4_LAM_SUP),
>+		       "Set CR4.LAM_SUP");
>+	}
>+
>+	test_tagged_ptr(lam_enumerated, lam_bits, (u64)vaddr, la_57);
>+	test_tagged_mmio_ptr(lam_enumerated, lam_bits, (u64)vaddr_mmio, la_57);
>+	test_invlpg(vaddr, false);
>+	test_invpcid(lam_enumerated, vaddr);
>+
>+	if (fep_available)
>+		test_invlpg(vaddr, true);
>+}
>+
>+int main(int ac, char **av)
>+{
>+	bool lam_enumerated;
>+	bool fep_available = is_fep_available();
>+
>+	setup_vm();
>+
>+	lam_enumerated = this_cpu_has(X86_FEATURE_LAM);

has_lam?

>+	if (!lam_enumerated)
>+		report_info("This CPU doesn't support LAM feature\n");
>+	else
>+		report_info("This CPU supports LAM feature\n");
>+
>+	if (!fep_available)
>+		report_skip("Skipping tests the forced emulation, "
>+			    "use kvm.force_emulation_prefix=1 to enable\n");
>+
>+	test_lam_sup(lam_enumerated, fep_available);
>+
>+	return report_summary();
>+}
>diff --git a/x86/unittests.cfg b/x86/unittests.cfg
>index f324e32..34b09eb 100644
>--- a/x86/unittests.cfg
>+++ b/x86/unittests.cfg
>@@ -478,3 +478,13 @@ file = cet.flat
> arch = x86_64
> smp = 2
> extra_params = -enable-kvm -m 2048 -cpu host
>+
>+[intel-lam]
>+file = lam.flat
>+arch = x86_64
>+extra_params = -enable-kvm -cpu host
>+
>+[intel-no-lam]
>+file = lam.flat
>+arch = x86_64
>+extra_params = -enable-kvm -cpu host,-lam
>-- 
>2.25.1
>
Binbin Wu April 10, 2023, 3:33 p.m. UTC | #2
On 4/6/2023 11:50 AM, Chao Gao wrote:
> On Sun, Mar 19, 2023 at 04:22:23PM +0800, Binbin Wu wrote:
>> +#define LAM57_BITS 6
>> +#define LAM48_BITS 15
>> +#define LAM57_MASK	GENMASK_ULL(62, 57)
>> +#define LAM48_MASK	GENMASK_ULL(62, 48)
>> +
>> +struct invpcid_desc {
>> +    u64 pcid : 12;
>> +    u64 rsv  : 52;
>> +    u64 addr : 64;
> u64 addr;
>
>> +
>> +};
>> +static int get_sup_lam_bits(void)
>> +{
>> +	if (this_cpu_has(X86_FEATURE_LA57) && read_cr4() & X86_CR4_LA57)
>> +		return LAM57_BITS;
>> +	else
>> +		return LAM48_BITS;
>> +}
>> +
>> +/* According to LAM mode, set metadata in high bits */
>> +static u64 set_metadata(u64 src, unsigned long lam)
>> +{
>> +	u64 metadata;
>> +
>> +	switch (lam) {
>> +	case LAM57_BITS: /* Set metadata in bits 62:57 */
>> +		metadata = (NONCANONICAL & ((1UL << LAM57_BITS) - 1)) << 57;
>> +		metadata |= (src & ~(LAM57_MASK));
> this can be simplified to
>
> 	return (src & ~LAM47_MASK) | (NONCANONICAL & LAM47_MASK);
>
> and you can pass a mask to set_metadata() and set mask to 0 if LAM isn't
> enabled. Then set_metadata() can be further simplified to
>
> 	return (src & ~mask) | (NONCANONICAL & mask);
>
>> +		break;
>> +	case LAM48_BITS: /* Set metadata in bits 62:48 */
>> +		metadata = (NONCANONICAL & ((1UL << LAM48_BITS) - 1)) << 48;
>> +		metadata |= (src & ~(LAM48_MASK));
>> +		break;
>> +	default:
>> +		metadata = src;
>> +		break;
>> +	}
>> +
>> +	return metadata;
>> +}
>> +
>> +static void cr4_set_lam_sup(void *data)
>> +{
>> +	unsigned long cr4;
>> +
>> +	cr4 = read_cr4();
>> +	write_cr4_safe(cr4 | X86_CR4_LAM_SUP);
>> +}
>> +
>> +static void cr4_clear_lam_sup(void *data)
>> +{
>> +	unsigned long cr4;
>> +
>> +	cr4 = read_cr4();
>> +	write_cr4_safe(cr4 & ~X86_CR4_LAM_SUP);
>> +}
>> +
>> +static void test_cr4_lam_set_clear(bool lam_enumerated)
>> +{
>> +	bool fault;
>> +
>> +	fault = test_for_exception(GP_VECTOR, &cr4_set_lam_sup, NULL);
>> +	if (lam_enumerated)
>> +		report(!fault && (read_cr4() & X86_CR4_LAM_SUP),
>> +		       "Set CR4.LAM_SUP");
>> +	else
>> +		report(fault, "Set CR4.LAM_SUP causes #GP");
>> +
>> +	fault = test_for_exception(GP_VECTOR, &cr4_clear_lam_sup, NULL);
>> +	report(!fault, "Clear CR4.LAM_SUP");
>> +}
>> +
>> +static void do_strcpy(void *mem)
>> +{
>> +	strcpy((char *)mem, "LAM SUP Test string.");
>> +}
>> +
>> +static inline uint64_t test_tagged_ptr(uint64_t arg1, uint64_t arg2,
>> +	uint64_t arg3, uint64_t arg4)
>> +{
>> +	bool lam_enumerated = !!arg1;
>> +	int lam_bits = (int)arg2;
>> +	u64 *ptr = (u64 *)arg3;
>> +	bool la_57 = !!arg4;
>> +	bool fault;
>> +
>> +	fault = test_for_exception(GP_VECTOR, do_strcpy, ptr);
>> +	report(!fault, "strcpy to untagged addr");
>> +
>> +	ptr = (u64 *)set_metadata((u64)ptr, lam_bits);
>> +	fault = test_for_exception(GP_VECTOR, do_strcpy, ptr);
>> +	if (lam_enumerated)
>> +		report(!fault, "strcpy to tagged addr");
>> +	else
>> +		report(fault, "strcpy to tagged addr causes #GP");
> ...
>
>> +
>> +	if (lam_enumerated && (lam_bits==LAM57_BITS) && !la_57) {
>> +		ptr = (u64 *)set_metadata((u64)ptr, LAM48_BITS);
>> +		fault = test_for_exception(GP_VECTOR, do_strcpy, ptr);
>> +		report(fault, "strcpy to non-LAM-canonical addr causes #GP");
>> +	}
>> +
>> +	return 0;
>> +}
>> +
>> +/* Refer to emulator.c */
>> +static void do_mov_mmio(void *mem)
>> +{
>> +	unsigned long t1, t2;
>> +
>> +	// test mov reg, r/m and mov r/m, reg
>> +	t1 = 0x123456789abcdefull & -1ul;
>> +	asm volatile("mov %[t1], (%[mem])\n\t"
>> +		     "mov (%[mem]), %[t2]"
>> +		     : [t2]"=r"(t2)
>> +		     : [t1]"r"(t1), [mem]"r"(mem)
>> +		     : "memory");
>> +}
>> +
>> +static inline uint64_t test_tagged_mmio_ptr(uint64_t arg1, uint64_t arg2,
>> +	uint64_t arg3, uint64_t arg4)
>> +{
>> +	bool lam_enumerated = !!arg1;
>> +	int lam_bits = (int)arg2;
>> +	u64 *ptr = (u64 *)arg3;
>> +	bool la_57 = !!arg4;
>> +	bool fault;
>> +
>> +	fault = test_for_exception(GP_VECTOR, do_mov_mmio, ptr);
>> +	report(!fault, "Access MMIO with untagged addr");
>> +
>> +	ptr = (u64 *)set_metadata((u64)ptr, lam_bits);
>> +	fault = test_for_exception(GP_VECTOR, do_mov_mmio, ptr);
>> +	if (lam_enumerated)
>> +		report(!fault,  "Access MMIO with tagged addr");
>> +	else
>> +		report(fault,  "Access MMIO with tagged addr causes #GP");
> Maybe make this (and other similar changes) more dense, e.g.,
>
> 	report(fault != lam_enumerated, "Access MMIO with tagged addr")
>
>> +	if (lam_enumerated && (lam_bits==LAM57_BITS) && !la_57) {
>> +		ptr = (u64 *)set_metadata((u64)ptr, LAM48_BITS);
>> +		fault = test_for_exception(GP_VECTOR, do_mov_mmio, ptr);
>> +		report(fault,  "Access MMIO with non-LAM-canonical addr"
>> +		               " causes #GP");
> don't break long strings.
>
>> +	}
> please add a comment to explain the intention of this test.
>
>> +
>> +	return 0;
>> +}
>> +
>> +static void do_invlpg(void *mem)
>> +{
>> +	invlpg(mem);
>> +}
>> +
>> +static void do_invlpg_fep(void *mem)
>> +{
>> +	asm volatile(KVM_FEP "invlpg (%0)" ::"r" (mem) : "memory");
>> +}
>> +
>> +/* invlpg with tagged address is same as NOP, no #GP */
>> +static void test_invlpg(void *va, bool fep)
>> +{
>> +	bool fault;
>> +	u64 *ptr;
>> +
>> +	ptr = (u64 *)set_metadata((u64)va, get_sup_lam_bits());
>> +	if (fep)
>> +		fault = test_for_exception(GP_VECTOR, do_invlpg_fep, ptr);
>> +	else
>> +		fault = test_for_exception(GP_VECTOR, do_invlpg, ptr);
>> +
>> +	report(!fault, "%sINVLPG with tagged addr", fep?"fep: ":"");
>> +}
>> +
>> +static void do_invpcid(void *desc)
>> +{
>> +	unsigned long type = 0;
>> +	struct invpcid_desc *desc_ptr = (struct invpcid_desc *)desc;
>> +
>> +	asm volatile("invpcid %0, %1" :
>> +	                              : "m" (*desc_ptr), "r" (type)
>> +	                              : "memory");
>> +}
>> +
>> +static void test_invpcid(bool lam_enumerated, void *data)
>> +{
>> +	struct invpcid_desc *desc_ptr = (struct invpcid_desc *) data;
>> +	int lam_bits = get_sup_lam_bits();
>> +	bool fault;
>> +
>> +	if (!this_cpu_has(X86_FEATURE_PCID) ||
>> +	    !this_cpu_has(X86_FEATURE_INVPCID)) {
>> +		report_skip("INVPCID not supported");
>> +		return;
>> +	}
>> +
>> +	memset(desc_ptr, 0, sizeof(struct invpcid_desc));
>> +	desc_ptr->addr = (u64)data + 16;
> why "+16"? looks you try to avoid invalidating mapping for the descriptor itself.

It's true, there is no need for the "+16".


>
> how about using a local invpcid_desc?
>
> 	struct invpcid_desc desc = { .addr = data };

The test case is for LAM_SUP, I also want to test the pointer of the 
descriptor.
But in kvm-unit-tests, stack memory addresses don't follow the kernel-space
address convention, so I reuse the same memory region for the descriptor
and its pointer.
Maybe I can add a comment to explain why.


Other advices are accepted.



>
>> +
>> +	fault = test_for_exception(GP_VECTOR, do_invpcid, desc_ptr);
>> +	report(!fault, "INVPCID: untagged pointer + untagged addr");
>> +
>> +	desc_ptr->addr = set_metadata(desc_ptr->addr, lam_bits);
>> +	fault = test_for_exception(GP_VECTOR, do_invpcid, desc_ptr);
>> +	report(fault, "INVPCID: untagged pointer + tagged addr causes #GP");
>> +
>> +	desc_ptr->addr = (u64)data + 16;
>> +	desc_ptr = (struct invpcid_desc *)set_metadata((u64)desc_ptr, lam_bits);
>> +	fault = test_for_exception(GP_VECTOR, do_invpcid, desc_ptr);
>> +	if (lam_enumerated && (read_cr4() & X86_CR4_LAM_SUP))
>> +		report(!fault, "INVPCID: tagged pointer + untagged addr");
>> +	else
>> +		report(fault, "INVPCID: tagged pointer + untagged addr"
>> +		              " causes #GP");
>> +
>> +	desc_ptr = (struct invpcid_desc *)data;
>> +	desc_ptr->addr = (u64)data + 16;
>> +	desc_ptr->addr = set_metadata(desc_ptr->addr, lam_bits);
>> +	desc_ptr = (struct invpcid_desc *)set_metadata((u64)desc_ptr, lam_bits);
>> +	fault = test_for_exception(GP_VECTOR, do_invpcid, desc_ptr);
>> +	report(fault, "INVPCID: tagged pointer + tagged addr causes #GP");
>> +}
>> +
>> +static void test_lam_sup(bool lam_enumerated, bool fep_available)
>> +{
>> +	void *vaddr, *vaddr_mmio;
>> +	phys_addr_t paddr;
>> +	bool fault;
>> +	bool la_57 = read_cr4() & X86_CR4_LA57;
>> +	int lam_bits = get_sup_lam_bits();
>> +
>> +	vaddr = alloc_vpage();
>> +	vaddr_mmio = alloc_vpage();
>> +	paddr = virt_to_phys(alloc_page());
>> +	install_page(current_page_table(), paddr, vaddr);
>> +	install_page(current_page_table(), IORAM_BASE_PHYS, vaddr_mmio);
>> +
>> +	test_cr4_lam_set_clear(lam_enumerated);
>> +
>> +	/* Set for the following LAM_SUP tests */
>> +	if (lam_enumerated) {
>> +		fault = test_for_exception(GP_VECTOR, &cr4_set_lam_sup, NULL);
>> +		report(!fault && (read_cr4() & X86_CR4_LAM_SUP),
>> +		       "Set CR4.LAM_SUP");
>> +	}
>> +
>> +	test_tagged_ptr(lam_enumerated, lam_bits, (u64)vaddr, la_57);
>> +	test_tagged_mmio_ptr(lam_enumerated, lam_bits, (u64)vaddr_mmio, la_57);
>> +	test_invlpg(vaddr, false);
>> +	test_invpcid(lam_enumerated, vaddr);
>> +
>> +	if (fep_available)
>> +		test_invlpg(vaddr, true);
>> +}
>> +
>> +int main(int ac, char **av)
>> +{
>> +	bool lam_enumerated;
>> +	bool fep_available = is_fep_available();
>> +
>> +	setup_vm();
>> +
>> +	lam_enumerated = this_cpu_has(X86_FEATURE_LAM);
> has_lam?
>
>> +	if (!lam_enumerated)
>> +		report_info("This CPU doesn't support LAM feature\n");
>> +	else
>> +		report_info("This CPU supports LAM feature\n");
>> +
>> +	if (!fep_available)
>> +		report_skip("Skipping tests the forced emulation, "
>> +			    "use kvm.force_emulation_prefix=1 to enable\n");
>> +
>> +	test_lam_sup(lam_enumerated, fep_available);
>> +
>> +	return report_summary();
>> +}
>> diff --git a/x86/unittests.cfg b/x86/unittests.cfg
>> index f324e32..34b09eb 100644
>> --- a/x86/unittests.cfg
>> +++ b/x86/unittests.cfg
>> @@ -478,3 +478,13 @@ file = cet.flat
>> arch = x86_64
>> smp = 2
>> extra_params = -enable-kvm -m 2048 -cpu host
>> +
>> +[intel-lam]
>> +file = lam.flat
>> +arch = x86_64
>> +extra_params = -enable-kvm -cpu host
>> +
>> +[intel-no-lam]
>> +file = lam.flat
>> +arch = x86_64
>> +extra_params = -enable-kvm -cpu host,-lam
>> -- 
>> 2.25.1
>>
diff mbox series

Patch

diff --git a/lib/x86/processor.h b/lib/x86/processor.h
index 8373bbe..4bb8cd7 100644
--- a/lib/x86/processor.h
+++ b/lib/x86/processor.h
@@ -107,6 +107,8 @@ 
 #define X86_CR4_CET		BIT(X86_CR4_CET_BIT)
 #define X86_CR4_PKS_BIT		(24)
 #define X86_CR4_PKS		BIT(X86_CR4_PKS_BIT)
+#define X86_CR4_LAM_SUP_BIT	(28)
+#define X86_CR4_LAM_SUP	BIT(X86_CR4_LAM_SUP_BIT)
 
 #define X86_EFLAGS_CF_BIT	(0)
 #define X86_EFLAGS_CF		BIT(X86_EFLAGS_CF_BIT)
@@ -250,6 +252,7 @@  static inline bool is_intel(void)
 #define	X86_FEATURE_SPEC_CTRL		(CPUID(0x7, 0, EDX, 26))
 #define	X86_FEATURE_ARCH_CAPABILITIES	(CPUID(0x7, 0, EDX, 29))
 #define	X86_FEATURE_PKS			(CPUID(0x7, 0, ECX, 31))
+#define	X86_FEATURE_LAM			(CPUID(0x7, 1, EAX, 26))
 
 /*
  * Extended Leafs, a.k.a. AMD defined
diff --git a/x86/Makefile.x86_64 b/x86/Makefile.x86_64
index f483dea..fa11eb3 100644
--- a/x86/Makefile.x86_64
+++ b/x86/Makefile.x86_64
@@ -34,6 +34,7 @@  tests += $(TEST_DIR)/rdpru.$(exe)
 tests += $(TEST_DIR)/pks.$(exe)
 tests += $(TEST_DIR)/pmu_lbr.$(exe)
 tests += $(TEST_DIR)/pmu_pebs.$(exe)
+tests += $(TEST_DIR)/lam.$(exe)
 
 ifeq ($(CONFIG_EFI),y)
 tests += $(TEST_DIR)/amd_sev.$(exe)
diff --git a/x86/lam.c b/x86/lam.c
new file mode 100644
index 0000000..a5f4e51
--- /dev/null
+++ b/x86/lam.c
@@ -0,0 +1,296 @@ 
+/*
+ * Intel LAM_SUP unit test
+ *
+ * Copyright (C) 2023 Intel
+ *
+ * Author: Robert Hoo <robert.hu@linux.intel.com>
+ *         Binbin Wu <binbin.wu@linux.intel.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or
+ * later.
+ */
+
+#include "libcflat.h"
+#include "processor.h"
+#include "desc.h"
+#include "vmalloc.h"
+#include "alloc_page.h"
+#include "vm.h"
+#include "asm/io.h"
+#include "ioram.h"
+
+#define LAM57_BITS 6
+#define LAM48_BITS 15
+#define LAM57_MASK	GENMASK_ULL(62, 57)
+#define LAM48_MASK	GENMASK_ULL(62, 48)
+
+struct invpcid_desc {
+    u64 pcid : 12;
+    u64 rsv  : 52;
+    u64 addr : 64;
+};
+
+static int get_sup_lam_bits(void)
+{
+	if (this_cpu_has(X86_FEATURE_LA57) && read_cr4() & X86_CR4_LA57)
+		return LAM57_BITS;
+	else
+		return LAM48_BITS;
+}
+
+/* According to LAM mode, set metadata in high bits */
+static u64 set_metadata(u64 src, unsigned long lam)
+{
+	u64 metadata;
+
+	switch (lam) {
+	case LAM57_BITS: /* Set metadata in bits 62:57 */
+		metadata = (NONCANONICAL & ((1UL << LAM57_BITS) - 1)) << 57;
+		metadata |= (src & ~(LAM57_MASK));
+		break;
+	case LAM48_BITS: /* Set metadata in bits 62:48 */
+		metadata = (NONCANONICAL & ((1UL << LAM48_BITS) - 1)) << 48;
+		metadata |= (src & ~(LAM48_MASK));
+		break;
+	default:
+		metadata = src;
+		break;
+	}
+
+	return metadata;
+}
+
+static void cr4_set_lam_sup(void *data)
+{
+	unsigned long cr4;
+
+	cr4 = read_cr4();
+	write_cr4_safe(cr4 | X86_CR4_LAM_SUP);
+}
+
+static void cr4_clear_lam_sup(void *data)
+{
+	unsigned long cr4;
+
+	cr4 = read_cr4();
+	write_cr4_safe(cr4 & ~X86_CR4_LAM_SUP);
+}
+
+static void test_cr4_lam_set_clear(bool lam_enumerated)
+{
+	bool fault;
+
+	fault = test_for_exception(GP_VECTOR, &cr4_set_lam_sup, NULL);
+	if (lam_enumerated)
+		report(!fault && (read_cr4() & X86_CR4_LAM_SUP),
+		       "Set CR4.LAM_SUP");
+	else
+		report(fault, "Set CR4.LAM_SUP causes #GP");
+
+	fault = test_for_exception(GP_VECTOR, &cr4_clear_lam_sup, NULL);
+	report(!fault, "Clear CR4.LAM_SUP");
+}
+
+static void do_strcpy(void *mem)
+{
+	strcpy((char *)mem, "LAM SUP Test string.");
+}
+
+static inline uint64_t test_tagged_ptr(uint64_t arg1, uint64_t arg2,
+	uint64_t arg3, uint64_t arg4)
+{
+	bool lam_enumerated = !!arg1;
+	int lam_bits = (int)arg2;
+	u64 *ptr = (u64 *)arg3;
+	bool la_57 = !!arg4;
+	bool fault;
+
+	fault = test_for_exception(GP_VECTOR, do_strcpy, ptr);
+	report(!fault, "strcpy to untagged addr");
+
+	ptr = (u64 *)set_metadata((u64)ptr, lam_bits);
+	fault = test_for_exception(GP_VECTOR, do_strcpy, ptr);
+	if (lam_enumerated)
+		report(!fault, "strcpy to tagged addr");
+	else
+		report(fault, "strcpy to tagged addr causes #GP");
+
+	if (lam_enumerated && (lam_bits==LAM57_BITS) && !la_57) {
+		ptr = (u64 *)set_metadata((u64)ptr, LAM48_BITS);
+		fault = test_for_exception(GP_VECTOR, do_strcpy, ptr);
+		report(fault, "strcpy to non-LAM-canonical addr causes #GP");
+	}
+
+	return 0;
+}
+
+/* Refer to emulator.c */
+static void do_mov_mmio(void *mem)
+{
+	unsigned long t1, t2;
+
+	// test mov reg, r/m and mov r/m, reg
+	t1 = 0x123456789abcdefull & -1ul;
+	asm volatile("mov %[t1], (%[mem])\n\t"
+		     "mov (%[mem]), %[t2]"
+		     : [t2]"=r"(t2)
+		     : [t1]"r"(t1), [mem]"r"(mem)
+		     : "memory");
+}
+
+static inline uint64_t test_tagged_mmio_ptr(uint64_t arg1, uint64_t arg2,
+	uint64_t arg3, uint64_t arg4)
+{
+	bool lam_enumerated = !!arg1;
+	int lam_bits = (int)arg2;
+	u64 *ptr = (u64 *)arg3;
+	bool la_57 = !!arg4;
+	bool fault;
+
+	fault = test_for_exception(GP_VECTOR, do_mov_mmio, ptr);
+	report(!fault, "Access MMIO with untagged addr");
+
+	ptr = (u64 *)set_metadata((u64)ptr, lam_bits);
+	fault = test_for_exception(GP_VECTOR, do_mov_mmio, ptr);
+	if (lam_enumerated)
+		report(!fault,  "Access MMIO with tagged addr");
+	else
+		report(fault,  "Access MMIO with tagged addr causes #GP");
+
+	if (lam_enumerated && (lam_bits==LAM57_BITS) && !la_57) {
+		ptr = (u64 *)set_metadata((u64)ptr, LAM48_BITS);
+		fault = test_for_exception(GP_VECTOR, do_mov_mmio, ptr);
+		report(fault,  "Access MMIO with non-LAM-canonical addr"
+		               " causes #GP");
+	}
+
+	return 0;
+}
+
+static void do_invlpg(void *mem)
+{
+	invlpg(mem);
+}
+
+static void do_invlpg_fep(void *mem)
+{
+	asm volatile(KVM_FEP "invlpg (%0)" ::"r" (mem) : "memory");
+}
+
+/* invlpg with tagged address is same as NOP, no #GP */
+static void test_invlpg(void *va, bool fep)
+{
+	bool fault;
+	u64 *ptr;
+
+	ptr = (u64 *)set_metadata((u64)va, get_sup_lam_bits());
+	if (fep)
+		fault = test_for_exception(GP_VECTOR, do_invlpg_fep, ptr);
+	else
+		fault = test_for_exception(GP_VECTOR, do_invlpg, ptr);
+
+	report(!fault, "%sINVLPG with tagged addr", fep?"fep: ":"");
+}
+
+static void do_invpcid(void *desc)
+{
+	unsigned long type = 0;
+	struct invpcid_desc *desc_ptr = (struct invpcid_desc *)desc;
+
+	asm volatile("invpcid %0, %1" :
+	                              : "m" (*desc_ptr), "r" (type)
+	                              : "memory");
+}
+
+static void test_invpcid(bool lam_enumerated, void *data)
+{
+	struct invpcid_desc *desc_ptr = (struct invpcid_desc *) data;
+	int lam_bits = get_sup_lam_bits();
+	bool fault;
+
+	if (!this_cpu_has(X86_FEATURE_PCID) ||
+	    !this_cpu_has(X86_FEATURE_INVPCID)) {
+		report_skip("INVPCID not supported");
+		return;
+	}
+
+	memset(desc_ptr, 0, sizeof(struct invpcid_desc));
+	desc_ptr->addr = (u64)data + 16;
+
+	fault = test_for_exception(GP_VECTOR, do_invpcid, desc_ptr);
+	report(!fault, "INVPCID: untagged pointer + untagged addr");
+
+	desc_ptr->addr = set_metadata(desc_ptr->addr, lam_bits);
+	fault = test_for_exception(GP_VECTOR, do_invpcid, desc_ptr);
+	report(fault, "INVPCID: untagged pointer + tagged addr causes #GP");
+
+	desc_ptr->addr = (u64)data + 16;
+	desc_ptr = (struct invpcid_desc *)set_metadata((u64)desc_ptr, lam_bits);
+	fault = test_for_exception(GP_VECTOR, do_invpcid, desc_ptr);
+	if (lam_enumerated && (read_cr4() & X86_CR4_LAM_SUP))
+		report(!fault, "INVPCID: tagged pointer + untagged addr");
+	else
+		report(fault, "INVPCID: tagged pointer + untagged addr"
+		              " causes #GP");
+
+	desc_ptr = (struct invpcid_desc *)data;
+	desc_ptr->addr = (u64)data + 16;
+	desc_ptr->addr = set_metadata(desc_ptr->addr, lam_bits);
+	desc_ptr = (struct invpcid_desc *)set_metadata((u64)desc_ptr, lam_bits);
+	fault = test_for_exception(GP_VECTOR, do_invpcid, desc_ptr);
+	report(fault, "INVPCID: tagged pointer + tagged addr causes #GP");
+}
+
+static void test_lam_sup(bool lam_enumerated, bool fep_available)
+{
+	void *vaddr, *vaddr_mmio;
+	phys_addr_t paddr;
+	bool fault;
+	bool la_57 = read_cr4() & X86_CR4_LA57;
+	int lam_bits = get_sup_lam_bits();
+
+	vaddr = alloc_vpage();
+	vaddr_mmio = alloc_vpage();
+	paddr = virt_to_phys(alloc_page());
+	install_page(current_page_table(), paddr, vaddr);
+	install_page(current_page_table(), IORAM_BASE_PHYS, vaddr_mmio);
+
+	test_cr4_lam_set_clear(lam_enumerated);
+
+	/* Set for the following LAM_SUP tests */
+	if (lam_enumerated) {
+		fault = test_for_exception(GP_VECTOR, &cr4_set_lam_sup, NULL);
+		report(!fault && (read_cr4() & X86_CR4_LAM_SUP),
+		       "Set CR4.LAM_SUP");
+	}
+
+	test_tagged_ptr(lam_enumerated, lam_bits, (u64)vaddr, la_57);
+	test_tagged_mmio_ptr(lam_enumerated, lam_bits, (u64)vaddr_mmio, la_57);
+	test_invlpg(vaddr, false);
+	test_invpcid(lam_enumerated, vaddr);
+
+	if (fep_available)
+		test_invlpg(vaddr, true);
+}
+
+int main(int ac, char **av)
+{
+	bool lam_enumerated;
+	bool fep_available = is_fep_available();
+
+	setup_vm();
+
+	lam_enumerated = this_cpu_has(X86_FEATURE_LAM);
+	if (!lam_enumerated)
+		report_info("This CPU doesn't support LAM feature\n");
+	else
+		report_info("This CPU supports LAM feature\n");
+
+	if (!fep_available)
+		report_skip("Skipping tests the forced emulation, "
+			    "use kvm.force_emulation_prefix=1 to enable\n");
+
+	test_lam_sup(lam_enumerated, fep_available);
+
+	return report_summary();
+}
diff --git a/x86/unittests.cfg b/x86/unittests.cfg
index f324e32..34b09eb 100644
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -478,3 +478,13 @@  file = cet.flat
 arch = x86_64
 smp = 2
 extra_params = -enable-kvm -m 2048 -cpu host
+
+[intel-lam]
+file = lam.flat
+arch = x86_64
+extra_params = -enable-kvm -cpu host
+
+[intel-no-lam]
+file = lam.flat
+arch = x86_64
+extra_params = -enable-kvm -cpu host,-lam