@@ -8,6 +8,14 @@
#include <stdint.h>
#define NONCANONICAL 0xaaaaaaaaaaaaaaaaull
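+/*
+ * With CR4.LAM_SUP set, LAM57 (5-level paging) allows metadata in linear
+ * address bits 62:57 and LAM48 (4-level paging) in bits 62:48; bit 63 is
+ * never masked and still participates in the canonicality check.
+ */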
+#define LAM57_MASK GENMASK_ULL(62, 57)
+#define LAM48_MASK GENMASK_ULL(62, 48)
+
+/* Set metadata with non-canonical pattern in mask bits of a linear address */
+static inline u64 set_la_non_canonical(u64 src, u64 mask)
+{
+ return (src & ~mask) | (NONCANONICAL & mask);
+}
#ifdef __x86_64__
# define R "r"
@@ -120,6 +128,8 @@
#define X86_CR4_CET BIT(X86_CR4_CET_BIT)
#define X86_CR4_PKS_BIT (24)
#define X86_CR4_PKS BIT(X86_CR4_PKS_BIT)
+#define X86_CR4_LAM_SUP_BIT (28)
+#define X86_CR4_LAM_SUP BIT(X86_CR4_LAM_SUP_BIT)
#define X86_EFLAGS_CF_BIT (0)
#define X86_EFLAGS_CF BIT(X86_EFLAGS_CF_BIT)
@@ -968,4 +978,14 @@ struct invpcid_desc {
u64 addr : 64;
} __attribute__((packed));
+static inline bool is_la57_enabled(void)
+{
+ return !!(read_cr4() & X86_CR4_LA57);
+}
+
+static inline bool is_lam_sup_enabled(void)
+{
+ return !!(read_cr4() & X86_CR4_LAM_SUP);
+}
+
#endif
@@ -38,6 +38,7 @@ tests += $(TEST_DIR)/rdpru.$(exe)
tests += $(TEST_DIR)/pks.$(exe)
tests += $(TEST_DIR)/pmu_lbr.$(exe)
tests += $(TEST_DIR)/pmu_pebs.$(exe)
+tests += $(TEST_DIR)/lam.$(exe)
ifeq ($(CONFIG_EFI),y)
tests += $(TEST_DIR)/amd_sev.$(exe)
new file mode 100644
@@ -0,0 +1,214 @@
+/*
+ * Intel LAM unit test
+ *
+ * Copyright (C) 2023 Intel
+ *
+ * Author: Robert Hoo <robert.hu@linux.intel.com>
+ * Binbin Wu <binbin.wu@linux.intel.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or
+ * later.
+ */
+
+#include "libcflat.h"
+#include "processor.h"
+#include "desc.h"
+#include "vmalloc.h"
+#include "alloc_page.h"
+#include "vm.h"
+#include "asm/io.h"
+#include "ioram.h"
+
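+/*
+ * CR4.LAM_SUP (bit 28) can only be set when the LAM feature is enumerated;
+ * otherwise the write must #GP.
+ */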
+static void test_cr4_lam_set_clear(void)
+{
+ int vector;
+ bool has_lam = this_cpu_has(X86_FEATURE_LAM);
+
+ vector = write_cr4_safe(read_cr4() | X86_CR4_LAM_SUP);
+ report(has_lam ? !vector : vector == GP_VECTOR,
+ "Expected CR4.LAM_SUP=1 to %s", has_lam ? "succeed" : "#GP");
+
+ vector = write_cr4_safe(read_cr4() & ~X86_CR4_LAM_SUP);
+ report(!vector, "Expected CR4.LAM_SUP=0 to succeed");
+}
+
+/* Refer to emulator.c */
+static void do_mov(void *mem)
+{
+ unsigned long t1, t2;
+
+ t1 = 0x123456789abcdefull & -1ul;
+ asm volatile("mov %[t1], (%[mem])\n\t"
+ "mov (%[mem]), %[t2]"
+ : [t2]"=r"(t2)
+ : [t1]"r"(t1), [mem]"r"(mem)
+ : "memory");
+ report(t1 == t2, "Mov result check");
+}
+
+static bool get_lam_mask(u64 address, u64 *lam_mask)
+{
+ /*
+ * Default to LAM57_MASK so that a non-canonical address is still
+ * constructed when LAM is not supported or not enabled.
+ */
+ *lam_mask = LAM57_MASK;
+
+ /*
+ * Bit 63 determines if the address should be treated as a user address
+ * or a supervisor address.
+ */
+ if (address & BIT_ULL(63)) {
+ if (!is_lam_sup_enabled())
+ return false;
+
+ if (!is_la57_enabled())
+ *lam_mask = LAM48_MASK;
+ return true;
+ }
+
+ /* TODO: Get LAM mask for userspace address. */
+ return false;
+}
+
+static void test_ptr(u64 *ptr, bool is_mmio)
+{
+ u64 lam_mask;
+ bool lam_active, fault;
+
+ lam_active = get_lam_mask((u64)ptr, &lam_mask);
+
+ fault = test_for_exception(GP_VECTOR, do_mov, ptr);
+ report(!fault, "Expected access to untagged address for %s to succeed",
+ is_mmio ? "MMIO" : "memory");
+
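+ /*
+ * When LAM is active, the metadata bits are masked off before the
+ * canonicality check, so the tagged access succeeds; otherwise the
+ * address is non-canonical and the access takes #GP.
+ */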
+ ptr = (u64 *)set_la_non_canonical((u64)ptr, lam_mask);
+ fault = test_for_exception(GP_VECTOR, do_mov, ptr);
+ report(fault != lam_active, "Expected access to tagged address for %s %s LAM to %s",
+ is_mmio ? "MMIO" : "memory", lam_active ? "with" : "without",
+ lam_active ? "succeed" : "#GP");
+}
+
+/* INVLPG with a tagged address is the same as a NOP; no #GP is expected. */
+static void test_invlpg(void *va, bool fep)
+{
+ u64 lam_mask;
+ u64 *ptr;
+
+ /*
+ * The return value is not checked; INVLPG should never fault, whether
+ * or not LAM is supported.
+ */
+ get_lam_mask((u64)va, &lam_mask);
+ ptr = (u64 *)set_la_non_canonical((u64)va, lam_mask);
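+ /*
+ * KVM_FEP forces the instruction through KVM's emulator; it requires
+ * kvm.force_emulation_prefix=1 on the host.
+ */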
+ if (fep)
+ asm volatile(KVM_FEP "invlpg (%0)" ::"r" (ptr) : "memory");
+ else
+ invlpg(ptr);
+
+ report(true, "Expected %sINVLPG with tagged addr to succeed", fep ? "fep: " : "");
+}
+
+/* LAM does not apply to the linear address in the INVPCID descriptor. */
+static void test_invpcid(void *data)
+{
+ /*
+ * Reuse the memory address for the descriptor, since stack addresses
+ * in KUT don't follow the kernel's address space partitioning.
+ */
+ struct invpcid_desc *desc_ptr = (struct invpcid_desc *)data;
+ int vector;
+ u64 lam_mask;
+ bool lam_active;
+
+ if (!this_cpu_has(X86_FEATURE_INVPCID)) {
+ report_skip("INVPCID not supported");
+ return;
+ }
+
+ lam_active = get_lam_mask((u64)data, &lam_mask);
+
+ memset(desc_ptr, 0, sizeof(struct invpcid_desc));
+ desc_ptr->addr = (u64)data;
+
+ vector = invpcid_safe(0, desc_ptr);
+ report(!vector,
+ "Expected INVPCID with untagged pointer + untagged addr to succeed");
+
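+ /*
+ * LAM never untags the address inside the INVPCID descriptor, so a
+ * tagged address there takes #GP even when LAM is active.
+ */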
+ desc_ptr->addr = set_la_non_canonical(desc_ptr->addr, lam_mask);
+ vector = invpcid_safe(0, desc_ptr);
+ report(vector == GP_VECTOR,
+ "Expected INVPCID with untagged pointer + tagged addr to #GP");
+
+ desc_ptr = (struct invpcid_desc *)set_la_non_canonical((u64)desc_ptr,
+ lam_mask);
+ vector = invpcid_safe(0, desc_ptr);
+ report(vector == GP_VECTOR,
+ "Expected INVPCID with tagged pointer + tagged addr to #GP");
+
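+ /*
+ * The descriptor pointer itself is a memory operand, so LAM untagging
+ * applies to it: with LAM active the access succeeds, otherwise the
+ * tagged pointer is non-canonical and takes #GP.
+ */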
+ desc_ptr = (struct invpcid_desc *)data;
+ desc_ptr->addr = (u64)data;
+ desc_ptr = (struct invpcid_desc *)set_la_non_canonical((u64)desc_ptr,
+ lam_mask);
+ vector = invpcid_safe(0, desc_ptr);
+ report(lam_active ? !vector : vector == GP_VECTOR,
+ "Expected INVPCID with tagged pointer + untagged addr to %s",
+ lam_active ? "succeed" : "#GP");
+}
+
+static void test_lam_sup(void)
+{
+ void *vaddr, *vaddr_mmio;
+ phys_addr_t paddr;
+ unsigned long cr4 = read_cr4();
+ int vector;
+
+ /*
+ * KUT initializes vfree_top to 0 for X86_64, and each virtual address
+ * allocation grows downward from vfree_top. The return value of
+ * alloc_vpage() is therefore guaranteed to be a canonical, kernel-mode
+ * address, since only a small amount of virtual address space is
+ * allocated in this test.
+ */
+ vaddr = alloc_vpage();
+ vaddr_mmio = alloc_vpage();
+ paddr = virt_to_phys(alloc_page());
+ install_page(current_page_table(), paddr, vaddr);
+ install_page(current_page_table(), IORAM_BASE_PHYS, vaddr_mmio);
+
+ test_cr4_lam_set_clear();
+
+ /* Enable CR4.LAM_SUP for the following LAM_SUP tests. */
+ if (this_cpu_has(X86_FEATURE_LAM)) {
+ vector = write_cr4_safe(cr4 | X86_CR4_LAM_SUP);
+ report(!vector && is_lam_sup_enabled(),
+ "Expected CR4.LAM_SUP=1 to succeed");
+ }
+
+ /* Test for normal memory. */
+ test_ptr(vaddr, false);
+ /* Test for MMIO to trigger instruction emulation. */
+ test_ptr(vaddr_mmio, true);
+ test_invpcid(vaddr);
+ test_invlpg(vaddr, false);
+ if (is_fep_available())
+ test_invlpg(vaddr, true);
+ else
+ report_skip("Skipping tests the forced emulation, "
+ "use kvm.force_emulation_prefix=1 to enable\n");
+}
+
+int main(int ac, char **av)
+{
+ setup_vm();
+
+ if (!this_cpu_has(X86_FEATURE_LAM))
+ report_info("This CPU doesn't support LAM feature\n");
+ else
+ report_info("This CPU supports LAM feature\n");
+
+ test_lam_sup();
+
+ return report_summary();
+}
@@ -490,3 +490,13 @@ file = cet.flat
arch = x86_64
smp = 2
extra_params = -enable-kvm -m 2048 -cpu host
+
+[intel-lam]
+file = lam.flat
+arch = x86_64
+extra_params = -enable-kvm -cpu host
+
+[intel-no-lam]
+file = lam.flat
+arch = x86_64
+extra_params = -enable-kvm -cpu host,-lam