new file mode 100644
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * CMM test library
+ *
+ * Copyright IBM Corp. 2022
+ *
+ * Authors:
+ * Nico Boehr <nrb@linux.ibm.com>
+ */
+#include <libcflat.h>
+#include <bitops.h>
+#include "cmm.h"
+
+/*
+ * Maps ESSA actions to states the page is allowed to be in after the
+ * respective action was executed.
+ */
+static const unsigned long allowed_essa_state_masks[4] = {
+ BIT(ESSA_USAGE_STABLE), /* ESSA_SET_STABLE */
+ BIT(ESSA_USAGE_UNUSED), /* ESSA_SET_UNUSED */
+ BIT(ESSA_USAGE_VOLATILE), /* ESSA_SET_VOLATILE */
+ BIT(ESSA_USAGE_VOLATILE) | BIT(ESSA_USAGE_POT_VOLATILE) /* ESSA_SET_POT_VOLATILE */
+};
+
+/*
+ * Set CMM page states on pagebuf.
+ * pagebuf must point to page_count consecutive pages.
+ * page_count must be a multiple of 4.
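+ * The states are set in a repeating pattern of stable, unused, volatile
+ * and potentially volatile, matching the order of allowed_essa_state_masks.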
+ */
+void cmm_set_page_states(uint8_t *pagebuf, unsigned long page_count)
+{
+ unsigned long addr = (unsigned long)pagebuf;
+ unsigned long i;
+
+ assert(page_count % 4 == 0);
+ for (i = 0; i < page_count; i += 4) {
+ essa(ESSA_SET_STABLE, addr + i * PAGE_SIZE);
+ essa(ESSA_SET_UNUSED, addr + (i + 1) * PAGE_SIZE);
+ essa(ESSA_SET_VOLATILE, addr + (i + 2) * PAGE_SIZE);
+ essa(ESSA_SET_POT_VOLATILE, addr + (i + 3) * PAGE_SIZE);
+ }
+}
+
+/*
+ * Verify CMM page states on pagebuf.
+ * Page states must have been set by cmm_set_page_states on pagebuf before.
+ * page_count must be a multiple of 4.
+ *
+ * If the page states match the expected states, the returned
+ * cmm_verify_result has verify_failed set to false; all other fields are
+ * then invalid.
+ * If there is a mismatch, verify_failed is true and the remaining fields
+ * describe the first mismatch encountered.
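+ *
+ * A minimal usage sketch (buf is assumed to be a page-aligned buffer
+ * spanning n pages, with n a multiple of 4):
+ *
+ *	struct cmm_verify_result result;
+ *
+ *	cmm_set_page_states(buf, n);
+ *	... migrate or otherwise disturb the guest ...
+ *	result = cmm_verify_page_states(buf, n);
+ *	cmm_report_verify(&result);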
+ */
+struct cmm_verify_result cmm_verify_page_states(uint8_t *pagebuf, unsigned long page_count)
+{
+ struct cmm_verify_result result = {
+ .verify_failed = true
+ };
+ unsigned long expected_mask, actual_mask;
+ unsigned long addr, i;
+
+ assert(page_count % 4 == 0);
+
+ for (i = 0; i < page_count; i++) {
+ addr = (unsigned long)(pagebuf + i * PAGE_SIZE);
+ actual_mask = essa(ESSA_GET_STATE, addr);
+ /* usage state is returned in bits 60 and 61; convert it to a single-bit mask */
+ actual_mask = BIT((actual_mask >> 2) & 0x3);
+ expected_mask = allowed_essa_state_masks[i % ARRAY_SIZE(allowed_essa_state_masks)];
+ if (!(actual_mask & expected_mask)) {
+ result.page_mismatch_idx = i;
+ result.page_mismatch_addr = addr;
+ result.expected_mask = expected_mask;
+ result.actual_mask = actual_mask;
+ return result;
+ }
+ }
+
+ result.verify_failed = false;
+ return result;
+}
+
+void cmm_report_verify(const struct cmm_verify_result *result)
+{
+ if (result->verify_failed)
+		report_fail("page state mismatch: first page idx = %lu, addr = %lx, expected_mask = 0x%x, actual_mask = 0x%x",
+			    result->page_mismatch_idx, result->page_mismatch_addr,
+			    result->expected_mask, result->actual_mask);
+ else
+ report_pass("page states match");
+}
new file mode 100644
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * CMM test library
+ *
+ * Copyright IBM Corp. 2022
+ *
+ * Authors:
+ * Nico Boehr <nrb@linux.ibm.com>
+ */
+#ifndef S390X_CMM_H
+#define S390X_CMM_H
+
+#include <libcflat.h>
+#include <asm/page.h>
+#include <asm/cmm.h>
+
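+/*
+ * Result of a page state verification.
+ * If verify_failed is false, all page states matched and the remaining
+ * fields are invalid. Otherwise they describe the first mismatching page:
+ * its index in pagebuf, its address, the mask of allowed states and the
+ * state actually reported by ESSA (as a single-bit mask).
+ */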
+struct cmm_verify_result {
+ bool verify_failed;
+ char expected_mask;
+ char actual_mask;
+ unsigned long page_mismatch_idx;
+ unsigned long page_mismatch_addr;
+};
+
+void cmm_set_page_states(uint8_t *pagebuf, unsigned long page_count);
+
+struct cmm_verify_result cmm_verify_page_states(uint8_t *pagebuf, unsigned long page_count);
+
+void cmm_report_verify(const struct cmm_verify_result *result);
+
+#endif /* S390X_CMM_H */
@@ -99,6 +99,7 @@ cflatobjs += lib/s390x/malloc_io.o
cflatobjs += lib/s390x/uv.o
cflatobjs += lib/s390x/sie.o
cflatobjs += lib/s390x/fault.o
+cflatobjs += lib/s390x/cmm.o
OBJDIRS += lib/s390x
@@ -9,46 +9,27 @@
*/
#include <libcflat.h>
+#include <cmm.h>
#include <asm/interrupt.h>
#include <asm/page.h>
#include <asm/cmm.h>
#include <bitops.h>
#define NUM_PAGES 128
-static uint8_t pagebuf[NUM_PAGES][PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+
+static uint8_t pagebuf[NUM_PAGES * PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
static void test_migration(void)
{
- int i, state_mask, actual_state;
- /*
- * Maps ESSA actions to states the page is allowed to be in after the
- * respective action was executed.
- */
- int allowed_essa_state_masks[4] = {
- BIT(ESSA_USAGE_STABLE), /* ESSA_SET_STABLE */
- BIT(ESSA_USAGE_UNUSED), /* ESSA_SET_UNUSED */
- BIT(ESSA_USAGE_VOLATILE), /* ESSA_SET_VOLATILE */
- BIT(ESSA_USAGE_VOLATILE) | BIT(ESSA_USAGE_POT_VOLATILE) /* ESSA_SET_POT_VOLATILE */
- };
+ struct cmm_verify_result result;
- assert(NUM_PAGES % 4 == 0);
- for (i = 0; i < NUM_PAGES; i += 4) {
- essa(ESSA_SET_STABLE, (unsigned long)pagebuf[i]);
- essa(ESSA_SET_UNUSED, (unsigned long)pagebuf[i + 1]);
- essa(ESSA_SET_VOLATILE, (unsigned long)pagebuf[i + 2]);
- essa(ESSA_SET_POT_VOLATILE, (unsigned long)pagebuf[i + 3]);
- }
+ cmm_set_page_states(pagebuf, NUM_PAGES);
puts("Please migrate me, then press return\n");
(void)getchar();
- for (i = 0; i < NUM_PAGES; i++) {
- actual_state = essa(ESSA_GET_STATE, (unsigned long)pagebuf[i]);
- /* extract the usage state in bits 60 and 61 */
- actual_state = (actual_state >> 2) & 0x3;
- state_mask = allowed_essa_state_masks[i % ARRAY_SIZE(allowed_essa_state_masks)];
- report(BIT(actual_state) & state_mask, "page %d state: expected_mask=0x%x actual_mask=0x%lx", i, state_mask, BIT(actual_state));
- }
+ result = cmm_verify_page_states(pagebuf, NUM_PAGES);
+ cmm_report_verify(&result);
}
int main(void)