| Message ID | 20221128132323.1964532-2-nrb@linux.ibm.com (mailing list archive) |
|---|---|
| State | Superseded |
| Series | s390x: test CMM during migration |
On Mon, 28 Nov 2022 14:23:22 +0100
Nico Boehr <nrb@linux.ibm.com> wrote:

> Upcoming changes will add a test which is very similar to the existing
> CMM migration test. To reduce code duplication, move the common function
> to a library which can be re-used by both tests.
>
> Signed-off-by: Nico Boehr <nrb@linux.ibm.com>
> Reviewed-by: Thomas Huth <thuth@redhat.com>

Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>

> ---
>  lib/s390x/cmm.c       | 92 +++++++++++++++++++++++++++++++++++++++++++
>  lib/s390x/cmm.h       | 31 +++++++++++++++
>  s390x/Makefile        |  1 +
>  s390x/migration-cmm.c | 34 ++++------------
>  4 files changed, 132 insertions(+), 26 deletions(-)
>  create mode 100644 lib/s390x/cmm.c
>  create mode 100644 lib/s390x/cmm.h
>
> diff --git a/lib/s390x/cmm.c b/lib/s390x/cmm.c
> new file mode 100644
> index 000000000000..d1399a7445c1
> --- /dev/null
> +++ b/lib/s390x/cmm.c
> @@ -0,0 +1,92 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * CMM test library
> + *
> + * Copyright IBM Corp. 2022
> + *
> + * Authors:
> + *  Nico Boehr <nrb@linux.ibm.com>
> + */
> +#include <libcflat.h>
> +#include <bitops.h>
> +#include "cmm.h"
> +
> +/*
> + * Maps ESSA actions to states the page is allowed to be in after the
> + * respective action was executed.
> + */
> +static const unsigned long allowed_essa_state_masks[4] = {
> +	BIT(ESSA_USAGE_STABLE),		/* ESSA_SET_STABLE */
> +	BIT(ESSA_USAGE_UNUSED),		/* ESSA_SET_UNUSED */
> +	BIT(ESSA_USAGE_VOLATILE),	/* ESSA_SET_VOLATILE */
> +	BIT(ESSA_USAGE_VOLATILE) | BIT(ESSA_USAGE_POT_VOLATILE)	/* ESSA_SET_POT_VOLATILE */
> +};
> +
> +/*
> + * Set CMM page states on pagebuf.
> + * pagebuf must point to page_count consecutive pages.
> + * page_count must be a multiple of 4.
> + */
> +void cmm_set_page_states(uint8_t *pagebuf, unsigned long page_count)
> +{
> +	unsigned long addr = (unsigned long)pagebuf;
> +	unsigned long i;
> +
> +	assert(page_count % 4 == 0);
> +	for (i = 0; i < page_count; i += 4) {
> +		essa(ESSA_SET_STABLE, addr + i * PAGE_SIZE);
> +		essa(ESSA_SET_UNUSED, addr + (i + 1) * PAGE_SIZE);
> +		essa(ESSA_SET_VOLATILE, addr + (i + 2) * PAGE_SIZE);
> +		essa(ESSA_SET_POT_VOLATILE, addr + (i + 3) * PAGE_SIZE);
> +	}
> +}
> +
> +/*
> + * Verify CMM page states on pagebuf.
> + * Page states must have been set by cmm_set_page_states on pagebuf before.
> + * page_count must be a multiple of 4.
> + *
> + * If page states match the expected result, will return a cmm_verify_result
> + * with verify_failed false. All other fields are then invalid.
> + * If there is a mismatch, the returned struct will have verify_failed true
> + * and will be filled with details on the first mismatch encountered.
> + */
> +struct cmm_verify_result cmm_verify_page_states(uint8_t *pagebuf, unsigned long page_count)
> +{
> +	struct cmm_verify_result result = {
> +		.verify_failed = true
> +	};
> +	unsigned long expected_mask, actual_mask;
> +	unsigned long addr, i;
> +
> +	assert(page_count % 4 == 0);
> +
> +	for (i = 0; i < page_count; i++) {
> +		addr = (unsigned long)(pagebuf + i * PAGE_SIZE);
> +		actual_mask = essa(ESSA_GET_STATE, addr);
> +		/* usage state in bits 60 and 61 */
> +		actual_mask = BIT((actual_mask >> 2) & 0x3);
> +		expected_mask = allowed_essa_state_masks[i % ARRAY_SIZE(allowed_essa_state_masks)];
> +		if (!(actual_mask & expected_mask)) {
> +			result.page_mismatch_idx = i;
> +			result.page_mismatch_addr = addr;
> +			result.expected_mask = expected_mask;
> +			result.actual_mask = actual_mask;
> +			return result;
> +		}
> +	}
> +
> +	result.verify_failed = false;
> +	return result;
> +}
> +
> +void cmm_report_verify(struct cmm_verify_result const *result)
> +{
> +	if (result->verify_failed)
> +		report_fail("page state mismatch: first page idx = %lu, addr = %lx, "
> +			    "expected_mask = 0x%x, actual_mask = 0x%x",
> +			    result->page_mismatch_idx, result->page_mismatch_addr,
> +			    result->expected_mask, result->actual_mask);
> +	else
> +		report_pass("page states match");
> +}
> diff --git a/lib/s390x/cmm.h b/lib/s390x/cmm.h
> new file mode 100644
> index 000000000000..9794e091517e
> --- /dev/null
> +++ b/lib/s390x/cmm.h
> @@ -0,0 +1,31 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * CMM test library
> + *
> + * Copyright IBM Corp. 2022
> + *
> + * Authors:
> + *  Nico Boehr <nrb@linux.ibm.com>
> + */
> +#ifndef S390X_CMM_H
> +#define S390X_CMM_H
> +
> +#include <libcflat.h>
> +#include <asm/page.h>
> +#include <asm/cmm.h>
> +
> +struct cmm_verify_result {
> +	bool verify_failed;
> +	char expected_mask;
> +	char actual_mask;
> +	unsigned long page_mismatch_idx;
> +	unsigned long page_mismatch_addr;
> +};
> +
> +void cmm_set_page_states(uint8_t *pagebuf, unsigned long page_count);
> +
> +struct cmm_verify_result cmm_verify_page_states(uint8_t *pagebuf, unsigned long page_count);
> +
> +void cmm_report_verify(struct cmm_verify_result const *result);
> +
> +#endif /* S390X_CMM_H */
> diff --git a/s390x/Makefile b/s390x/Makefile
> index bf1504f9d58c..401cb6371cee 100644
> --- a/s390x/Makefile
> +++ b/s390x/Makefile
> @@ -99,6 +99,7 @@ cflatobjs += lib/s390x/malloc_io.o
>  cflatobjs += lib/s390x/uv.o
>  cflatobjs += lib/s390x/sie.o
>  cflatobjs += lib/s390x/fault.o
> +cflatobjs += lib/s390x/cmm.o
>
>  OBJDIRS += lib/s390x
>
> diff --git a/s390x/migration-cmm.c b/s390x/migration-cmm.c
> index aa7910ca76bf..720ef9fb9799 100644
> --- a/s390x/migration-cmm.c
> +++ b/s390x/migration-cmm.c
> @@ -14,41 +14,23 @@
>  #include <asm/cmm.h>
>  #include <bitops.h>
>
> +#include "cmm.h"
> +
>  #define NUM_PAGES 128
> -static uint8_t pagebuf[NUM_PAGES][PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
> +
> +static uint8_t pagebuf[NUM_PAGES * PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
>
>  static void test_migration(void)
>  {
> -	int i, state_mask, actual_state;
> -	/*
> -	 * Maps ESSA actions to states the page is allowed to be in after the
> -	 * respective action was executed.
> -	 */
> -	int allowed_essa_state_masks[4] = {
> -		BIT(ESSA_USAGE_STABLE),		/* ESSA_SET_STABLE */
> -		BIT(ESSA_USAGE_UNUSED),		/* ESSA_SET_UNUSED */
> -		BIT(ESSA_USAGE_VOLATILE),	/* ESSA_SET_VOLATILE */
> -		BIT(ESSA_USAGE_VOLATILE) | BIT(ESSA_USAGE_POT_VOLATILE)	/* ESSA_SET_POT_VOLATILE */
> -	};
> +	struct cmm_verify_result result;
>
> -	assert(NUM_PAGES % 4 == 0);
> -	for (i = 0; i < NUM_PAGES; i += 4) {
> -		essa(ESSA_SET_STABLE, (unsigned long)pagebuf[i]);
> -		essa(ESSA_SET_UNUSED, (unsigned long)pagebuf[i + 1]);
> -		essa(ESSA_SET_VOLATILE, (unsigned long)pagebuf[i + 2]);
> -		essa(ESSA_SET_POT_VOLATILE, (unsigned long)pagebuf[i + 3]);
> -	}
> +	cmm_set_page_states(pagebuf, NUM_PAGES);
>
>  	puts("Please migrate me, then press return\n");
>  	(void)getchar();
>
> -	for (i = 0; i < NUM_PAGES; i++) {
> -		actual_state = essa(ESSA_GET_STATE, (unsigned long)pagebuf[i]);
> -		/* extract the usage state in bits 60 and 61 */
> -		actual_state = (actual_state >> 2) & 0x3;
> -		state_mask = allowed_essa_state_masks[i % ARRAY_SIZE(allowed_essa_state_masks)];
> -		report(BIT(actual_state) & state_mask, "page %d state: expected_mask=0x%x actual_mask=0x%lx", i, state_mask, BIT(actual_state));
> -	}
> +	result = cmm_verify_page_states(pagebuf, NUM_PAGES);
> +	cmm_report_verify(&result);
>  }
>
>  int main(void)
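As context for the reuse the commit message describes, below is a minimal sketch of how another test could drive the new library. It is not part of this series; the test name and the "event under test" step are hypothetical, and only `cmm_set_page_states()`, `cmm_verify_page_states()` and `cmm_report_verify()` come from the patch above.

```c
/* Illustrative sketch only -- not from this series. */
#include <libcflat.h>
#include "cmm.h"	/* library added by this patch; pulls in asm/page.h and asm/cmm.h */

#define NUM_PAGES 128	/* the library asserts this is a multiple of 4 */

/* any page-aligned buffer of NUM_PAGES consecutive pages works */
static uint8_t pagebuf[NUM_PAGES * PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));

/* hypothetical test body showing the set -> event -> verify -> report flow */
static void check_cmm_states_after_event(void)
{
	struct cmm_verify_result result;

	/* cycle the four ESSA set actions over the buffer */
	cmm_set_page_states(pagebuf, NUM_PAGES);

	/* ... migration, or whatever event is under test, happens here ... */

	/* re-query the states and report pass, or fail with mismatch details */
	result = cmm_verify_page_states(pagebuf, NUM_PAGES);
	cmm_report_verify(&result);
}
```

This is essentially what the reworked test_migration() in s390x/migration-cmm.c does after this patch, which is why the helpers are being moved into lib/s390x/.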