Message ID | 1496842891-18011-1-git-send-email-thuth@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On 07.06.2017 15:41, Thomas Huth wrote: > Certain CPU instructions will cause an exit of the virtual > machine. Run some of these instructions to check whether > they are emulated right by KVM (or QEMU). > > Signed-off-by: Thomas Huth <thuth@redhat.com> > --- > v3: > - Split the prefix tests into spx and stpx tests, so checking the > exception with check_pgm_int_code() is now possible with the right > report_prefix_push() > - Move low_prot enable/disable code into interrupt.h > - Added -i and -t parameters for running the tests more than once > > lib/s390x/asm/interrupt.h | 20 ++++ > s390x/Makefile | 1 + > s390x/intercept.c | 227 ++++++++++++++++++++++++++++++++++++++++++++++ > s390x/unittests.cfg | 3 + > 4 files changed, 251 insertions(+) > create mode 100644 s390x/intercept.c > > diff --git a/lib/s390x/asm/interrupt.h b/lib/s390x/asm/interrupt.h > index 383d312..e4bde6c 100644 > --- a/lib/s390x/asm/interrupt.h > +++ b/lib/s390x/asm/interrupt.h > @@ -15,4 +15,24 @@ void handle_pgm_int(void); > void expect_pgm_int(void); > void check_pgm_int_code(uint16_t code); > > +/* Activate low-address protection */ > +static inline void low_prot_enable(void) > +{ > + uint64_t cr0; > + > + asm volatile (" stctg %%c0,%%c0,%0 " : : "Q"(cr0) : "memory"); > + cr0 |= 1ULL << (63-35); > + asm volatile (" lctlg 0,0,%0 " : : "Q"(cr0)); You then also might want to use %%c0 here. > +} > + > +/* Disable low-address protection */ > +static inline void low_prot_disable(void) > +{ > + uint64_t cr0; > + > + asm volatile (" stctg %%c0,%%c0,%0 " : : "Q"(cr0) : "memory"); > + cr0 &= ~(1ULL << (63-35)); > + asm volatile (" lctlg 0,0,%0 " : : "Q"(cr0)); Dito. 
> +} > + > #endif > diff --git a/s390x/Makefile b/s390x/Makefile > index b48f8ab..a61e163 100644 > --- a/s390x/Makefile > +++ b/s390x/Makefile > @@ -1,4 +1,5 @@ > tests = $(TEST_DIR)/selftest.elf > +tests += $(TEST_DIR)/intercept.elf > > all: directories test_cases > > diff --git a/s390x/intercept.c b/s390x/intercept.c > new file mode 100644 > index 0000000..639cb72 > --- /dev/null > +++ b/s390x/intercept.c > @@ -0,0 +1,227 @@ > +/* > + * Interception tests - for s390x CPU instruction that cause a VM exit > + * > + * Copyright (c) 2017 Red Hat Inc > + * > + * Authors: > + * Thomas Huth <thuth@redhat.com> > + * > + * This code is free software; you can redistribute it and/or modify it > + * under the terms of the GNU Library General Public License version 2. > + */ > +#include <libcflat.h> > +#include <asm/asm-offsets.h> > +#include <asm/interrupt.h> > +#include <asm/page.h> > + > +static uint8_t pagebuf[PAGE_SIZE * 2] __attribute__((aligned(PAGE_SIZE * 2))); > + > +static unsigned long nr_iterations; > +static unsigned long time_to_run; > + > +/* Test the STORE PREFIX instruction */ > +static void test_stpx(void) > +{ > + uint32_t old_prefix = -1U, tst_prefix = -1U; > + uint32_t new_prefix = (uint32_t)(intptr_t)pagebuf; > + > + /* Can we successfully change the prefix? 
*/ > + asm volatile ( > + " stpx %0\n" > + " spx %2\n" > + " stpx %1\n" > + " spx %0\n" > + : "+Q"(old_prefix), "+Q"(tst_prefix) > + : "Q"(new_prefix)); > + report("store prefix", old_prefix == 0 && tst_prefix == new_prefix); > + > + expect_pgm_int(); > + low_prot_enable(); > + asm volatile(" stpx 0(%0) " : : "r"(8)); > + low_prot_disable(); > + check_pgm_int_code(PGM_INT_CODE_PROTECTION); > + > + expect_pgm_int(); > + asm volatile(" stpx 0(%0) " : : "r"(1)); > + check_pgm_int_code(PGM_INT_CODE_SPECIFICATION); > + > + expect_pgm_int(); > + asm volatile(" stpx 0(%0) " : : "r"(-8)); > + check_pgm_int_code(PGM_INT_CODE_ADDRESSING); > +} > + > +/* Test the SET PREFIX instruction */ > +static void test_spx(void) > +{ > + uint32_t new_prefix = (uint32_t)(intptr_t)pagebuf; > + uint32_t old_prefix; > + > + memset(pagebuf, 0, PAGE_SIZE * 2); > + > + /* > + * Temporarily change the prefix page to our buffer, and store > + * some facility bits there ... at least some of them should be > + * set in our buffer afterwards. > + */ > + asm volatile ( > + " stpx %0\n" > + " spx %1\n" > + " stfl 0\n" > + " spx %0\n" > + : "+Q"(old_prefix) > + : "Q"(new_prefix) > + : "memory"); > + report("stfl to new prefix", pagebuf[GEN_LC_STFL] != 0); > + > + expect_pgm_int(); > + asm volatile(" spx 0(%0) " : : "r"(1)); > + check_pgm_int_code(PGM_INT_CODE_SPECIFICATION); > + > + expect_pgm_int(); > + asm volatile(" spx 0(%0) " : : "r"(-8)); > + check_pgm_int_code(PGM_INT_CODE_ADDRESSING); > +} > + > +/* Test the STORE CPU ADDRESS instruction */ > +static void test_stap(void) > +{ > + uint16_t cpuid = 0xffff; > + > + asm volatile ("stap %0\n" : "+Q"(cpuid)); > + report("get cpu id", cpuid != 0xffff); CPUID is returned by STIDP (STORE CPU ID). You most likely want to name this "get CPU address" / cpu_addr here. 
Reviewed-by: David Hildenbrand <david@redhat.com> Just FYI, with my hacked up QEMU (tcg irq injection rework + some alignment checks) I get: PASS: intercept: stpx: store prefix FAIL: intercept: stpx: Program interrupt: expected(4) == received(0) PASS: intercept: stpx: Program interrupt: expected(6) == received(6) PASS: intercept: stpx: Program interrupt: expected(5) == received(5) PASS: intercept: spx: stfl to new prefix FAIL: intercept: spx: Program interrupt: expected(6) == received(0) PASS: intercept: spx: Program interrupt: expected(5) == received(5) PASS: intercept: stap: get cpu id FAIL: intercept: stap: Program interrupt: expected(4) == received(0) FAIL: intercept: stap: Program interrupt: expected(6) == received(0) PASS: intercept: stap: Program interrupt: expected(5) == received(5) PASS: intercept: testblock: page cleared PASS: intercept: testblock: Program interrupt: expected(4) == received(4) PASS: intercept: testblock: Program interrupt: expected(5) == received(5) SUMMARY: 14 tests, 4 unexpected failures EXIT: STATUS=3 Alignment checks for reads I haven't had a look at yet. Low address protection still is an issue.
On 07.06.2017 16:01, David Hildenbrand wrote: > On 07.06.2017 15:41, Thomas Huth wrote: >> Certain CPU instructions will cause an exit of the virtual >> machine. Run some of these instructions to check whether >> they are emulated right by KVM (or QEMU). >> >> Signed-off-by: Thomas Huth <thuth@redhat.com> >> --- >> v3: >> - Split the prefix tests into spx and stpx tests, so checking the >> exception with check_pgm_int_code() is now possible with the right >> report_prefix_push() >> - Move low_prot enable/disable code into interrupt.h >> - Added -i and -t parameters for running the tests more than once >> >> lib/s390x/asm/interrupt.h | 20 ++++ >> s390x/Makefile | 1 + >> s390x/intercept.c | 227 ++++++++++++++++++++++++++++++++++++++++++++++ >> s390x/unittests.cfg | 3 + >> 4 files changed, 251 insertions(+) >> create mode 100644 s390x/intercept.c >> >> diff --git a/lib/s390x/asm/interrupt.h b/lib/s390x/asm/interrupt.h >> index 383d312..e4bde6c 100644 >> --- a/lib/s390x/asm/interrupt.h >> +++ b/lib/s390x/asm/interrupt.h >> @@ -15,4 +15,24 @@ void handle_pgm_int(void); >> void expect_pgm_int(void); >> void check_pgm_int_code(uint16_t code); >> >> +/* Activate low-address protection */ >> +static inline void low_prot_enable(void) >> +{ >> + uint64_t cr0; >> + >> + asm volatile (" stctg %%c0,%%c0,%0 " : : "Q"(cr0) : "memory"); >> + cr0 |= 1ULL << (63-35); >> + asm volatile (" lctlg 0,0,%0 " : : "Q"(cr0)); > > You then also might want to use %%c0 here. D'oh, you're right of course ... I'll send a v4... [...] >> +/* Test the STORE CPU ADDRESS instruction */ >> +static void test_stap(void) >> +{ >> + uint16_t cpuid = 0xffff; >> + >> + asm volatile ("stap %0\n" : "+Q"(cpuid)); >> + report("get cpu id", cpuid != 0xffff); > > CPUID is returned by STIDP (STORE CPU ID). You most likely want to name > this "get CPU address" / cpu_addr here. I'll fix that, too. > Reviewed-by: David Hildenbrand <david@redhat.com> Thanks! Thomas
diff --git a/lib/s390x/asm/interrupt.h b/lib/s390x/asm/interrupt.h index 383d312..e4bde6c 100644 --- a/lib/s390x/asm/interrupt.h +++ b/lib/s390x/asm/interrupt.h @@ -15,4 +15,24 @@ void handle_pgm_int(void); void expect_pgm_int(void); void check_pgm_int_code(uint16_t code); +/* Activate low-address protection */ +static inline void low_prot_enable(void) +{ + uint64_t cr0; + + asm volatile (" stctg %%c0,%%c0,%0 " : : "Q"(cr0) : "memory"); + cr0 |= 1ULL << (63-35); + asm volatile (" lctlg 0,0,%0 " : : "Q"(cr0)); +} + +/* Disable low-address protection */ +static inline void low_prot_disable(void) +{ + uint64_t cr0; + + asm volatile (" stctg %%c0,%%c0,%0 " : : "Q"(cr0) : "memory"); + cr0 &= ~(1ULL << (63-35)); + asm volatile (" lctlg 0,0,%0 " : : "Q"(cr0)); +} + #endif diff --git a/s390x/Makefile b/s390x/Makefile index b48f8ab..a61e163 100644 --- a/s390x/Makefile +++ b/s390x/Makefile @@ -1,4 +1,5 @@ tests = $(TEST_DIR)/selftest.elf +tests += $(TEST_DIR)/intercept.elf all: directories test_cases diff --git a/s390x/intercept.c b/s390x/intercept.c new file mode 100644 index 0000000..639cb72 --- /dev/null +++ b/s390x/intercept.c @@ -0,0 +1,227 @@ +/* + * Interception tests - for s390x CPU instruction that cause a VM exit + * + * Copyright (c) 2017 Red Hat Inc + * + * Authors: + * Thomas Huth <thuth@redhat.com> + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU Library General Public License version 2. + */ +#include <libcflat.h> +#include <asm/asm-offsets.h> +#include <asm/interrupt.h> +#include <asm/page.h> + +static uint8_t pagebuf[PAGE_SIZE * 2] __attribute__((aligned(PAGE_SIZE * 2))); + +static unsigned long nr_iterations; +static unsigned long time_to_run; + +/* Test the STORE PREFIX instruction */ +static void test_stpx(void) +{ + uint32_t old_prefix = -1U, tst_prefix = -1U; + uint32_t new_prefix = (uint32_t)(intptr_t)pagebuf; + + /* Can we successfully change the prefix? 
*/ + asm volatile ( + " stpx %0\n" + " spx %2\n" + " stpx %1\n" + " spx %0\n" + : "+Q"(old_prefix), "+Q"(tst_prefix) + : "Q"(new_prefix)); + report("store prefix", old_prefix == 0 && tst_prefix == new_prefix); + + expect_pgm_int(); + low_prot_enable(); + asm volatile(" stpx 0(%0) " : : "r"(8)); + low_prot_disable(); + check_pgm_int_code(PGM_INT_CODE_PROTECTION); + + expect_pgm_int(); + asm volatile(" stpx 0(%0) " : : "r"(1)); + check_pgm_int_code(PGM_INT_CODE_SPECIFICATION); + + expect_pgm_int(); + asm volatile(" stpx 0(%0) " : : "r"(-8)); + check_pgm_int_code(PGM_INT_CODE_ADDRESSING); +} + +/* Test the SET PREFIX instruction */ +static void test_spx(void) +{ + uint32_t new_prefix = (uint32_t)(intptr_t)pagebuf; + uint32_t old_prefix; + + memset(pagebuf, 0, PAGE_SIZE * 2); + + /* + * Temporarily change the prefix page to our buffer, and store + * some facility bits there ... at least some of them should be + * set in our buffer afterwards. + */ + asm volatile ( + " stpx %0\n" + " spx %1\n" + " stfl 0\n" + " spx %0\n" + : "+Q"(old_prefix) + : "Q"(new_prefix) + : "memory"); + report("stfl to new prefix", pagebuf[GEN_LC_STFL] != 0); + + expect_pgm_int(); + asm volatile(" spx 0(%0) " : : "r"(1)); + check_pgm_int_code(PGM_INT_CODE_SPECIFICATION); + + expect_pgm_int(); + asm volatile(" spx 0(%0) " : : "r"(-8)); + check_pgm_int_code(PGM_INT_CODE_ADDRESSING); +} + +/* Test the STORE CPU ADDRESS instruction */ +static void test_stap(void) +{ + uint16_t cpuid = 0xffff; + + asm volatile ("stap %0\n" : "+Q"(cpuid)); + report("get cpu id", cpuid != 0xffff); + + expect_pgm_int(); + low_prot_enable(); + asm volatile ("stap 0(%0)\n" : : "r"(8)); + low_prot_disable(); + check_pgm_int_code(PGM_INT_CODE_PROTECTION); + + expect_pgm_int(); + asm volatile ("stap 0(%0)\n" : : "r"(1)); + check_pgm_int_code(PGM_INT_CODE_SPECIFICATION); + + expect_pgm_int(); + asm volatile ("stap 0(%0)\n" : : "r"(-8)); + check_pgm_int_code(PGM_INT_CODE_ADDRESSING); +} + +/* Test the TEST BLOCK instruction */ 
+static void test_testblock(void) +{ + int cc; + + memset(pagebuf, 0xaa, PAGE_SIZE); + + asm volatile ( + " lghi %%r0,0\n" + " tb %1\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (cc) + : "a"(pagebuf + 0x123) + : "memory", "0", "cc"); + report("page cleared", + cc == 0 && pagebuf[0] == 0 && pagebuf[PAGE_SIZE - 1] == 0); + + expect_pgm_int(); + low_prot_enable(); + asm volatile (" tb %0 " : : "r"(4096)); + low_prot_disable(); + check_pgm_int_code(PGM_INT_CODE_PROTECTION); + + expect_pgm_int(); + asm volatile (" tb %0 " : : "r"(-4096)); + check_pgm_int_code(PGM_INT_CODE_ADDRESSING); +} + +static uint64_t get_clock_ms(void) +{ + uint64_t clk; + + asm volatile(" stck %0 " : : "Q"(clk) : "memory"); + + /* Bit 51 is incremented each microsecond */ + return (clk >> (63 - 51)) / 1000; +} + +struct { + const char *name; + void (*func)(void); + bool run_it; +} tests[] = { + { "stpx", test_stpx, false }, + { "spx", test_spx, false }, + { "stap", test_stap, false }, + { "testblock", test_testblock, false }, + { NULL, NULL, false } +}; + +void parse_intercept_test_args(int argc, char **argv) +{ + int i, ti; + bool run_all = true; + + for (i = 1; i < argc; i++) { + if (!strcmp("-i", argv[i])) { + i++; + if (i >= argc) + report_abort("-i needs a parameter"); + nr_iterations = atol(argv[i]); + } else if (!strcmp("-t", argv[i])) { + i++; + if (i >= argc) + report_abort("-t needs a parameter"); + time_to_run = atol(argv[i]); + } else for (ti = 0; tests[ti].name != NULL; ti++) { + if (!strcmp(tests[ti].name, argv[i])) { + run_all = false; + tests[ti].run_it = true; + break; + } else if (tests[ti + 1].name == NULL) { + report_abort("Unsupported parameter '%s'", + argv[i]); + } + } + } + + if (run_all) { + for (ti = 0; tests[ti].name != NULL; ti++) + tests[ti].run_it = true; + } +} + +int main(int argc, char **argv) +{ + uint64_t startclk; + int ti; + + parse_intercept_test_args(argc, argv); + + if (nr_iterations == 0 && time_to_run == 0) + nr_iterations = 1; + + 
report_prefix_push("intercept"); + + startclk = get_clock_ms(); + for (;;) { + for (ti = 0; tests[ti].name != NULL; ti++) { + report_prefix_push(tests[ti].name); + if (tests[ti].run_it) + tests[ti].func(); + report_prefix_pop(); + } + if (nr_iterations) { + nr_iterations -= 1; + if (nr_iterations == 0) + break; + } + if (time_to_run) { + if (get_clock_ms() - startclk > time_to_run) + break; + } + } + + report_prefix_pop(); + + return report_summary(); +} diff --git a/s390x/unittests.cfg b/s390x/unittests.cfg index 92e01ab..3b6b892 100644 --- a/s390x/unittests.cfg +++ b/s390x/unittests.cfg @@ -22,3 +22,6 @@ file = selftest.elf groups = selftest extra_params = -append 'test 123' + +[intercept] +file = intercept.elf
Certain CPU instructions will cause an exit of the virtual machine. Run some of these instructions to check whether they are emulated right by KVM (or QEMU). Signed-off-by: Thomas Huth <thuth@redhat.com> --- v3: - Split the prefix tests into spx and stpx tests, so checking the exception with check_pgm_int_code() is now possible with the right report_prefix_push() - Move low_prot enable/disable code into interrupt.h - Added -i and -t parameters for running the tests more than once lib/s390x/asm/interrupt.h | 20 ++++ s390x/Makefile | 1 + s390x/intercept.c | 227 ++++++++++++++++++++++++++++++++++++++++++++++ s390x/unittests.cfg | 3 + 4 files changed, 251 insertions(+) create mode 100644 s390x/intercept.c