
[kvm-unit-tests,v1,1/2] s390x/spec_ex: Add test introducing odd address into PSW

Message ID 20230215171852.1935156-2-nsg@linux.ibm.com
State New, archived
Series s390x: Add misaligned instruction tests

Commit Message

Nina Schoetterl-Glausch Feb. 15, 2023, 5:18 p.m. UTC
Instructions on s390 must be halfword aligned.
Introducing an odd instruction address into the PSW leads to a
specification exception when attempting to execute the instruction at
the odd address.
Add a test for this.

Signed-off-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
---
 s390x/spec_ex.c | 73 ++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 69 insertions(+), 4 deletions(-)
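
For background: the PSW (program status word) pairs the CPU's control/state mask with the address of the next instruction to execute. A minimal sketch of the shape the test manipulates (assuming the struct psw definition from the kvm-unit-tests s390x lib):

	struct psw {
		uint64_t mask;	/* addressing mode, interrupt masks, ... */
		uint64_t addr;	/* next instruction; must be halfword aligned */
	};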

Comments

Claudio Imbrenda Feb. 17, 2023, 11:05 a.m. UTC | #1
On Wed, 15 Feb 2023 18:18:51 +0100
Nina Schoetterl-Glausch <nsg@linux.ibm.com> wrote:

> [...]
> 
> diff --git a/s390x/spec_ex.c b/s390x/spec_ex.c
> index 42ecaed3..b6764677 100644
> --- a/s390x/spec_ex.c
> +++ b/s390x/spec_ex.c
> @@ -44,9 +44,10 @@ static void fixup_invalid_psw(struct stack_frame_int *stack)
>  /*
>   * Load possibly invalid psw, but setup fixup_psw before,
>   * so that fixup_invalid_psw() can bring us back onto the right track.
> + * The provided argument is loaded into register 1.
>   * Also acts as compiler barrier, -> none required in expect/check_invalid_psw
>   */
> -static void load_psw(struct psw psw)
> +static void load_psw_with_arg(struct psw psw, uint64_t arg)
>  {
>  	uint64_t scratch;
>  
> @@ -57,15 +58,22 @@ static void load_psw(struct psw psw)
>  	fixup_psw.mask = extract_psw_mask();
>  	asm volatile ( "larl	%[scratch],0f\n"
>  		"	stg	%[scratch],%[fixup_addr]\n"
> +		"	lgr	%%r1,%[arg]\n"
>  		"	lpswe	%[psw]\n"
>  		"0:	nop\n"
>  		: [scratch] "=&d" (scratch),
>  		  [fixup_addr] "=&T" (fixup_psw.addr)
> -		: [psw] "Q" (psw)
> -		: "cc", "memory"
> +		: [psw] "Q" (psw),
> +		  [arg] "d" (arg)
> +		: "cc", "memory", "%r1"
>  	);
>  }
>  
> +static void load_psw(struct psw psw)
> +{
> +	load_psw_with_arg(psw, 0);
> +}
> +
>  static void load_short_psw(struct short_psw psw)
>  {
>  	uint64_t scratch;
> @@ -88,12 +96,18 @@ static void expect_invalid_psw(struct psw psw)
>  	invalid_psw_expected = true;
>  }
>  
> +static void clear_invalid_psw(void)
> +{
> +	expected_psw = (struct psw){0};

as of today, you can use PSW(0, 0)  :)
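
A minimal sketch of the suggested form, assuming the PSW(mask, addr) initializer macro from the s390x lib:

	expected_psw = PSW(0, 0);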

> +	invalid_psw_expected = false;
> +}
> +
>  static int check_invalid_psw(void)
>  {
>  	/* Since the fixup sets this to false we check for false here. */
>  	if (!invalid_psw_expected) {
>  		if (expected_psw.mask == invalid_psw.mask &&
> -		    expected_psw.addr == invalid_psw.addr)
> +		    expected_psw.addr == invalid_psw.addr - lowcore.pgm_int_id)

can you explain this change?

>  			return 0;
>  		report_fail("Wrong invalid PSW");
>  	} else {
> @@ -115,6 +129,56 @@ static int psw_bit_12_is_1(void)
>  	return check_invalid_psw();
>  }
>  
> +static int psw_odd_address(void)
> +{
> +	struct psw odd = {

now you can use PSW_WITH_CUR_MASK(0) here
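
A minimal sketch of the suggested initialization, assuming the PSW_WITH_CUR_MASK() helper builds a PSW carrying the current mask:

	struct psw odd = PSW_WITH_CUR_MASK(0);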

> +		.mask = extract_psw_mask(),
> +	};
> +	uint64_t regs[16];
> +	int r;
> +
> +	/*
> +	 * This asm is reentered at an odd address, which should cause a specification
> +	 * exception before the first unaligned instruction is executed.
> +	 * In this case, the interrupt handler fixes the address and the test succeeds.
> +	 * If, however, unaligned instructions *are* executed, they are jumped to
> +	 * from somewhere, with unknown registers, so save and restore those before.
> +	 */

I wonder if this could be simplified

> +	asm volatile ( "stmg	%%r0,%%r15,%[regs]\n"
> +		//can only offset by even number when using larl -> increment by one
> +		"	larl	%[r],0f\n"
> +		"	aghi	%[r],1\n"
> +		"	stg	%[r],%[addr]\n"

the above is ok (set up illegal PSW)

(maybe call expect_invalid_psw here, see comments below)

put the address of the exit label in a register

then do a lpswe here to jump to the invalid PSW

> +		"	xr	%[r],%[r]\n"
> +		"	brc	0xf,1f\n"

then do the above. that will only happen if the PSW was not loaded.

> +		"0:	. = . + 1\n"

if we are here, things went wrong.
write something in r, jump to the exit label (using the address in the
register that we saved earlier)

> +		"	lmg	%%r0,%%r15,0(%%r1)\n"
> +		//address of the instruction itself, should be odd, store for assert
> +		"	larl	%[r],0\n"
> +		"	stg	%[r],%[addr]\n"
> +		"	larl	%[r],0f\n"
> +		"	aghi	%[r],1\n"
> +		"	bcr	0xf,%[r]\n"
> +		"0:	. = . + 1\n"
> +		"1:\n"
> +	: [addr] "=T" (odd.addr),
> +	  [regs] "=Q" (regs),
> +	  [r] "=d" (r)
> +	: : "cc", "memory"
> +	);
> +

if we come out here and r is 0, then things went well, otherwise we
fail.

> +	if (!r) {
> +		expect_invalid_psw(odd);

that ^ should probably go before the asm (or _in_ the asm, maybe you
can call the C function from asm)

> +		load_psw_with_arg(odd, (uint64_t)&regs);

this would not be needed anymore ^


this way you don't need to save registers or do crazy things where you
jump back in the middle of the asm from C code. and then you don't even
need load_psw_with_arg
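
Pulling the suggestions above together, the simplified trigger might look roughly like this (an untested sketch: operand constraints are elided, fixup_psw.addr is assumed to be set up to point at label 1 beforehand, and expect_invalid_psw() is assumed to be called with the computed odd address before the lpswe):

	larl	%[tmp],0f	/* larl can only form even addresses... */
	aghi	%[tmp],1	/* ...so add one to get the odd target */
	stg	%[tmp],%[addr]	/* store it into the PSW image to load */
	larl	%[exit],1f	/* keep the exit address in a register */
	lghi	%[r],0		/* assume success (exception fires) */
	lpswe	%[psw]		/* branch to the odd address */
0:	. = . + 1		/* pad a byte: the code below starts odd */
	lghi	%[r],1		/* only reached if misaligned code runs */
	br	%[exit]		/* leave via the register saved above */
	. = . + 1		/* pad again so the exit label is even */
1:				/* fixup handler lands here on success */

Because the lpswe sits in the same asm block as the failure path, the register operands stay valid even if execution continues at the odd address, so the stmg/lmg of all sixteen registers is no longer needed.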


> +		return check_invalid_psw();
> +	} else {
> +		assert(odd.addr & 1);
> +		clear_invalid_psw();
> +		report_fail("executed unaligned instructions");
> +		return 1;
> +	}
> +}
> +
>  /* A short PSW needs to have bit 12 set to be valid. */
>  static int short_psw_bit_12_is_0(void)
>  {
> @@ -176,6 +240,7 @@ struct spec_ex_trigger {
>  static const struct spec_ex_trigger spec_ex_triggers[] = {
>  	{ "psw_bit_12_is_1", &psw_bit_12_is_1, false, &fixup_invalid_psw },
>  	{ "short_psw_bit_12_is_0", &short_psw_bit_12_is_0, false, &fixup_invalid_psw },
> +	{ "psw_odd_address", &psw_odd_address, false, &fixup_invalid_psw },
>  	{ "bad_alignment", &bad_alignment, true, NULL },
>  	{ "not_even", &not_even, true, NULL },
>  	{ NULL, NULL, false, NULL },
Nina Schoetterl-Glausch Feb. 20, 2023, 2:09 p.m. UTC | #2
On Fri, 2023-02-17 at 12:05 +0100, Claudio Imbrenda wrote:
> On Wed, 15 Feb 2023 18:18:51 +0100
> Nina Schoetterl-Glausch <nsg@linux.ibm.com> wrote:
> [...]
> 
> >  static int check_invalid_psw(void)
> >  {
> >  	/* Since the fixup sets this to false we check for false here. */
> >  	if (!invalid_psw_expected) {
> >  		if (expected_psw.mask == invalid_psw.mask &&
> > -		    expected_psw.addr == invalid_psw.addr)
> > +		    expected_psw.addr == invalid_psw.addr - lowcore.pgm_int_id)
> 
> can you explain this change?

In the existing invalid PSW tests, the instruction length code is 0, so nothing
changes there. When an odd address is introduced into the PSW, however, the
address in the old PSW is incremented by an unpredictable amount; subtracting
lowcore.pgm_int_id removes that increment.
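
As a hedged worked example (assuming lowcore.pgm_int_id holds the number of bytes by which the old PSW was advanced past the failing location): if the test introduces the odd address 0x1001 and the exception is recognized while processing a 4-byte instruction, the invalid PSW reads 0x1005 and pgm_int_id is 4, so

	invalid_psw.addr - lowcore.pgm_int_id == 0x1005 - 4 == 0x1001

which again matches expected_psw.addr.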
> 
> [...]
> 
> if we come out here and r is 0, then things went well, otherwise we
> fail.
> 
> > +	if (!r) {
> > +		expect_invalid_psw(odd);
> 
> that ^ should probably go before the asm (or _in_ the asm, maybe you
> can call the C function from asm)
> 
> > +		load_psw_with_arg(odd, (uint64_t)&regs);
> 
> this would not be needed anymore ^
> 
> 
> this way you don't need to save registers or do crazy things where you
> jump back in the middle of the asm from C code. and then you don't even
> need load_psw_with_arg
> 
I'll see what I can do.