@@ -1714,7 +1714,7 @@ static void test_smsw(void)
{
MK_INSN(smsw, "movl %cr0, %ebx\n\t"
"movl %ebx, %ecx\n\t"
- "or $0x40000000, %ebx\n\t"
+ "or $0x00000001, %ebx\n\t"
"movl %ebx, %cr0\n\t"
"smswl %eax\n\t"
"movl %ecx, %cr0\n\t");
@@ -1722,7 +1722,9 @@ static void test_smsw(void)
init_inregs(&(struct regs){ .eax = 0x12345678 });
exec_in_big_real_mode(&insn_smsw);
- report("smsw", R_AX | R_BX | R_CX, outregs.eax == outregs.ebx);
+ report("smsw", R_AX | R_BX | R_CX,
+ (outregs.eax & 0xffff) == (outregs.ebx & 0xffff) &&
+ (outregs.eax & 0x1));
}

static void test_xadd(void)
The test currently asserts behaviour of SMSW that is undefined per the Intel SDM: in non-64-bit modes, when the destination operand is a 32-bit register, the low-order 16 bits of CR0 are copied into the low-order 16 bits of the register and the high-order 16 bits are undefined. TCG doesn't write the high-order 16 bits, so the test fails on it. Instead of CR0.CD, set CR0.PE and check only that the low-order 16 bits match, to avoid any ambiguity with the undefined behaviour.

Cc: Nadav Amit <namit@vmware.com>
Cc: Richard Henderson <rth@twiddle.net>
Signed-off-by: Roman Bolshakov <r.bolshakov@yadro.com>
---
 x86/realmode.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

An alternative would be to change the undefined behaviour of TCG itself:
https://github.com/roolebo/qemu/commit/a5e5ee3a41c52031dfda3b1b903100bcb5639742
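
As a side note, the SDM-defined part of the SMSW result can be demonstrated outside the real-mode harness too. The sketch below is hypothetical and not part of this patch; it assumes an x86 GCC/Clang toolchain in user space with CR4.UMIP clear (otherwise SMSW faults in ring 3). It shows why only the low-order 16 bits of a 32-bit SMSW destination are meaningful to compare, which is exactly what the updated report() check relies on:

#include <stdint.h>
#include <stdio.h>

/*
 * Standalone illustration: read the machine status word into a 32-bit
 * register with SMSW. Per the SDM, only bits 15:0 are architecturally
 * defined when the destination is a 32-bit register in non-64-bit
 * modes; bits 31:16 may hold anything, so callers must mask them off
 * before comparing.
 */
static inline uint32_t read_msw(void)
{
	uint32_t msw = 0;

	asm volatile("smswl %0" : "=r"(msw));
	return msw;
}

int main(void)
{
	uint32_t msw = read_msw();

	/*
	 * Compare only the defined low-order 16 bits. Under a
	 * protected-mode OS, CR0.PE (bit 0) is expected to be set,
	 * mirroring the CR0.PE check added to the test.
	 */
	printf("MSW low 16 bits: 0x%04x, PE=%u\n",
	       (unsigned)(msw & 0xffff), (unsigned)(msw & 0x1));
	return 0;
}

Masking with 0xffff here mirrors the (outregs.eax & 0xffff) == (outregs.ebx & 0xffff) comparison in the patched test, and checking bit 0 corresponds to CR0.PE having been set by insn_smsw.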