[kvm-unit-tests,2/7] x86: Replace instruction prefix delimiters with spaces

Message ID 20200810130618.16066-3-r.bolshakov@yadro.com (mailing list archive)
State New, archived
Series Add support for generic ELF cross-compiler

Commit Message

Roman Bolshakov Aug. 10, 2020, 1:06 p.m. UTC
There are three kinds of x86 instruction prefix delimiters in GNU
binutils: '/', '\\' and a space.

The first works on Linux and a few other platforms. The second one is
SVR4-compatible and works on the generic ELF target. The last kind is
universal and works everywhere; it is also the one used in the GAS
manual [1]. Using space delimiters fixes the build errors seen with
x86_64-elf binutils:

  x86/cstart64.S:217: Error: invalid character '/' in mnemonic
  x86/cstart64.S:313: Error: invalid character '/' in mnemonic

1. https://sourceware.org/binutils/docs/as/i386_002dPrefixes.html
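
As a minimal illustration (assuming GCC extended asm; the copy16()
helper below is hypothetical and not code from this series), only the
space-delimited form assembles with every binutils target:

  /* Hypothetical example: prefix/mnemonic delimiters in GAS syntax. */
  static inline void copy16(void *dst, const void *src)
  {
          unsigned long n = 16;                /* bytes to copy */

          asm volatile("rep movsb"      /* space: accepted everywhere        */
                    /* "rep/movsb" */   /* '/': Linux and a few other targets */
                    /* "rep\\movsb" */  /* '\': SVR4-style generic ELF target */
                       : "+D"(dst), "+S"(src), "+c"(n)
                       : : "memory");
  }

The patch below applies the same substitution to the existing lock,
rep, repe, data16 and rex64 prefixes in cstart.S, cstart64.S and
emulator.c.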

Cc: Cameron Esfahani <dirty@apple.com>
Signed-off-by: Roman Bolshakov <r.bolshakov@yadro.com>
---
 x86/cstart.S   |  4 ++--
 x86/cstart64.S |  4 ++--
 x86/emulator.c | 38 +++++++++++++++++++-------------------
 3 files changed, 23 insertions(+), 23 deletions(-)

Comments

Thomas Huth Aug. 28, 2020, 4:34 a.m. UTC | #1
On 10/08/2020 15.06, Roman Bolshakov wrote:
> There are three kinds of x86 instruction prefix delimiters in GNU
> binutils: '/', '\\' and a space.
> 
> The first works on Linux and a few other platforms. The second one is
> SVR4-compatible and works on the generic ELF target. The last kind is
> universal and works everywhere; it is also the one used in the GAS
> manual [1]. Using space delimiters fixes the build errors seen with
> x86_64-elf binutils:
> 
>   x86/cstart64.S:217: Error: invalid character '/' in mnemonic
>   x86/cstart64.S:313: Error: invalid character '/' in mnemonic
> 
> 1. https://sourceware.org/binutils/docs/as/i386_002dPrefixes.html
> 
> Cc: Cameron Esfahani <dirty@apple.com>
> Signed-off-by: Roman Bolshakov <r.bolshakov@yadro.com>
> ---
>  x86/cstart.S   |  4 ++--
>  x86/cstart64.S |  4 ++--
>  x86/emulator.c | 38 +++++++++++++++++++-------------------
>  3 files changed, 23 insertions(+), 23 deletions(-)
> 
[...]

Reviewed-by: Thomas Huth <thuth@redhat.com>

Patch

diff --git a/x86/cstart.S b/x86/cstart.S
index c0efc5f..489c561 100644
--- a/x86/cstart.S
+++ b/x86/cstart.S
@@ -149,7 +149,7 @@  save_id:
 ap_start32:
 	setup_segments
 	mov $-4096, %esp
-	lock/xaddl %esp, smp_stacktop
+	lock xaddl %esp, smp_stacktop
 	setup_percpu_area
 	call prepare_32
 	call reset_apic
@@ -206,7 +206,7 @@  ap_init:
 	lea sipi_entry, %esi
 	xor %edi, %edi
 	mov $(sipi_end - sipi_entry), %ecx
-	rep/movsb
+	rep movsb
 	mov $APIC_DEFAULT_PHYS_BASE, %eax
 	movl $(APIC_DEST_ALLBUT | APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT), APIC_ICR(%eax)
 	movl $(APIC_DEST_ALLBUT | APIC_DEST_PHYSICAL | APIC_DM_STARTUP), APIC_ICR(%eax)
diff --git a/x86/cstart64.S b/x86/cstart64.S
index 2d16688..25a296c 100644
--- a/x86/cstart64.S
+++ b/x86/cstart64.S
@@ -226,7 +226,7 @@  sipi_end:
 ap_start32:
 	setup_segments
 	mov $-4096, %esp
-	lock/xaddl %esp, smp_stacktop
+	lock xaddl %esp, smp_stacktop
 	setup_percpu_area
 	call prepare_64
 	ljmpl $8, $ap_start64
@@ -323,7 +323,7 @@  ap_init:
 	lea sipi_entry, %rsi
 	xor %rdi, %rdi
 	mov $(sipi_end - sipi_entry), %rcx
-	rep/movsb
+	rep movsb
 	mov $APIC_DEFAULT_PHYS_BASE, %eax
 	movl $(APIC_DEST_ALLBUT | APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT), APIC_ICR(%rax)
 	movl $(APIC_DEST_ALLBUT | APIC_DEST_PHYSICAL | APIC_DM_STARTUP), APIC_ICR(%rax)
diff --git a/x86/emulator.c b/x86/emulator.c
index 98743d1..e46d97e 100644
--- a/x86/emulator.c
+++ b/x86/emulator.c
@@ -61,71 +61,71 @@  static void test_cmps_one(unsigned char *m1, unsigned char *m3)
 
 	rsi = m1; rdi = m3; rcx = 30;
 	asm volatile("xor %[tmp], %[tmp] \n\t"
-		     "repe/cmpsb"
+		     "repe cmpsb"
 		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
 		     : : "cc");
 	report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, "repe/cmpsb (1)");
 
 	rsi = m1; rdi = m3; rcx = 30;
 	asm volatile("or $1, %[tmp]\n\t" // clear ZF
-		     "repe/cmpsb"
+		     "repe cmpsb"
 		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
 		     : : "cc");
 	report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30,
-	       "repe/cmpsb (1.zf)");
+	       "repe cmpsb (1.zf)");
 
 	rsi = m1; rdi = m3; rcx = 15;
 	asm volatile("xor %[tmp], %[tmp] \n\t"
-		     "repe/cmpsw"
+		     "repe cmpsw"
 		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
 		     : : "cc");
-	report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, "repe/cmpsw (1)");
+	report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, "repe cmpsw (1)");
 
 	rsi = m1; rdi = m3; rcx = 7;
 	asm volatile("xor %[tmp], %[tmp] \n\t"
-		     "repe/cmpsl"
+		     "repe cmpsl"
 		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
 		     : : "cc");
-	report(rcx == 0 && rsi == m1 + 28 && rdi == m3 + 28, "repe/cmpll (1)");
+	report(rcx == 0 && rsi == m1 + 28 && rdi == m3 + 28, "repe cmpll (1)");
 
 	rsi = m1; rdi = m3; rcx = 4;
 	asm volatile("xor %[tmp], %[tmp] \n\t"
-		     "repe/cmpsq"
+		     "repe cmpsq"
 		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
 		     : : "cc");
-	report(rcx == 0 && rsi == m1 + 32 && rdi == m3 + 32, "repe/cmpsq (1)");
+	report(rcx == 0 && rsi == m1 + 32 && rdi == m3 + 32, "repe cmpsq (1)");
 
 	rsi = m1; rdi = m3; rcx = 130;
 	asm volatile("xor %[tmp], %[tmp] \n\t"
-		     "repe/cmpsb"
+		     "repe cmpsb"
 		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
 		     : : "cc");
 	report(rcx == 29 && rsi == m1 + 101 && rdi == m3 + 101,
-	       "repe/cmpsb (2)");
+	       "repe cmpsb (2)");
 
 	rsi = m1; rdi = m3; rcx = 65;
 	asm volatile("xor %[tmp], %[tmp] \n\t"
-		     "repe/cmpsw"
+		     "repe cmpsw"
 		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
 		     : : "cc");
 	report(rcx == 14 && rsi == m1 + 102 && rdi == m3 + 102,
-	       "repe/cmpsw (2)");
+	       "repe cmpsw (2)");
 
 	rsi = m1; rdi = m3; rcx = 32;
 	asm volatile("xor %[tmp], %[tmp] \n\t"
-		     "repe/cmpsl"
+		     "repe cmpsl"
 		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
 		     : : "cc");
 	report(rcx == 6 && rsi == m1 + 104 && rdi == m3 + 104,
-	       "repe/cmpll (2)");
+	       "repe cmpll (2)");
 
 	rsi = m1; rdi = m3; rcx = 16;
 	asm volatile("xor %[tmp], %[tmp] \n\t"
-		     "repe/cmpsq"
+		     "repe cmpsq"
 		     : "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
 		     : : "cc");
 	report(rcx == 3 && rsi == m1 + 104 && rdi == m3 + 104,
-	       "repe/cmpsq (2)");
+	       "repe cmpsq (2)");
 
 }
 
@@ -304,8 +304,8 @@  static void test_ljmp(void *mem)
     volatile int res = 1;
 
     *(unsigned long**)m = &&jmpf;
-    asm volatile ("data16/mov %%cs, %0":"=m"(*(m + sizeof(unsigned long))));
-    asm volatile ("rex64/ljmp *%0"::"m"(*m));
+    asm volatile ("data16 mov %%cs, %0":"=m"(*(m + sizeof(unsigned long))));
+    asm volatile ("rex64 ljmp *%0"::"m"(*m));
     res = 0;
 jmpf:
     report(res, "ljmp");