@@ -149,7 +149,7 @@ save_id:
ap_start32:
setup_segments
mov $-4096, %esp
- lock/xaddl %esp, smp_stacktop
+ lock xaddl %esp, smp_stacktop
setup_percpu_area
call prepare_32
call reset_apic
@@ -206,7 +206,7 @@ ap_init:
lea sipi_entry, %esi
xor %edi, %edi
mov $(sipi_end - sipi_entry), %ecx
- rep/movsb
+ rep movsb
mov $APIC_DEFAULT_PHYS_BASE, %eax
movl $(APIC_DEST_ALLBUT | APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT), APIC_ICR(%eax)
movl $(APIC_DEST_ALLBUT | APIC_DEST_PHYSICAL | APIC_DM_STARTUP), APIC_ICR(%eax)
@@ -226,7 +226,7 @@ sipi_end:
ap_start32:
setup_segments
mov $-4096, %esp
- lock/xaddl %esp, smp_stacktop
+ lock xaddl %esp, smp_stacktop
setup_percpu_area
call prepare_64
ljmpl $8, $ap_start64
@@ -323,7 +323,7 @@ ap_init:
lea sipi_entry, %rsi
xor %rdi, %rdi
mov $(sipi_end - sipi_entry), %rcx
- rep/movsb
+ rep movsb
mov $APIC_DEFAULT_PHYS_BASE, %eax
movl $(APIC_DEST_ALLBUT | APIC_DEST_PHYSICAL | APIC_DM_INIT | APIC_INT_ASSERT), APIC_ICR(%rax)
movl $(APIC_DEST_ALLBUT | APIC_DEST_PHYSICAL | APIC_DM_STARTUP), APIC_ICR(%rax)
@@ -61,71 +61,71 @@ static void test_cmps_one(unsigned char *m1, unsigned char *m3)
rsi = m1; rdi = m3; rcx = 30;
asm volatile("xor %[tmp], %[tmp] \n\t"
- "repe/cmpsb"
+ "repe cmpsb"
: "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
: : "cc");
report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, "repe/cmpsb (1)");
rsi = m1; rdi = m3; rcx = 30;
asm volatile("or $1, %[tmp]\n\t" // clear ZF
- "repe/cmpsb"
+ "repe cmpsb"
: "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
: : "cc");
report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30,
- "repe/cmpsb (1.zf)");
+ "repe cmpsb (1.zf)");
rsi = m1; rdi = m3; rcx = 15;
asm volatile("xor %[tmp], %[tmp] \n\t"
- "repe/cmpsw"
+ "repe cmpsw"
: "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
: : "cc");
- report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, "repe/cmpsw (1)");
+ report(rcx == 0 && rsi == m1 + 30 && rdi == m3 + 30, "repe cmpsw (1)");
rsi = m1; rdi = m3; rcx = 7;
asm volatile("xor %[tmp], %[tmp] \n\t"
- "repe/cmpsl"
+ "repe cmpsl"
: "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
: : "cc");
- report(rcx == 0 && rsi == m1 + 28 && rdi == m3 + 28, "repe/cmpll (1)");
+ report(rcx == 0 && rsi == m1 + 28 && rdi == m3 + 28, "repe cmpsl (1)");
rsi = m1; rdi = m3; rcx = 4;
asm volatile("xor %[tmp], %[tmp] \n\t"
- "repe/cmpsq"
+ "repe cmpsq"
: "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
: : "cc");
- report(rcx == 0 && rsi == m1 + 32 && rdi == m3 + 32, "repe/cmpsq (1)");
+ report(rcx == 0 && rsi == m1 + 32 && rdi == m3 + 32, "repe cmpsq (1)");
rsi = m1; rdi = m3; rcx = 130;
asm volatile("xor %[tmp], %[tmp] \n\t"
- "repe/cmpsb"
+ "repe cmpsb"
: "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
: : "cc");
report(rcx == 29 && rsi == m1 + 101 && rdi == m3 + 101,
- "repe/cmpsb (2)");
+ "repe cmpsb (2)");
rsi = m1; rdi = m3; rcx = 65;
asm volatile("xor %[tmp], %[tmp] \n\t"
- "repe/cmpsw"
+ "repe cmpsw"
: "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
: : "cc");
report(rcx == 14 && rsi == m1 + 102 && rdi == m3 + 102,
- "repe/cmpsw (2)");
+ "repe cmpsw (2)");
rsi = m1; rdi = m3; rcx = 32;
asm volatile("xor %[tmp], %[tmp] \n\t"
- "repe/cmpsl"
+ "repe cmpsl"
: "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
: : "cc");
report(rcx == 6 && rsi == m1 + 104 && rdi == m3 + 104,
- "repe/cmpll (2)");
+ "repe cmpsl (2)");
rsi = m1; rdi = m3; rcx = 16;
asm volatile("xor %[tmp], %[tmp] \n\t"
- "repe/cmpsq"
+ "repe cmpsq"
: "+S"(rsi), "+D"(rdi), "+c"(rcx), [tmp]"=&r"(tmp)
: : "cc");
report(rcx == 3 && rsi == m1 + 104 && rdi == m3 + 104,
- "repe/cmpsq (2)");
+ "repe cmpsq (2)");
}
@@ -304,8 +304,8 @@ static void test_ljmp(void *mem)
volatile int res = 1;
*(unsigned long**)m = &&jmpf;
- asm volatile ("data16/mov %%cs, %0":"=m"(*(m + sizeof(unsigned long))));
- asm volatile ("rex64/ljmp *%0"::"m"(*m));
+ asm volatile ("data16 mov %%cs, %0":"=m"(*(m + sizeof(unsigned long))));
+ asm volatile ("rex64 ljmp *%0"::"m"(*m));
res = 0;
jmpf:
report(res, "ljmp");