--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1389,9 +1389,13 @@ asmlinkage void kvm_spurious_fault(void);
".pushsection .fixup, \"ax\" \n" \
"667: \n\t" \
cleanup_insn "\n\t" \
- "cmpb $0, kvm_rebooting \n\t" \
+ "cmpb $0, kvm_rebooting" __ASM_SEL(,(%%rip)) " \n\t" \
"jne 668b \n\t" \
- __ASM_SIZE(push) " $666b \n\t" \
+ __ASM_SIZE(push) "$0 \n\t" \
+ __ASM_SIZE(push) "%%" _ASM_AX " \n\t" \
+ _ASM_MOVABS " $666b, %%" _ASM_AX "\n\t" \
+ _ASM_MOV " %%" _ASM_AX ", " __ASM_SEL(4,8) "(%%" _ASM_SP ") \n\t" \
+ __ASM_SIZE(pop) "%%" _ASM_AX " \n\t" \
"call kvm_spurious_fault \n\t" \
".popsection \n\t" \
_ASM_EXTABLE(666b, 667b)
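The _ASM_MOVABS macro is introduced earlier in this series (in arch/x86/include/asm/asm.h, presumably along the lines of __ASM_SEL(movl, movabsq)), so on a CONFIG_X86_64 build the new fixup path assembles to roughly the following (a sketch of the expansion, not exact compiler output):

    pushq   $0                      # reserve a stack slot for the address of 666b
    pushq   %rax                    # preserve RAX, used as scratch below
    movabsq $666b, %rax             # materialize the absolute address; a plain
                                    # "push $666b" would need a 32-bit absolute
                                    # relocation, which is not PIE compatible
    movq    %rax, 8(%rsp)           # store the address into the reserved slot
    popq    %rax                    # restore RAX
    call    kvm_spurious_fault      # the saved 666b keeps the faulting insn
                                    # visible on the stack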
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -726,8 +726,10 @@ asm(
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
-"movq __per_cpu_offset(,%rdi,8), %rax;"
-"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
+"leaq __per_cpu_offset(%rip), %rax;"
+"movq (%rax,%rdi,8), %rax;"
+"addq " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rip), %rax;"
+"cmpb $0, (%rax);"
"setne %al;"
"ret;"
".popsection");
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -707,12 +707,12 @@ static u32 svm_msrpm_offset(u32 msr)
static inline void clgi(void)
{
- asm volatile (__ex(SVM_CLGI));
+ asm volatile (__ex(SVM_CLGI) : :);
}
static inline void stgi(void)
{
- asm volatile (__ex(SVM_STGI));
+ asm volatile (__ex(SVM_STGI) : :);
}
static inline void invlpga(unsigned long addr, u32 asid)
Change the assembly code to use only relative references to symbols, as
required for the kernel to be PIE compatible. The new _ASM_MOVABS macro is
used to get the address of a symbol on both 32-bit and 64-bit with PIE
support.

Position Independent Executable (PIE) support will allow extending the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier <thgarnie@google.com>
---
 arch/x86/include/asm/kvm_host.h | 8 ++++++--
 arch/x86/kernel/kvm.c           | 6 ++++--
 arch/x86/kvm/svm.c              | 4 ++--
 3 files changed, 12 insertions(+), 6 deletions(-)