@@ -13,6 +13,10 @@ static char syscalls_ia32[] = {
#include <asm/syscalls_32.h>
};

+#ifdef CONFIG_KVM_GUEST
+#include <asm/kvm_para.h>
+#endif
+
int main(void)
{
#ifdef CONFIG_PARAVIRT
@@ -22,6 +26,13 @@ int main(void)
BLANK();
#endif

+#ifdef CONFIG_KVM_GUEST
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+ OFFSET(KVM_STEAL_TIME_preempted, kvm_steal_time, preempted);
+ BLANK();
+#endif
+#endif
+
#define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry)
ENTRY(bx);
ENTRY(cx);
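
[The OFFSET() line added above relies on the kbuild asm-offsets trick: DEFINE() forces the offsetof() value into the compiler's assembly output as an immediate tagged with a "->" marker, and the build scripts scrape those markers into the generated <asm/asm-offsets.h>. A minimal userspace sketch of the mechanism, for context; the DEFINE()/OFFSET() bodies follow include/linux/kbuild.h, while the struct layout is an assumption modeled on the 4.10-era kvm_steal_time (the patch itself confirms preempted sits at offset 16):

/* offsets-sketch.c: build with "gcc -S offsets-sketch.c -o - | grep -- '->'"
 * and note that the scraped line carries the immediate $16.
 */
#include <stddef.h>
#include <stdint.h>

struct kvm_steal_time {		/* assumed layout; preempted at byte 16 */
	uint64_t steal;
	uint32_t version;
	uint32_t flags;
	uint8_t  preempted;
	uint8_t  u8_pad[3];
	uint32_t pad[11];
};

/* Same shape as the macros in include/linux/kbuild.h. */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

int main(void)
{
	OFFSET(KVM_STEAL_TIME_preempted, kvm_steal_time, preempted);
	return 0;
}

Kbuild post-processes those markers into plain #define lines, which is why the generated constant is equally usable from C, inline asm, and .S files.]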
@@ -600,22 +600,21 @@ PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_
#else
+#include <asm/asm-offsets.h>
+
extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
/*
- * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
- * restoring to/from the stack. It is assumed that the preempted value
- * is at an offset of 16 from the beginning of the kvm_steal_time structure
- * which is verified by the BUILD_BUG_ON() macro below.
+ * Hand-optimized version for x86-64 that avoids saving and restoring
+ * eight 64-bit registers to/from the stack.
*/
-#define PREEMPTED_OFFSET 16
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq __per_cpu_offset(,%rdi,8), %rax;"
-"cmpb $0, " __stringify(PREEMPTED_OFFSET) "+steal_time(%rax);"
+"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne %al;"
"ret;"
".popsection");
@@ -627,11 +626,6 @@ asm(
*/
void __init kvm_spinlock_init(void)
{
-#ifdef CONFIG_X86_64
- BUILD_BUG_ON((offsetof(struct kvm_steal_time, preempted)
- != PREEMPTED_OFFSET) || (sizeof(steal_time.preempted) != 1));
-#endif
-
if (!kvm_para_available())
return;
/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
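
[The deleted BUILD_BUG_ON() existed only to keep the hard-coded 16 honest; once the constant is generated from the real structure by asm-offsets, the check is redundant by construction. As a self-contained illustration of what it verified, with C11 _Static_assert standing in for the kernel macro and the same assumed struct layout as in the earlier sketch:

#include <stddef.h>
#include <stdint.h>

struct kvm_steal_time {		/* assumed layout, as in the earlier sketch */
	uint64_t steal;
	uint32_t version;
	uint32_t flags;
	uint8_t  preempted;
	uint8_t  u8_pad[3];
	uint32_t pad[11];
};

/* What the removed BUILD_BUG_ON() asserted: offset 16, width one byte. */
_Static_assert(offsetof(struct kvm_steal_time, preempted) == 16,
	       "preempted must sit where the old hard-coded asm expected it");
_Static_assert(sizeof(((struct kvm_steal_time *)0)->preempted) == 1,
	       "cmpb in the thunk assumes a one-byte flag");]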