[10/15] KVM: SVM: Add intercept checks for SVM instructions

Message ID 1301667024-29420-11-git-send-email-joerg.roedel@amd.com (mailing list archive)

Commit Message

Joerg Roedel April 1, 2011, 2:10 p.m. UTC

Patch

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 153f17b..363d7c7 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -75,6 +75,8 @@ 
 #define Stack       (1<<13)     /* Stack instruction (push/pop) */
 #define Group       (1<<14)     /* Bits 3:5 of modrm byte extend opcode */
 #define GroupDual   (1<<15)     /* Alternate decoding of mod == 3 */
+#define RMExt       (1<<16)     /* Opcode extension in ModRM r/m if mod == 3 */
+
 /* Misc flags */
 #define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
 #define VendorSpecific (1<<22) /* Vendor specific instruction */
@@ -2483,11 +2485,44 @@  static int em_check_perm_dr_write(struct x86_emulate_ctxt *ctxt)
 	return em_check_perm_dr_read(ctxt);
 }
 
+static int check_efer_svme(struct x86_emulate_ctxt *ctxt)
+{
+	u64 efer;
+
+	ctxt->ops->get_msr(ctxt->vcpu, MSR_EFER, &efer);
+
+	if (!(efer & EFER_SVME))
+		return emulate_ud(ctxt);
+
+	return X86EMUL_CONTINUE;
+}
+
+static int em_check_perm_vmrun_save_load(struct x86_emulate_ctxt *ctxt)
+{
+	u64 rax = kvm_register_read(ctxt->vcpu, VCPU_REGS_RAX);
+
+	/* Valid physical address? */
+	if (rax & 0xffff000000000000)
+		return emulate_gp(ctxt, 0);
+
+	return check_efer_svme(ctxt);
+}
+
+#define em_check_perm_vmrun	em_check_perm_vmrun_save_load
+#define em_check_perm_vmload	em_check_perm_vmrun_save_load
+#define em_check_perm_vmsave	em_check_perm_vmrun_save_load
+#define em_check_perm_vmmcall	check_efer_svme
+#define em_check_perm_stgi	check_efer_svme
+#define em_check_perm_clgi	check_efer_svme
+#define em_check_perm_skinit	check_efer_svme
+#define em_check_perm_invlpga	check_efer_svme
+
 #define D(_y) { .flags = (_y) }
 #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
 #define DIP(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i, \
 		      .check_perm = em_check_perm_##_i }
 #define N    D(0)
+#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
 #define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
@@ -2507,6 +2542,17 @@  static int em_check_perm_dr_write(struct x86_emulate_ctxt *ctxt)
 		D2bv(((_f) & ~Lock) | DstAcc | SrcImm)
 
 
+static struct opcode group7_rm3[] = {
+	DIP(SrcNone | ModRM | Prot | Priv, vmrun),
+	DIP(SrcNone | ModRM | Prot       , vmmcall),
+	DIP(SrcNone | ModRM | Prot | Priv, vmload),
+	DIP(SrcNone | ModRM | Prot | Priv, vmsave),
+	DIP(SrcNone | ModRM | Prot | Priv, stgi),
+	DIP(SrcNone | ModRM | Prot | Priv, clgi),
+	DIP(SrcNone | ModRM | Prot | Priv, skinit),
+	DIP(SrcNone | ModRM | Prot | Priv, invlpga),
+};
+
 static struct opcode group1[] = {
 	X7(D(Lock)), N
 };
@@ -2551,7 +2597,7 @@  static struct group_dual group7 = { {
 	DI(SrcMem | ModRM | ByteOp | Priv | NoAccess, invlpg),
 }, {
 	D(SrcNone | ModRM | Priv | VendorSpecific), N,
-	N, D(SrcNone | ModRM | Priv | VendorSpecific),
+	N, EXT(0, group7_rm3),
 	DI(SrcNone | ModRM | DstMem | Mov, smsw), N,
 	DI(SrcMem16 | ModRM | Mov | Priv, lmsw), N,
 } };
@@ -2746,6 +2792,7 @@  static struct opcode twobyte_table[256] = {
 #undef G
 #undef GD
 #undef I
+#undef EXT
 
 #undef D2bv
 #undef I2bv
@@ -2923,6 +2970,12 @@  done_prefixes:
 			opcode = g_mod3[goffset];
 		else
 			opcode = g_mod012[goffset];
+
+		if (opcode.flags & RMExt) {
+			goffset = c->modrm & 7;
+			opcode = opcode.u.group[goffset];
+		}
+
 		c->d |= opcode.flags;
 	}
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ce251c9..b98d00b 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3892,6 +3892,14 @@  static struct __x86_intercept {
 	[x86_intercept_sidt]		= POST_EX(SVM_EXIT_IDTR_READ),
 	[x86_intercept_lgdt]		= POST_EX(SVM_EXIT_GDTR_WRITE),
 	[x86_intercept_lidt]		= POST_EX(SVM_EXIT_IDTR_WRITE),
+	[x86_intercept_vmrun]		= POST_EX(SVM_EXIT_VMRUN),
+	[x86_intercept_vmmcall]		= POST_EX(SVM_EXIT_VMMCALL),
+	[x86_intercept_vmload]		= POST_EX(SVM_EXIT_VMLOAD),
+	[x86_intercept_vmsave]		= POST_EX(SVM_EXIT_VMSAVE),
+	[x86_intercept_stgi]		= POST_EX(SVM_EXIT_STGI),
+	[x86_intercept_clgi]		= POST_EX(SVM_EXIT_CLGI),
+	[x86_intercept_skinit]		= POST_EX(SVM_EXIT_SKINIT),
+	[x86_intercept_invlpga]		= POST_EX(SVM_EXIT_INVLPGA),
 };
 
 #undef POST_EX
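
For reference, below is a small standalone sketch (not part of the patch, and using made-up names such as group7_rm3_names and decode_0f01_mod3 rather than the kernel's structures) of the lookup the new RMExt path performs: for the 0f 01 opcode group, mod == 3 with reg == 3 selects the group7_rm3 table, and the ModRM r/m bits pick the individual SVM instruction, mirroring the goffset = c->modrm & 7 step in the decoder hunk above.

#include <stdio.h>
#include <stdint.h>

/* Same ordering as the group7_rm3 opcode table in the patch. */
static const char *group7_rm3_names[8] = {
	"vmrun", "vmmcall", "vmload", "vmsave",
	"stgi",  "clgi",    "skinit", "invlpga",
};

/* Decode a 0f 01 instruction from its ModRM byte. */
static const char *decode_0f01_mod3(uint8_t modrm)
{
	uint8_t mod = modrm >> 6;        /* addressing mode            */
	uint8_t reg = (modrm >> 3) & 7;  /* opcode extension (/digit)  */
	uint8_t rm  = modrm & 7;         /* second-level extension     */

	if (mod != 3 || reg != 3)
		return "not in the 0f 01 /3 (SVM) group";

	/* Same selection the emulator makes: goffset = c->modrm & 7 */
	return group7_rm3_names[rm];
}

int main(void)
{
	printf("0f 01 d8 -> %s\n", decode_0f01_mod3(0xd8)); /* VMRUN   */
	printf("0f 01 df -> %s\n", decode_0f01_mod3(0xdf)); /* INVLPGA */
	return 0;
}

Running the sketch prints vmrun for 0f 01 d8 and invlpga for 0f 01 df, matching the encodings covered by the group7_rm3 table.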