@@ -1817,6 +1817,9 @@ struct kvm_x86_ops {
int (*enable_l2_tlb_flush)(struct kvm_vcpu *vcpu);
void (*migrate_timers)(struct kvm_vcpu *vcpu);
+
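+	/*
+	 * Vendor-provided list of MSRs that KVM may pass through to the
+	 * guest; common x86 code resolves an MSR to its index in this list
+	 * via kvm_passthrough_msr_slot().
+	 */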
+ const u32 * const possible_passthrough_msrs;
+ const u32 nr_possible_passthrough_msrs;
void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
@@ -755,18 +755,6 @@ static void clr_dr_intercepts(struct vcpu_svm *svm)
recalc_intercepts(svm);
}
-static int direct_access_msr_slot(u32 msr)
-{
- u32 i;
-
- for (i = 0; i < ARRAY_SIZE(direct_access_msrs); i++) {
- if (direct_access_msrs[i] == msr)
- return i;
- }
-
- return -ENOENT;
-}
-
static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
{
u8 bit_write;
@@ -832,7 +820,7 @@ void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
struct vcpu_svm *svm = to_svm(vcpu);
int slot;
- slot = direct_access_msr_slot(msr);
+ slot = kvm_passthrough_msr_slot(msr);
WARN_ON(slot == -ENOENT);
if (slot >= 0) {
/* Set the shadow bitmaps to the desired intercept states */
@@ -871,7 +859,7 @@ void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
struct vcpu_svm *svm = to_svm(vcpu);
int slot;
- slot = direct_access_msr_slot(msr);
+ slot = kvm_passthrough_msr_slot(msr);
WARN_ON(slot == -ENOENT);
if (slot >= 0) {
/* Set the shadow bitmaps to the desired intercept states */
@@ -5165,6 +5153,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.apic_init_signal_blocked = svm_apic_init_signal_blocked,
+ .possible_passthrough_msrs = direct_access_msrs,
+ .nr_possible_passthrough_msrs = ARRAY_SIZE(direct_access_msrs),
.msr_filter_changed = svm_msr_filter_changed,
.complete_emulated_msr = svm_complete_emulated_msr,
@@ -7,6 +7,62 @@
#include "pmu.h"
#include "posted_intr.h"
+/*
+ * List of MSRs that can be directly passed to the guest.
+ * In addition to these, the x2APIC, PT, and LBR MSRs are handled specially.
+ */
+static const u32 vmx_possible_passthrough_msrs[] = {
+ MSR_IA32_SPEC_CTRL,
+ MSR_IA32_PRED_CMD,
+ MSR_IA32_FLUSH_CMD,
+ MSR_IA32_TSC,
+#ifdef CONFIG_X86_64
+ MSR_FS_BASE,
+ MSR_GS_BASE,
+ MSR_KERNEL_GS_BASE,
+ MSR_IA32_XFD,
+ MSR_IA32_XFD_ERR,
+#endif
+ MSR_IA32_SYSENTER_CS,
+ MSR_IA32_SYSENTER_ESP,
+ MSR_IA32_SYSENTER_EIP,
+ MSR_CORE_C1_RES,
+ MSR_CORE_C3_RESIDENCY,
+ MSR_CORE_C6_RESIDENCY,
+ MSR_CORE_C7_RESIDENCY,
+};
+
+void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 i;
+
+ if (!cpu_has_vmx_msr_bitmap())
+ return;
+
+ /*
+ * Redo intercept permissions for MSRs that KVM is passing through to
+ * the guest. Disabling interception will check the new MSR filter and
+ * ensure that KVM enables interception if userspace wants to filter
+ * the MSR. MSRs that KVM is already intercepting don't need to be
+ * refreshed since KVM is going to intercept them regardless of what
+ * userspace wants.
+ */
+ for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
+ u32 msr = vmx_possible_passthrough_msrs[i];
+
+ if (!test_bit(i, vmx->shadow_msr_intercept.read))
+ vmx_disable_intercept_for_msr(vcpu, msr, MSR_TYPE_R);
+
+ if (!test_bit(i, vmx->shadow_msr_intercept.write))
+ vmx_disable_intercept_for_msr(vcpu, msr, MSR_TYPE_W);
+ }
+
+ /* PT MSRs can be passed through iff PT is exposed to the guest. */
+ if (vmx_pt_mode_is_host_guest())
+ pt_update_intercept_for_msr(vcpu);
+}
+
#define VMX_REQUIRED_APICV_INHIBITS \
(BIT(APICV_INHIBIT_REASON_DISABLED) | \
BIT(APICV_INHIBIT_REASON_ABSENT) | \
@@ -152,6 +208,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
.migrate_timers = vmx_migrate_timers,
+ .possible_passthrough_msrs = vmx_possible_passthrough_msrs,
+ .nr_possible_passthrough_msrs = ARRAY_SIZE(vmx_possible_passthrough_msrs),
.msr_filter_changed = vmx_msr_filter_changed,
.complete_emulated_msr = kvm_complete_insn_gp,
@@ -163,31 +163,6 @@ module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \
RTIT_STATUS_BYTECNT))
-/*
- * List of MSRs that can be directly passed to the guest.
- * In addition to these x2apic, PT and LBR MSRs are handled specially.
- */
-static const u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
- MSR_IA32_SPEC_CTRL,
- MSR_IA32_PRED_CMD,
- MSR_IA32_FLUSH_CMD,
- MSR_IA32_TSC,
-#ifdef CONFIG_X86_64
- MSR_FS_BASE,
- MSR_GS_BASE,
- MSR_KERNEL_GS_BASE,
- MSR_IA32_XFD,
- MSR_IA32_XFD_ERR,
-#endif
- MSR_IA32_SYSENTER_CS,
- MSR_IA32_SYSENTER_ESP,
- MSR_IA32_SYSENTER_EIP,
- MSR_CORE_C1_RES,
- MSR_CORE_C3_RESIDENCY,
- MSR_CORE_C6_RESIDENCY,
- MSR_CORE_C7_RESIDENCY,
-};
-
/*
* These 2 parameters are used to config the controls for Pause-Loop Exiting:
* ple_gap: upper bound on the amount of time between two successive
@@ -669,7 +644,7 @@ static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
static int vmx_get_passthrough_msr_slot(u32 msr)
{
- int i;
+ int r;
switch (msr) {
case 0x800 ... 0x8ff:
@@ -692,13 +667,10 @@ static int vmx_get_passthrough_msr_slot(u32 msr)
return -ENOENT;
}
- for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
- if (vmx_possible_passthrough_msrs[i] == msr)
- return i;
- }
+ r = kvm_passthrough_msr_slot(msr);
- WARN(1, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
- return -ENOENT;
+	WARN(r == -ENOENT, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
+ return r;
}
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
@@ -4145,37 +4117,6 @@ void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
}
}
-void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- u32 i;
-
- if (!cpu_has_vmx_msr_bitmap())
- return;
-
- /*
- * Redo intercept permissions for MSRs that KVM is passing through to
- * the guest. Disabling interception will check the new MSR filter and
- * ensure that KVM enables interception if usersepace wants to filter
- * the MSR. MSRs that KVM is already intercepting don't need to be
- * refreshed since KVM is going to intercept them regardless of what
- * userspace wants.
- */
- for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
- u32 msr = vmx_possible_passthrough_msrs[i];
-
- if (!test_bit(i, vmx->shadow_msr_intercept.read))
- vmx_disable_intercept_for_msr(vcpu, msr, MSR_TYPE_R);
-
- if (!test_bit(i, vmx->shadow_msr_intercept.write))
- vmx_disable_intercept_for_msr(vcpu, msr, MSR_TYPE_W);
- }
-
- /* PT MSRs can be passed through iff PT is exposed to the guest. */
- if (vmx_pt_mode_is_host_guest())
- pt_update_intercept_for_msr(vcpu);
-}
-
static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
int pi_vec)
{
@@ -1806,6 +1806,19 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
}
EXPORT_SYMBOL_GPL(kvm_msr_allowed);
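+/*
+ * Look up @msr in the vendor's possible-passthrough list. Returns the MSR's
+ * slot index, or -ENOENT if the MSR can never be passed through.
+ */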
+int kvm_passthrough_msr_slot(u32 msr)
+{
+ u32 i;
+
+ for (i = 0; i < kvm_x86_ops.nr_possible_passthrough_msrs; i++) {
+ if (kvm_x86_ops.possible_passthrough_msrs[i] == msr)
+ return i;
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(kvm_passthrough_msr_slot);
+
/*
* Write @data into the MSR specified by @index. Select MSR specific fault
* checks are bypassed if @host_initiated is %true.
@@ -555,6 +555,7 @@ int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
+int kvm_passthrough_msr_slot(u32 msr);
enum kvm_msr_access {
MSR_TYPE_R = BIT(0),
Move the possible passthrough MSRs to kvm_x86_ops so that they can be
accessed from common x86 code. In order to set the passthrough MSRs in
kvm_x86_ops for VMX, "vmx_possible_passthrough_msrs" had to be relocated
to main.c, and with it vmx_msr_filter_changed() had to be moved as well,
because it uses "vmx_possible_passthrough_msrs".

Co-developed-by: Aaron Lewis <aaronlewis@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/include/asm/kvm_host.h |  3 ++
 arch/x86/kvm/svm/svm.c          | 18 ++-------
 arch/x86/kvm/vmx/main.c         | 58 ++++++++++++++++++++++++++++
 arch/x86/kvm/vmx/vmx.c          | 67 ++-------------------------------
 arch/x86/kvm/x86.c              | 13 +++++++
 arch/x86/kvm/x86.h              |  1 +
 6 files changed, 83 insertions(+), 77 deletions(-)
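For readers following the series out of tree, the ops-table pattern this
patch establishes is small enough to model in isolation. The snippet below
is a standalone userspace sketch, not kernel code: the struct name, the
table contents, and the main() harness are illustrative stand-ins. It
mirrors the slot-index-or--ENOENT contract of kvm_passthrough_msr_slot().

/* Mock of the vendor-list-plus-common-lookup shape used above. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct mock_x86_ops {
	/* Vendor-owned table; "common" code only ever reads it. */
	const uint32_t *possible_passthrough_msrs;
	uint32_t nr_possible_passthrough_msrs;
};

/* Stand-in for a vendor module's table (real MSR indices, for flavor). */
static const uint32_t mock_vendor_msrs[] = {
	0x00000048,	/* MSR_IA32_SPEC_CTRL */
	0x00000049,	/* MSR_IA32_PRED_CMD */
	0x00000010,	/* MSR_IA32_TSC */
};

static const struct mock_x86_ops mock_ops = {
	.possible_passthrough_msrs = mock_vendor_msrs,
	.nr_possible_passthrough_msrs =
		sizeof(mock_vendor_msrs) / sizeof(mock_vendor_msrs[0]),
};

/* Same contract as kvm_passthrough_msr_slot(): index on hit, -ENOENT on miss. */
static int mock_passthrough_msr_slot(uint32_t msr)
{
	uint32_t i;

	for (i = 0; i < mock_ops.nr_possible_passthrough_msrs; i++) {
		if (mock_ops.possible_passthrough_msrs[i] == msr)
			return i;
	}

	return -ENOENT;
}

int main(void)
{
	/* Prints 1 (slot of PRED_CMD), then a negative errno value. */
	printf("0x49  -> %d\n", mock_passthrough_msr_slot(0x49));
	printf("0x123 -> %d\n", mock_passthrough_msr_slot(0x123));
	return 0;
}

The const-pointer-to-const layout in the ops struct keeps the table itself
immutable while still letting each vendor module publish its own list.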