@@ -781,14 +781,14 @@ static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read,
 
 	/* Set the shadow bitmaps to the desired intercept states */
 	if (read)
-		__set_bit(slot, svm->shadow_msr_intercept.read);
-	else
 		__clear_bit(slot, svm->shadow_msr_intercept.read);
+	else
+		__set_bit(slot, svm->shadow_msr_intercept.read);
 
 	if (write)
-		__set_bit(slot, svm->shadow_msr_intercept.write);
-	else
 		__clear_bit(slot, svm->shadow_msr_intercept.write);
+	else
+		__set_bit(slot, svm->shadow_msr_intercept.write);
 }
 
 static bool valid_msr_intercept(u32 index)
@@ -934,9 +934,10 @@ static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
 	 */
 	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
 		u32 msr = direct_access_msrs[i].index;
-		u32 read = test_bit(i, svm->shadow_msr_intercept.read);
-		u32 write = test_bit(i, svm->shadow_msr_intercept.write);
+		u32 read = !test_bit(i, svm->shadow_msr_intercept.read);
+		u32 write = !test_bit(i, svm->shadow_msr_intercept.write);
 
+		/* FIXME: Align the polarity of the bitmaps and params. */
 		set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write);
 	}
 }
@@ -1453,6 +1454,10 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
 	if (err)
 		goto error_free_vmsa_page;
 
+	/* All MSRs start out in the "intercepted" state. */
+	bitmap_fill(svm->shadow_msr_intercept.read, MAX_DIRECT_ACCESS_MSRS);
+	bitmap_fill(svm->shadow_msr_intercept.write, MAX_DIRECT_ACCESS_MSRS);
+
 	svm->msrpm = svm_vcpu_alloc_msrpm();
 	if (!svm->msrpm) {
 		err = -ENOMEM;
Note, a "FIXME" tag was added to svm_msr_filter_changed(). This will be addressed later in the series after the VMX style MSR intercepts are added to SVM. Signed-off-by: Sean Christopherson <seanjc@google.com> Co-developed-by: Aaron Lewis <aaronlewis@google.com> --- arch/x86/kvm/svm/svm.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-)