@@ -192,19 +192,22 @@ static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents)
 	return f;
 }
 
-
 static struct kvm_pmu_event_filter *
 create_pmu_event_filter(const uint64_t event_list[], uint32_t nevents,
-			uint32_t action, uint32_t flags)
+			uint32_t action, uint32_t flags,
+			uint32_t fixed_counter_bitmap)
 {
 	struct kvm_pmu_event_filter *f;
 	int i;
 
 	f = alloc_pmu_event_filter(nevents);
 	f->action = action;
+	f->fixed_counter_bitmap = fixed_counter_bitmap;
 	f->flags = flags;
-	for (i = 0; i < nevents; i++)
-		f->events[i] = event_list[i];
+	if (f->nevents) {
+		for (i = 0; i < f->nevents; i++)
+			f->events[i] = event_list[i];
+	}
 
 	return f;
 }
@@ -213,7 +216,7 @@ static struct kvm_pmu_event_filter *event_filter(uint32_t action)
 {
 	return create_pmu_event_filter(event_list,
 				       ARRAY_SIZE(event_list),
-				       action, 0);
+				       action, 0, 0);
 }
 
 /*
@@ -260,7 +263,7 @@ static void test_amd_deny_list(struct kvm_vcpu *vcpu)
 	struct kvm_pmu_event_filter *f;
 	uint64_t count;
 
-	f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
+	f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0, 0);
 	count = test_with_filter(vcpu, f);
 	free(f);
 
@@ -544,7 +547,7 @@ static struct perf_counter run_masked_events_test(struct kvm_vcpu *vcpu,
 
 	f = create_pmu_event_filter(masked_events, nmasked_events,
 				    KVM_PMU_EVENT_ALLOW,
-				    KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+				    KVM_PMU_EVENT_FLAG_MASKED_EVENTS, 0);
 	r.raw = test_with_filter(vcpu, f);
 	free(f);
 
@@ -726,12 +729,14 @@ static void test_masked_events(struct kvm_vcpu *vcpu)
 }
 
 static int run_filter_test(struct kvm_vcpu *vcpu, const uint64_t *events,
-			   uint32_t nevents, uint32_t flags)
+			   uint32_t nevents, uint32_t flags, uint32_t action,
+			   uint32_t fixed_counter_bitmap)
 {
 	struct kvm_pmu_event_filter *f;
 	int r;
 
-	f = create_pmu_event_filter(events, nevents, KVM_PMU_EVENT_ALLOW, flags);
+	f = create_pmu_event_filter(events, nevents, action, flags,
+				    fixed_counter_bitmap);
 	r = __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
 	free(f);
 
@@ -747,14 +752,16 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
 	 * Unfortunately having invalid bits set in event data is expected to
 	 * pass when flags == 0 (bits other than eventsel+umask).
 	 */
-	r = run_filter_test(vcpu, &e, 1, 0);
+	r = run_filter_test(vcpu, &e, 1, 0, KVM_PMU_EVENT_ALLOW, 0);
 	TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
 
-	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+			    KVM_PMU_EVENT_ALLOW, 0);
 	TEST_ASSERT(r != 0, "Invalid PMU Event Filter is expected to fail");
 
 	e = KVM_PMU_ENCODE_MASKED_ENTRY(0xff, 0xff, 0xff, 0xf);
-	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+			    KVM_PMU_EVENT_ALLOW, 0);
 	TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
 }
 
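Illustrative sketch, not part of the patch: with action and fixed_counter_bitmap threaded through run_filter_test() and create_pmu_event_filter(), a caller can now install a filter that carries no general-purpose events at all and only sets the fixed counter bitmap. The function name below, the use of a NULL zero-length event list, and the expectation that KVM accepts such a filter are assumptions made for illustration; only run_filter_test(), KVM_PMU_EVENT_ALLOW, and TEST_ASSERT() come from the code touched by this diff.

static void example_fixed_counter_only_filter(struct kvm_vcpu *vcpu)
{
	int r;

	/*
	 * No general-purpose events: a NULL list with nevents == 0 is fine
	 * because the if (f->nevents) guard in create_pmu_event_filter()
	 * never touches event_list.  Bit 0 of the bitmap names fixed
	 * counter 0 (instructions retired); whether KVM accepts an empty
	 * event list is an assumption of this sketch.
	 */
	r = run_filter_test(vcpu, NULL, 0, 0, KVM_PMU_EVENT_ALLOW, 0x1);
	TEST_ASSERT(r == 0, "Filter with only a fixed counter bitmap was rejected");
}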