[v5,2/4] arm64: KVM: add accessors to track guest/host only counters

Message ID 1544023815-16958-3-git-send-email-andrew.murray@arm.com (mailing list archive)
State New, archived
Series arm64: Support perf event modifiers :G and :H

Commit Message

Andrew Murray Dec. 5, 2018, 3:30 p.m. UTC
In order to efficiently enable/disable guest/host only perf counters
at guest entry/exit we add bitfields to kvm_cpu_context for guest and
host events as well as accessors for updating them.

Signed-off-by: Andrew Murray <andrew.murray@arm.com>
---
 arch/arm64/include/asm/kvm_host.h | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

Comments

Suzuki K Poulose Dec. 6, 2018, 5:21 p.m. UTC | #1
Hi Andrew,

On 05/12/2018 15:30, Andrew Murray wrote:
> In order to efficiently enable/disable guest/host only perf counters
> at guest entry/exit we add bitfields to kvm_cpu_context for guest and
> host events as well as accessors for updating them.
> 
> Signed-off-by: Andrew Murray <andrew.murray@arm.com>
> ---
>   arch/arm64/include/asm/kvm_host.h | 24 ++++++++++++++++++++++++
>   1 file changed, 24 insertions(+)
> 
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 1550192..800c87b 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -203,6 +203,8 @@ struct kvm_cpu_context {
>   	};
>   
>   	struct kvm_vcpu *__hyp_running_vcpu;
> +	u32 events_host;
> +	u32 events_guest;
>   };
>   
>   typedef struct kvm_cpu_context kvm_cpu_context_t;
> @@ -467,11 +469,33 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
>   void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
>   void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
>   
> +#define KVM_PMU_EVENTS_HOST	1
> +#define KVM_PMU_EVENTS_GUEST	2
> +
>   #ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
>   static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
>   {
>   	return kvm_arch_vcpu_run_map_fp(vcpu);
>   }
> +static inline void kvm_set_pmu_events(u32 set, int flags)
> +{
> +	kvm_cpu_context_t *ctx = this_cpu_ptr(&kvm_host_cpu_state);
> +
> +	if (flags & KVM_PMU_EVENTS_HOST)
> +		ctx->events_host |= set;
> +	if (flags & KVM_PMU_EVENTS_GUEST)
> +		ctx->events_guest |= set;

You only ever seem to pass a single flag at a time in the next patch.
Either we can batch the calls in the next patch, or make this a real value and
not a flag, e.g. enum { HOST, GUEST } or even a bool.

I think the former sounds better.

Cheers
Suzuki
Suzuki K Poulose Dec. 6, 2018, 6:51 p.m. UTC | #2
On 06/12/2018 17:21, Suzuki K Poulose wrote:
> Hi Andrew,
> 
> On 05/12/2018 15:30, Andrew Murray wrote:
>> In order to efficiently enable/disable guest/host only perf counters
>> at guest entry/exit we add bitfields to kvm_cpu_context for guest and
>> host events as well as accessors for updating them.
>>
>> Signed-off-by: Andrew Murray <andrew.murray@arm.com>
>> ---
>>    arch/arm64/include/asm/kvm_host.h | 24 ++++++++++++++++++++++++
>>    1 file changed, 24 insertions(+)
>>
>> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
>> index 1550192..800c87b 100644
>> --- a/arch/arm64/include/asm/kvm_host.h
>> +++ b/arch/arm64/include/asm/kvm_host.h
>> @@ -203,6 +203,8 @@ struct kvm_cpu_context {
>>    	};
>>    
>>    	struct kvm_vcpu *__hyp_running_vcpu;
>> +	u32 events_host;
>> +	u32 events_guest;
>>    };
>>    
>>    typedef struct kvm_cpu_context kvm_cpu_context_t;
>> @@ -467,11 +469,33 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
>>    void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
>>    void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
>>    
>> +#define KVM_PMU_EVENTS_HOST	1
>> +#define KVM_PMU_EVENTS_GUEST	2
>> +
>>    #ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
>>    static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
>>    {
>>    	return kvm_arch_vcpu_run_map_fp(vcpu);
>>    }
>> +static inline void kvm_set_pmu_events(u32 set, int flags)
>> +{
>> +	kvm_cpu_context_t *ctx = this_cpu_ptr(&kvm_host_cpu_state);
>> +
>> +	if (flags & KVM_PMU_EVENTS_HOST)
>> +		ctx->events_host |= set;
>> +	if (flags & KVM_PMU_EVENTS_GUEST)
>> +		ctx->events_guest |= set;
> 
> You only ever seem to pass a single flag at a time in the next patch.
> Either we can batch the calls in the next patch, or make this a real value and
> not a flag, e.g. enum { HOST, GUEST } or even a bool.
> 
> I think the former sounds better.

Having another look at patch 3, we can never combine the set() calls for host &
guest events, so we should go for the latter.

Suzuki
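
For illustration, the "latter" option described above — taking the target
context as a plain enum (or bool) rather than a bitmask of flags — might look
roughly like the sketch below. The enum and its value names are hypothetical,
not code from this series, and as the follow-up messages show this is not the
shape that was ultimately agreed:

        /*
         * Hypothetical alternative: take the target context as an enum
         * rather than a bitmask of flags.
         */
        enum kvm_pmu_events_target {
                KVM_PMU_EVENTS_TARGET_HOST,
                KVM_PMU_EVENTS_TARGET_GUEST,
        };

        static inline void kvm_set_pmu_events(u32 set,
                                              enum kvm_pmu_events_target target)
        {
                kvm_cpu_context_t *ctx = this_cpu_ptr(&kvm_host_cpu_state);

                if (target == KVM_PMU_EVENTS_TARGET_HOST)
                        ctx->events_host |= set;
                else
                        ctx->events_guest |= set;
        }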
Andrew Murray Dec. 6, 2018, 7:21 p.m. UTC | #3
On Thu, Dec 06, 2018 at 06:51:37PM +0000, Suzuki K Poulose wrote:
> 
> 
> On 06/12/2018 17:21, Suzuki K Poulose wrote:
> > Hi Andrew,
> > 
> > On 05/12/2018 15:30, Andrew Murray wrote:
> > > In order to efficiently enable/disable guest/host only perf counters
> > > at guest entry/exit we add bitfields to kvm_cpu_context for guest and
> > > host events as well as accessors for updating them.
> > > 
> > > Signed-off-by: Andrew Murray <andrew.murray@arm.com>
> > > ---
> > >    arch/arm64/include/asm/kvm_host.h | 24 ++++++++++++++++++++++++
> > >    1 file changed, 24 insertions(+)
> > > 
> > > diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> > > index 1550192..800c87b 100644
> > > --- a/arch/arm64/include/asm/kvm_host.h
> > > +++ b/arch/arm64/include/asm/kvm_host.h
> > > @@ -203,6 +203,8 @@ struct kvm_cpu_context {
> > >    	};
> > >    	struct kvm_vcpu *__hyp_running_vcpu;
> > > +	u32 events_host;
> > > +	u32 events_guest;
> > >    };
> > >    typedef struct kvm_cpu_context kvm_cpu_context_t;
> > > @@ -467,11 +469,33 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
> > >    void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
> > >    void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
> > > +#define KVM_PMU_EVENTS_HOST	1
> > > +#define KVM_PMU_EVENTS_GUEST	2
> > > +
> > >    #ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
> > >    static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
> > >    {
> > >    	return kvm_arch_vcpu_run_map_fp(vcpu);
> > >    }
> > > +static inline void kvm_set_pmu_events(u32 set, int flags)
> > > +{
> > > +	kvm_cpu_context_t *ctx = this_cpu_ptr(&kvm_host_cpu_state);
> > > +
> > > +	if (flags & KVM_PMU_EVENTS_HOST)
> > > +		ctx->events_host |= set;
> > > +	if (flags & KVM_PMU_EVENTS_GUEST)
> > > +		ctx->events_guest |= set;
> > 
> > You only ever seem to pass a single flag at a time in the next patch.
> > Either we can batch the calls in the next patch, or make this a real value and
> > not a flag, e.g. enum { HOST, GUEST } or even a bool.
> > 
> > I think the former sounds better.
> 
> Having another look at patch 3, we can never combine the set() calls for host &
> guest events, so we should go for the latter.

Well, we could always do this...

        u32 flags = 0;
        if (!attr->exclude_host)
                flags |= KVM_PMU_EVENTS_HOST;
        if (!attr->exclude_guest)
                flags |= KVM_PMU_EVENTS_GUEST;

        kvm_set_pmu_events(counter_bits, flags);

Thanks,

Andrew Murray

> 
> Suzuki
>
Suzuki K Poulose Dec. 7, 2018, 2:08 p.m. UTC | #4
On 06/12/2018 19:21, Andrew Murray wrote:
> On Thu, Dec 06, 2018 at 06:51:37PM +0000, Suzuki K Poulose wrote:
>>
>>
>> On 06/12/2018 17:21, Suzuki K Poulose wrote:
>>> Hi Andrew,
>>>
>>> On 05/12/2018 15:30, Andrew Murray wrote:
>>>> In order to efficiently enable/disable guest/host only perf counters
>>>> at guest entry/exit we add bitfields to kvm_cpu_context for guest and
>>>> host events as well as accessors for updating them.
>>>>
>>>> Signed-off-by: Andrew Murray <andrew.murray@arm.com>
>>>> ---
>>>>     arch/arm64/include/asm/kvm_host.h | 24 ++++++++++++++++++++++++
>>>>     1 file changed, 24 insertions(+)
>>>>
>>>> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
>>>> index 1550192..800c87b 100644
>>>> --- a/arch/arm64/include/asm/kvm_host.h
>>>> +++ b/arch/arm64/include/asm/kvm_host.h
>>>> @@ -203,6 +203,8 @@ struct kvm_cpu_context {
>>>>     	};
>>>>     	struct kvm_vcpu *__hyp_running_vcpu;
>>>> +	u32 events_host;
>>>> +	u32 events_guest;
>>>>     };
>>>>     typedef struct kvm_cpu_context kvm_cpu_context_t;
>>>> @@ -467,11 +469,33 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
>>>>     void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
>>>>     void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
>>>> +#define KVM_PMU_EVENTS_HOST	1
>>>> +#define KVM_PMU_EVENTS_GUEST	2
>>>> +
>>>>     #ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
>>>>     static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
>>>>     {
>>>>     	return kvm_arch_vcpu_run_map_fp(vcpu);
>>>>     }
>>>> +static inline void kvm_set_pmu_events(u32 set, int flags)
>>>> +{
>>>> +	kvm_cpu_context_t *ctx = this_cpu_ptr(&kvm_host_cpu_state);
>>>> +
>>>> +	if (flags & KVM_PMU_EVENTS_HOST)
>>>> +		ctx->events_host |= set;
>>>> +	if (flags & KVM_PMU_EVENTS_GUEST)
>>>> +		ctx->events_guest |= set;
>>>
>>> You only ever seem to pass a single flag at a time in the next patch.
>>> Either we can batch the calls in the next patch, or make this a real value and
>>> not a flag, e.g. enum { HOST, GUEST } or even a bool.
>>>
>>> I think the former sounds better.
>>
>> Having another look at patch 3, we can never combine the set() calls for host &
>> guest events, so we should go for the latter.
> 
> Well, we could always do this...
> 
>          u32 flags = 0;
>          if (!attr->exclude_host)
>                  flags |= KVM_PMU_EVENTS_HOST;
>          if (!attr->exclude_guest)
>                  flags |= KVM_PMU_EVENTS_GUEST;
> 
>          kvm_set_pmu_events(counter_bits, flags);

Yes, I think this should be fine.

Cheers
Suzuki
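
Taken together with the kvm_clr_pmu_events() accessor from the patch below,
the agreed calling pattern at a PMU driver call site might look roughly as
follows. This is a sketch of the pattern from Andrew's reply above, not a
quote from patch 3; attr, counter_bits and the enable/disable split are
assumed context:

        /* When a perf event is enabled: record which contexts it counts in. */
        u32 flags = 0;

        if (!attr->exclude_host)
                flags |= KVM_PMU_EVENTS_HOST;
        if (!attr->exclude_guest)
                flags |= KVM_PMU_EVENTS_GUEST;

        kvm_set_pmu_events(counter_bits, flags);

        /* When the event is disabled: drop it from both tracking masks. */
        kvm_clr_pmu_events(counter_bits);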

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 1550192..800c87b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -203,6 +203,8 @@  struct kvm_cpu_context {
 	};
 
 	struct kvm_vcpu *__hyp_running_vcpu;
+	u32 events_host;
+	u32 events_guest;
 };
 
 typedef struct kvm_cpu_context kvm_cpu_context_t;
@@ -467,11 +469,33 @@  void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
 
+#define KVM_PMU_EVENTS_HOST	1
+#define KVM_PMU_EVENTS_GUEST	2
+
 #ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
 static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 {
 	return kvm_arch_vcpu_run_map_fp(vcpu);
 }
+static inline void kvm_set_pmu_events(u32 set, int flags)
+{
+	kvm_cpu_context_t *ctx = this_cpu_ptr(&kvm_host_cpu_state);
+
+	if (flags & KVM_PMU_EVENTS_HOST)
+		ctx->events_host |= set;
+	if (flags & KVM_PMU_EVENTS_GUEST)
+		ctx->events_guest |= set;
+}
+static inline void kvm_clr_pmu_events(u32 clr)
+{
+	kvm_cpu_context_t *ctx = this_cpu_ptr(&kvm_host_cpu_state);
+
+	ctx->events_host &= ~clr;
+	ctx->events_guest &= ~clr;
+}
+#else
+static inline void kvm_set_pmu_events(u32 set, int flags) {}
+static inline void kvm_clr_pmu_events(u32 clr) {}
 #endif
 
 static inline void kvm_arm_vhe_guest_enter(void)