Message ID | 20161011184044.28373-2-cov@codeaurora.org (mailing list archive) |
---|---|
State | New, archived |
On 10/11/2016 01:40 PM, Christopher Covington wrote:
> Ensure that reads of the PMCCNTR_EL0 are monotonically increasing,
> even for the smallest delta of two subsequent reads.
>
> Signed-off-by: Christopher Covington <cov@codeaurora.org>
> Reviewed-by: Andrew Jones <drjones@redhat.com>
> ---
>  arm/pmu.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 60 insertions(+)
>
> [...]
>
> +/*
> + * Ensure that the cycle counter progresses between back-to-back reads.
> + */
> +static bool check_cycles_increase(void)
> +{
> +	struct pmu_data pmu = {0};

Compilation error on my machine:

arm/pmu.c: In function ‘check_cycles_increase’:
arm/pmu.c:148:9: error: missing braces around initializer
[-Werror=missing-braces]
  struct pmu_data pmu = {0};

Same for Patch 3.
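As background to the diagnostic: GCC releases before 5 raise
-Wmissing-braces for the universal zero initializer {0} whenever the first
member of a struct is itself an aggregate, such as an anonymous union, and
-Werror promotes that warning to the hard error quoted above. A minimal
sketch of the situation; the field layout below is illustrative, not the
exact struct pmu_data from the series:

```c
#include <stdint.h>

/* Illustrative stand-in for the patch's struct pmu_data: PMCR bit-fields
 * overlaid on the raw register value via an anonymous union. */
struct pmu_data {
	union {
		uint32_t pmcr_el0;
		struct {
			uint32_t enable:1;
			uint32_t unused:31;
		};
	};
};

/* GCC 4.8 with -Wall or -Werror=missing-braces flags this spelling,
 * because the first member is an aggregate and nested braces are
 * expected... */
struct pmu_data pmu_flagged = {0};

/* ...while this spelling zeroes the same bytes and is accepted by both
 * old and new compilers. */
struct pmu_data pmu_accepted = {{0}};
```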
Hi Wei,

On 10/12/2016 11:49 AM, Wei Huang wrote:
> On 10/11/2016 01:40 PM, Christopher Covington wrote:
>> Ensure that reads of the PMCCNTR_EL0 are monotonically increasing,
>> even for the smallest delta of two subsequent reads.
>>
>> [...]
>>
>> +static bool check_cycles_increase(void)
>> +{
>> +	struct pmu_data pmu = {0};
>
> Compilation error on my machine:
>
> arm/pmu.c: In function ‘check_cycles_increase’:
> arm/pmu.c:148:9: error: missing braces around initializer
> [-Werror=missing-braces]
>   struct pmu_data pmu = {0};
>
> Same for Patch 3.

"...So your compiler complains about {0}? Is there a problem besides the
warning? If not, then I'm still a bit inclined to keep the code neat. The
warnings will go away with compiler updates."

https://lists.gnu.org/archive/html/qemu-devel/2015-10/msg06064.html

Thanks,
Cov
On 10/12/2016 01:10 PM, Christopher Covington wrote:
> Hi Wei,
>
> On 10/12/2016 11:49 AM, Wei Huang wrote:
>> On 10/11/2016 01:40 PM, Christopher Covington wrote:
>>> [...]
>>> +static bool check_cycles_increase(void)
>>> +{
>>> +	struct pmu_data pmu = {0};
>>
>> Compilation error on my machine:
>>
>> arm/pmu.c: In function ‘check_cycles_increase’:
>> arm/pmu.c:148:9: error: missing braces around initializer
>> [-Werror=missing-braces]
>>   struct pmu_data pmu = {0};
>>
>> Same for Patch 3.
>
> "...So your compiler complains about {0}? Is there a problem besides the
> warning? If not, then I'm still a bit inclined to keep the code neat. The
> warnings will go away with compiler updates."
>
> https://lists.gnu.org/archive/html/qemu-devel/2015-10/msg06064.html

Indeed, my stock GCC is a bit old (4.8.5); newer versions fix the
diagnostic. But note that this is a compilation error, not just a warning:
it prevents the binary from being built, and such compilers will be in use
for a while yet. Could we use double braces, pmu = {{0}}, as a solution?

-Wei
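For completeness, a few zero-initialization spellings that sidestep the
GCC 4.8 diagnostic, sketched against the illustrative struct pmu_data
from the earlier example; the thread does not record which spelling the
series ultimately adopted:

```c
#include <string.h>

/* Reuses the illustrative struct pmu_data defined in the sketch above. */
struct pmu_data pmu1 = {{0}};	/* nested braces, as Wei suggests */
struct pmu_data pmu2 = {};	/* GNU C empty initializer (standardized in C23) */

void zero_by_memset(void)
{
	struct pmu_data pmu3;

	/* Explicit zeroing avoids the initializer syntax entirely. */
	memset(&pmu3, 0, sizeof(pmu3));
}
```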
diff --git a/arm/pmu.c b/arm/pmu.c
index 42d0ee1..4334de4 100644
--- a/arm/pmu.c
+++ b/arm/pmu.c
@@ -14,6 +14,8 @@
  */
 #include "libcflat.h"
 
+#define NR_SAMPLES 10
+
 #if defined(__arm__)
 static inline uint32_t get_pmcr(void)
 {
@@ -22,6 +24,25 @@ static inline uint32_t get_pmcr(void)
 	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (ret));
 	return ret;
 }
+
+static inline void set_pmcr(uint32_t pmcr)
+{
+	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (pmcr));
+}
+
+/*
+ * While PMCCNTR can be accessed as a 64 bit coprocessor register, returning 64
+ * bits doesn't seem worth the trouble when differential usage of the result is
+ * expected (with differences that can easily fit in 32 bits). So just return
+ * the lower 32 bits of the cycle count in AArch32.
+ */
+static inline unsigned long get_pmccntr(void)
+{
+	unsigned long cycles;
+
+	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (cycles));
+	return cycles;
+}
 #elif defined(__aarch64__)
 static inline uint32_t get_pmcr(void)
 {
@@ -30,6 +51,19 @@ static inline uint32_t get_pmcr(void)
 	asm volatile("mrs %0, pmcr_el0" : "=r" (ret));
 	return ret;
 }
+
+static inline void set_pmcr(uint32_t pmcr)
+{
+	asm volatile("msr pmcr_el0, %0" : : "r" (pmcr));
+}
+
+static inline unsigned long get_pmccntr(void)
+{
+	unsigned long cycles;
+
+	asm volatile("mrs %0, pmccntr_el0" : "=r" (cycles));
+	return cycles;
+}
 #endif
 
 struct pmu_data {
@@ -72,11 +106,37 @@ static bool check_pmcr(void)
 	return pmu.implementer != 0;
 }
 
+/*
+ * Ensure that the cycle counter progresses between back-to-back reads.
+ */
+static bool check_cycles_increase(void)
+{
+	struct pmu_data pmu = {0};
+
+	pmu.enable = 1;
+	set_pmcr(pmu.pmcr_el0);
+
+	for (int i = 0; i < NR_SAMPLES; i++) {
+		unsigned long a, b;
+
+		a = get_pmccntr();
+		b = get_pmccntr();
+
+		if (a >= b) {
+			printf("Read %ld then %ld.\n", a, b);
+			return false;
+		}
+	}
+
+	return true;
+}
+
 int main(void)
 {
 	report_prefix_push("pmu");
 
 	report("Control register", check_pmcr());
+	report("Monotonically increasing cycle count", check_cycles_increase());
 
 	return report_summary();
 }
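The AArch32 get_pmccntr() comment in the diff above motivates returning
only 32 bits by pointing at differential usage of the counter. A
hypothetical helper, not part of the patch, showing what such usage might
look like with the accessors the patch defines; unsigned modular
subtraction keeps the delta correct even if the 32-bit counter wraps once
between the two reads:

```c
/* Hypothetical helper built on the patch's get_pmccntr(); assumes the
 * cycle counter was already enabled via set_pmcr(), as in the test. */
static unsigned long cycles_for(void (*fn)(void))
{
	unsigned long before, after;

	before = get_pmccntr();
	fn();
	after = get_pmccntr();

	/* Modular arithmetic on unsigned long: the difference stays
	 * correct across a single wrap of the 32-bit AArch32 counter. */
	return after - before;
}
```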