From patchwork Tue Dec 6 18:05:28 2016 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Wei Huang X-Patchwork-Id: 9463041 Return-Path: Received: from mail.wl.linuxfoundation.org (pdx-wl-mail.web.codeaurora.org [172.30.200.125]) by pdx-korg-patchwork.web.codeaurora.org (Postfix) with ESMTP id 2DF5E60231 for ; Tue, 6 Dec 2016 18:06:23 +0000 (UTC) Received: from mail.wl.linuxfoundation.org (localhost [127.0.0.1]) by mail.wl.linuxfoundation.org (Postfix) with ESMTP id 22EA5284A1 for ; Tue, 6 Dec 2016 18:06:23 +0000 (UTC) Received: by mail.wl.linuxfoundation.org (Postfix, from userid 486) id 171F0284A4; Tue, 6 Dec 2016 18:06:23 +0000 (UTC) X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on pdx-wl-mail.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-6.9 required=2.0 tests=BAYES_00,RCVD_IN_DNSWL_HI autolearn=unavailable version=3.3.1 Received: from lists.gnu.org (lists.gnu.org [208.118.235.17]) (using TLSv1 with cipher AES256-SHA (256/256 bits)) (No client certificate requested) by mail.wl.linuxfoundation.org (Postfix) with ESMTPS id 65344284A6 for ; Tue, 6 Dec 2016 18:06:22 +0000 (UTC) Received: from localhost ([::1]:33725 helo=lists.gnu.org) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1cEK8D-0006uK-1V for patchwork-qemu-devel@patchwork.kernel.org; Tue, 06 Dec 2016 13:06:21 -0500 Received: from eggs.gnu.org ([2001:4830:134:3::10]:47515) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1cEK7e-0006rA-Be for qemu-devel@nongnu.org; Tue, 06 Dec 2016 13:05:47 -0500 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1cEK7b-0005aO-0s for qemu-devel@nongnu.org; Tue, 06 Dec 2016 13:05:46 -0500 Received: from mx1.redhat.com ([209.132.183.28]:38232) by eggs.gnu.org with esmtps (TLS1.0:DHE_RSA_AES_256_CBC_SHA1:32) (Exim 4.71) (envelope-from ) id 1cEK7a-0005a1-NX for qemu-devel@nongnu.org; Tue, 06 Dec 2016 13:05:42 -0500 Received: 
from int-mx14.intmail.prod.int.phx2.redhat.com (int-mx14.intmail.prod.int.phx2.redhat.com [10.5.11.27]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mx1.redhat.com (Postfix) with ESMTPS id C2A698F511; Tue, 6 Dec 2016 18:05:41 +0000 (UTC) Received: from weilaptop.redhat.com (vpn-62-139.rdu2.redhat.com [10.10.62.139]) by int-mx14.intmail.prod.int.phx2.redhat.com (8.14.4/8.14.4) with ESMTP id uB6I5TOi002456; Tue, 6 Dec 2016 13:05:39 -0500 From: Wei Huang To: cov@codeaurora.org Date: Tue, 6 Dec 2016 12:05:28 -0600 Message-Id: <1481047528-16180-6-git-send-email-wei@redhat.com> In-Reply-To: <1481047528-16180-1-git-send-email-wei@redhat.com> References: <1481047528-16180-1-git-send-email-wei@redhat.com> X-Scanned-By: MIMEDefang 2.68 on 10.5.11.27 X-Greylist: Sender IP whitelisted, not delayed by milter-greylist-4.5.16 (mx1.redhat.com [10.5.110.28]); Tue, 06 Dec 2016 18:05:41 +0000 (UTC) X-detected-operating-system: by eggs.gnu.org: GNU/Linux 2.2.x-3.x [generic] [fuzzy] X-Received-From: 209.132.183.28 Subject: [Qemu-devel] [kvm-unit-tests PATCH v14 5/5] arm: pmu: Add CPI checking X-BeenThere: qemu-devel@nongnu.org X-Mailman-Version: 2.1.21 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Cc: alindsay@codeaurora.org, drjones@redhat.com, kvm@vger.kernel.org, andre.przywara@arm.com, croberts@codeaurora.org, qemu-devel@nongnu.org, alistair.francis@xilinx.com, shannon.zhao@linaro.org, kvmarm@lists.cs.columbia.edu Errors-To: qemu-devel-bounces+patchwork-qemu-devel=patchwork.kernel.org@nongnu.org Sender: "Qemu-devel" X-Virus-Scanned: ClamAV using ClamSMTP From: Christopher Covington Calculate the number of cycles per instruction (CPI) implied by ARM PMU cycle counter values. The code includes a strict checking facility intended for the -icount option in TCG mode, enabled via the configuration file. 
Signed-off-by: Christopher Covington Signed-off-by: Wei Huang --- arm/pmu.c | 133 +++++++++++++++++++++++++++++++++++++++++++++++++++++- arm/unittests.cfg | 14 ++++++ 2 files changed, 146 insertions(+), 1 deletion(-) diff --git a/arm/pmu.c b/arm/pmu.c index d9ff19d..a39dae4 100644 --- a/arm/pmu.c +++ b/arm/pmu.c @@ -58,6 +58,14 @@ static inline uint64_t get_pmccntr(void) return read_sysreg(PMCCNTR32); } +static inline void set_pmccntr(uint64_t value) +{ + if (pmu_version == 0x3) + write_sysreg(value, PMCCNTR64); + else + write_sysreg(value & 0xffffffff, PMCCNTR32); +} + /* PMCCFILTR is an obsolete name for PMXEVTYPER31 in ARMv7 */ static inline void set_pmccfiltr(uint32_t value) { @@ -65,13 +73,56 @@ static inline void set_pmccfiltr(uint32_t value) write_sysreg(value, PMXEVTYPER); isb(); } + +/* + * Extra instructions inserted by the compiler would be difficult to compensate + * for, so hand assemble everything between, and including, the PMCR accesses + * to start and stop counting. isb instructions were inserted to make sure + * pmccntr read after this function returns the exact instructions executed in + * the controlled block. Total instrs = isb + mcr + 2*loop = 2 + 2*loop. 
+ */ +static inline void precise_instrs_loop(int loop, uint32_t pmcr) +{ + asm volatile( + " mcr p15, 0, %[pmcr], c9, c12, 0\n" + " isb\n" + "1: subs %[loop], %[loop], #1\n" + " bgt 1b\n" + " mcr p15, 0, %[z], c9, c12, 0\n" + " isb\n" + : [loop] "+r" (loop) + : [pmcr] "r" (pmcr), [z] "r" (0) + : "cc"); +} #elif defined(__aarch64__) static inline uint32_t get_id_dfr0(void) { return read_sysreg(id_dfr0_el1); } static inline uint32_t get_pmcr(void) { return read_sysreg(pmcr_el0); } static inline void set_pmcr(uint32_t v) { write_sysreg(v, pmcr_el0); } static inline uint64_t get_pmccntr(void) { return read_sysreg(pmccntr_el0); } +static inline void set_pmccntr(uint64_t v) { write_sysreg(v, pmccntr_el0); } static inline void set_pmcntenset(uint32_t v) { write_sysreg(v, pmcntenset_el0); } static inline void set_pmccfiltr(uint32_t v) { write_sysreg(v, pmccfiltr_el0); } + +/* + * Extra instructions inserted by the compiler would be difficult to compensate + * for, so hand assemble everything between, and including, the PMCR accesses + * to start and stop counting. isb instructions are inserted to make sure + * pmccntr read after this function returns the exact instructions executed + * in the controlled block. Total instrs = isb + msr + 2*loop = 2 + 2*loop. + */ +static inline void precise_instrs_loop(int loop, uint32_t pmcr) +{ + asm volatile( + " msr pmcr_el0, %[pmcr]\n" + " isb\n" + "1: subs %[loop], %[loop], #1\n" + " b.gt 1b\n" + " msr pmcr_el0, xzr\n" + " isb\n" + : [loop] "+r" (loop) + : [pmcr] "r" (pmcr) + : "cc"); +} #endif /* @@ -128,6 +179,80 @@ static bool check_cycles_increase(void) return success; } +/* + * Execute a known number of guest instructions. Only even instruction counts + * greater than or equal to 4 are supported by the in-line assembly code. The + * control register (PMCR_EL0) is initialized with the provided value (allowing + * for example for the cycle counter or event counters to be reset). 
At the end + * of the exact instruction loop, zero is written to PMCR_EL0 to disable + * counting, allowing the cycle counter or event counters to be read at the + * leisure of the calling code. + */ +static void measure_instrs(int num, uint32_t pmcr) +{ + int loop = (num - 2) / 2; + + assert(num >= 4 && ((num - 2) % 2 == 0)); + precise_instrs_loop(loop, pmcr); +} + +/* + * Measure cycle counts for various known instruction counts. Ensure that the + * cycle counter progresses (similar to check_cycles_increase() but with more + * instructions and using reset and stop controls). If supplied a positive, + * nonzero CPI parameter, it also strictly checks that every measurement matches + * it. Strict CPI checking is used to test -icount mode. + */ +static bool check_cpi(int cpi) +{ + uint32_t pmcr = get_pmcr() | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_E; + + /* init before event access, this test only cares about cycle count */ + set_pmcntenset(1 << PMU_CYCLE_IDX); + set_pmccfiltr(0); /* count cycles in EL0, EL1, but not EL2 */ + + if (cpi > 0) + printf("Checking for CPI=%d.\n", cpi); + printf("instrs : cycles0 cycles1 ...\n"); + + for (unsigned int i = 4; i < 300; i += 32) { + uint64_t avg, sum = 0; + + printf("%4d:", i); + for (int j = 0; j < NR_SAMPLES; j++) { + uint64_t cycles; + + set_pmccntr(0); + measure_instrs(i, pmcr); + cycles = get_pmccntr(); + printf(" %4"PRId64"", cycles); + + if (!cycles) { + printf("\ncycles not incrementing!\n"); + return false; + } else if (cpi > 0 && cycles != i * cpi) { + printf("\nunexpected cycle count received!\n"); + return false; + } else if ((cycles >> 32) != 0) { + /* The cycles taken by the loop above should + * fit in 32 bits easily. We check the upper + * 32 bits of the cycle counter to make sure + * there is no surprise. */ + printf("\ncycle count bigger than 32bit!\n"); + return false; + } + + sum += cycles; + } + avg = sum / NR_SAMPLES; + printf(" avg=%-4"PRId64" %s=%-3"PRId64"\n", avg, + (avg >= i) ? 
"cpi" : "ipc", + (avg >= i) ? avg / i : i / avg); + } + + return true; +} + /* Return FALSE if no PMU found, otherwise return TRUE */ bool pmu_probe(void) { @@ -143,8 +268,13 @@ bool pmu_probe(void) return pmu_version; } -int main(void) +int main(int argc, char *argv[]) { + int cpi = 0; + + if (argc > 1) + cpi = atol(argv[1]); + if (!pmu_probe()) { printf("No PMU found, test skipped...\n"); return report_summary(); @@ -154,6 +284,7 @@ int main(void) report("Control register", check_pmcr()); report("Monotonically increasing cycle count", check_cycles_increase()); + report("Cycle/instruction ratio", check_cpi(cpi)); return report_summary(); } diff --git a/arm/unittests.cfg b/arm/unittests.cfg index 816f494..044d97c 100644 --- a/arm/unittests.cfg +++ b/arm/unittests.cfg @@ -63,3 +63,17 @@ groups = pci [pmu] file = pmu.flat groups = pmu + +# Test PMU support (TCG) with -icount IPC=1 +[pmu-tcg-icount-1] +file = pmu.flat +extra_params = -icount 0 -append '1' +groups = pmu +accel = tcg + +# Test PMU support (TCG) with -icount IPC=256 +[pmu-tcg-icount-256] +file = pmu.flat +extra_params = -icount 8 -append '256' +groups = pmu +accel = tcg