From patchwork Mon Jul 18 17:01:57 2022
X-Patchwork-Submitter: Atish Kumar Patra
X-Patchwork-Id: 12921479
From: Atish Patra
To: linux-kernel@vger.kernel.org
Cc: Atish Patra, Albert Ou, Anup Patel, Atish Patra, Guo Ren, kvm-riscv@lists.infradead.org, kvm@vger.kernel.org, linux-riscv@lists.infradead.org, Mark Rutland, Palmer Dabbelt, Paul Walmsley, Will Deacon
Subject: [RFC 1/9] RISC-V: Define a helper function to probe number of hardware counters
Date: Mon, 18 Jul 2022 10:01:57 -0700
Message-Id: <20220718170205.2972215-2-atishp@rivosinc.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20220718170205.2972215-1-atishp@rivosinc.com>
References:
<20220718170205.2972215-1-atishp@rivosinc.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org KVM module needs to know how many hardware counters the platform supports. Otherwise, it will not be able to show optimal value of virtual counters to the guest. Signed-off-by: Atish Patra --- drivers/perf/riscv_pmu_sbi.c | 23 +++++++++++++++++------ include/linux/perf/riscv_pmu.h | 4 ++++ 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index 24124546844c..1723af68ffa1 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -27,6 +27,7 @@ */ static union sbi_pmu_ctr_info *pmu_ctr_list; static unsigned int riscv_pmu_irq; +static struct riscv_pmu *rvpmu; struct sbi_pmu_event_data { union { @@ -227,6 +228,12 @@ static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_M }, }; +int riscv_pmu_sbi_get_num_hw_ctrs(void) +{ + return rvpmu ? rvpmu->num_hw_counters : 0; +} +EXPORT_SYMBOL(riscv_pmu_sbi_get_num_hw_ctrs); + static int pmu_sbi_ctr_get_width(int idx) { return pmu_ctr_list[idx].width; @@ -443,7 +450,7 @@ static int pmu_sbi_find_num_ctrs(void) return sbi_err_map_linux_errno(ret.error); } -static int pmu_sbi_get_ctrinfo(int nctr) +static int pmu_sbi_get_ctrinfo(int nctr, int *num_hw_ctrs) { struct sbiret ret; int i, num_hw_ctr = 0, num_fw_ctr = 0; @@ -453,7 +460,7 @@ static int pmu_sbi_get_ctrinfo(int nctr) if (!pmu_ctr_list) return -ENOMEM; - for (i = 0; i <= nctr; i++) { + for (i = 0; i < nctr; i++) { ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0); if (ret.error) /* The logical counter ids are not expected to be contiguous */ @@ -466,6 +473,7 @@ static int pmu_sbi_get_ctrinfo(int nctr) pmu_ctr_list[i].value = cinfo.value; } + *num_hw_ctrs = num_hw_ctr; pr_info("%d firmware and %d hardware counters\n", num_fw_ctr, num_hw_ctr); return 0; @@ -698,7 +706,7 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde static int pmu_sbi_device_probe(struct platform_device *pdev) { struct riscv_pmu *pmu = NULL; - int num_counters; + int num_counters, num_hw_ctrs = 0; int ret = -ENODEV; pr_info("SBI PMU extension is available\n"); @@ -713,7 +721,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev) } /* cache all the information about counters now */ - if (pmu_sbi_get_ctrinfo(num_counters)) + if (pmu_sbi_get_ctrinfo(num_counters, &num_hw_ctrs)) goto out_free; ret = pmu_sbi_setup_irqs(pmu, pdev); @@ -723,6 +731,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev) pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE; } pmu->num_counters = num_counters; + pmu->num_hw_counters = num_hw_ctrs; pmu->ctr_start = pmu_sbi_ctr_start; pmu->ctr_stop = pmu_sbi_ctr_stop; pmu->event_map = pmu_sbi_event_map; @@ -733,14 +742,16 @@ static int pmu_sbi_device_probe(struct platform_device *pdev) ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); if (ret) - return ret; + goto out_free; ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW); if (ret) { cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); - return ret; + goto out_free; } + rvpmu = pmu; + return 0; out_free: diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h index 46f9b6fe306e..fc47167e000c 100644 --- a/include/linux/perf/riscv_pmu.h +++ b/include/linux/perf/riscv_pmu.h @@ -46,6 +46,7 @@ struct riscv_pmu { irqreturn_t (*handle_irq)(int irq_num, void *dev); int 
num_counters; + int num_hw_counters; u64 (*ctr_read)(struct perf_event *event); int (*ctr_get_idx)(struct perf_event *event); int (*ctr_get_width)(int idx); @@ -69,6 +70,9 @@ void riscv_pmu_legacy_skip_init(void); static inline void riscv_pmu_legacy_skip_init(void) {}; #endif struct riscv_pmu *riscv_pmu_alloc(void); +#ifdef CONFIG_RISCV_PMU_SBI +int riscv_pmu_sbi_get_num_hw_ctrs(void); +#endif #endif /* CONFIG_RISCV_PMU */ From patchwork Mon Jul 18 17:01:58 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Atish Kumar Patra X-Patchwork-Id: 12921480 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 6B93DC43334 for ; Mon, 18 Jul 2022 17:05:51 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235500AbiGRRFt (ORCPT ); Mon, 18 Jul 2022 13:05:49 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:57292 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235702AbiGRRFr (ORCPT ); Mon, 18 Jul 2022 13:05:47 -0400 Received: from mail-pj1-x1031.google.com (mail-pj1-x1031.google.com [IPv6:2607:f8b0:4864:20::1031]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 833572BB12 for ; Mon, 18 Jul 2022 10:05:46 -0700 (PDT) Received: by mail-pj1-x1031.google.com with SMTP id q5-20020a17090a304500b001efcc885cc4so13234101pjl.4 for ; Mon, 18 Jul 2022 10:05:46 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=rivosinc-com.20210112.gappssmtp.com; s=20210112; h=from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-transfer-encoding; bh=H7ChdRBKS+9A003EiQbM2SMDrKCCYQePnInXhSsyMB0=; b=2V7H54kc+3zNv1TcuveEZhkWNgIt1ocwR1qZtOLzfciyvULmAUYAVdSYvDM/LMqPW7 B2K1L+wSiEDZFuMDMucw3/YkGg89OijRh8dtnIplDxavoLr+HLhF1RSyQXxCnRG/72Tf KHeNwRoBhGw0rXQdnYIWadk5aSrZwLj7I/0cV0ZkgQrHx0ezw5iLQkZg1Uak7Z5I0sqE qFLiHdxohtP+MR29CMLuGTlZRid3ndTvwyv2IFhPeeJPuBbWKdNqbpz4KKb8bRDh9SWd v8nTz6wqG+l9bP6qNs15D6dX6qH+nOrr4SmzAUcKI22h5JluwKsyWFI5ud4IQSpY+eeR W4Xg== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references:mime-version:content-transfer-encoding; bh=H7ChdRBKS+9A003EiQbM2SMDrKCCYQePnInXhSsyMB0=; b=w71mIMAato6quQ/ItPvv3mw0fN/8RsrAq+q3w/EFhEtSq/1hWt7zRTmnEw6Pp3l4Jj FBP/vsTvuqY9i9XQg5GLs4b1dZjM5rhG/0ViS0LMl902/Vg4p2D9Y+G1TMkNkk0cNTRH 4rkTCl0keGEwnG5Aw52J2VCuW7GcRilZqNHjJlB1h41w0gomNjXJHx7bUrGQvq3N529l JeuaAbXr0OcovZO5LEM0aUuY8qgEyDILYhltF/gvntYQfC4lc+Y/t7LQDAxxZWVmJ5Br HrbZNhttfXyy55Gz3musDlRaps+cdRemPMaVWUqpCsnItZ7pH2UwnxynXrrNBC2pyz8b 5XoA== X-Gm-Message-State: AJIora82F6SjmmtABFVu7mboJjFwalQk+Mms/RLqwcjkYGYOD48XjHbL +F0saLs0jztpmyrGOtJNXgURtw== X-Google-Smtp-Source: AGRyM1uvFkWFiSwIv2bxiYnsB/Me0O2umAIdMXnBAJupJyduccoZ2CTvIm6kzqVVu3DQdq7Cn7s+hQ== X-Received: by 2002:a17:902:a413:b0:156:15b:524a with SMTP id p19-20020a170902a41300b00156015b524amr30012463plq.106.1658163945893; Mon, 18 Jul 2022 10:05:45 -0700 (PDT) Received: from atishp.ba.rivosinc.com ([66.220.2.162]) by smtp.gmail.com with ESMTPSA id r10-20020a170902be0a00b0016bc947c5b7sm9733402pls.38.2022.07.18.10.05.44 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Mon, 18 Jul 2022 10:05:45 -0700 (PDT) From: Atish Patra To: linux-kernel@vger.kernel.org Cc: Atish Patra , Albert Ou , Anup 
Patel , Atish Patra , Guo Ren , kvm-riscv@lists.infradead.org, kvm@vger.kernel.org, linux-riscv@lists.infradead.org, Mark Rutland , Palmer Dabbelt , Paul Walmsley , Will Deacon Subject: [RFC 2/9] RISC-V: Define a helper function to return counter width Date: Mon, 18 Jul 2022 10:01:58 -0700 Message-Id: <20220718170205.2972215-3-atishp@rivosinc.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220718170205.2972215-1-atishp@rivosinc.com> References: <20220718170205.2972215-1-atishp@rivosinc.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org The virtual hardware counters need to have the same width as the logical hardware counters for simplicity. However, there shouldn't be mapping between virtual hardware counters and logical hardware counters. As we don't support hetergeneous harts or counters with different width as of now, the implementation relies on the counter width of the first available programmable counter. Signed-off-by: Atish Patra --- drivers/perf/riscv_pmu_sbi.c | 25 +++++++++++++++++++++++++ include/linux/perf/riscv_pmu.h | 1 + 2 files changed, 26 insertions(+) diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index 1723af68ffa1..5d0eef3ef136 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -250,6 +250,31 @@ static bool pmu_sbi_ctr_is_fw(int cidx) return (info->type == SBI_PMU_CTR_TYPE_FW) ? true : false; } +/* + * Returns the counter width of a programmable counter + * As we don't support heterneous CPUs yet, it is okay to just + * return the counter width of the first programmable counter. + */ +int riscv_pmu_sbi_hpmc_width(void) +{ + int i; + union sbi_pmu_ctr_info *info; + + if (!rvpmu) + return -EINVAL; + + for (i = 0; i < rvpmu->num_counters; i++) { + info = &pmu_ctr_list[i]; + if (!info) + continue; + if (info->type == SBI_PMU_CTR_TYPE_HW) + return info->width; + } + + return 0; +} +EXPORT_SYMBOL(riscv_pmu_sbi_hpmc_width); + static int pmu_sbi_ctr_get_idx(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h index fc47167e000c..6fee211c27b5 100644 --- a/include/linux/perf/riscv_pmu.h +++ b/include/linux/perf/riscv_pmu.h @@ -72,6 +72,7 @@ static inline void riscv_pmu_legacy_skip_init(void) {}; struct riscv_pmu *riscv_pmu_alloc(void); #ifdef CONFIG_RISCV_PMU_SBI int riscv_pmu_sbi_get_num_hw_ctrs(void); +int riscv_pmu_sbi_hpmc_width(void); #endif #endif /* CONFIG_RISCV_PMU */ From patchwork Mon Jul 18 17:01:59 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Atish Kumar Patra X-Patchwork-Id: 12921481 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id A7068C433EF for ; Mon, 18 Jul 2022 17:05:51 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231367AbiGRRFu (ORCPT ); Mon, 18 Jul 2022 13:05:50 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:57232 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235629AbiGRRFs (ORCPT ); Mon, 18 Jul 2022 13:05:48 -0400 Received: from mail-pj1-x1031.google.com (mail-pj1-x1031.google.com [IPv6:2607:f8b0:4864:20::1031]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id BDF7A2BB03 for ; Mon, 18 Jul 2022 10:05:47 -0700 (PDT) 
From: Atish Patra
To: linux-kernel@vger.kernel.org
Cc: Atish Patra, Albert Ou, Anup Patel, Atish Patra, Guo Ren, kvm-riscv@lists.infradead.org, kvm@vger.kernel.org, linux-riscv@lists.infradead.org, Mark Rutland, Palmer Dabbelt, Paul Walmsley, Will Deacon
Subject: [RFC 3/9] RISC-V: KVM: Define a probe function for SBI extension data structures
Date: Mon, 18 Jul 2022 10:01:59 -0700
Message-Id: <20220718170205.2972215-4-atishp@rivosinc.com>
In-Reply-To: <20220718170205.2972215-1-atishp@rivosinc.com>
References: <20220718170205.2972215-1-atishp@rivosinc.com>

Currently, the probe function only checks whether an SBI extension is registered. However, an extension may not want to advertise itself depending on some other condition. An additional extension-specific probe function allows an extension to decide whether it should be advertised to the caller. Any extension that does not need an extra dependency check is not required to implement this function.
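As a concrete illustration of the new callback (a condensed sketch based on the SBI PMU extension added later in this series, not part of this patch): an extension supplies a probe function and is then advertised to the guest only when its dependency is satisfied.

	/*
	 * Condensed sketch: the PMU extension only advertises itself when
	 * privilege-mode filtering (Sscofpmf) is available on the host.
	 */
	static unsigned long kvm_sbi_ext_pmu_probe(unsigned long extid)
	{
		return riscv_isa_extension_available(NULL, SSCOFPMF);
	}

	const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
		.extid_start = SBI_EXT_PMU,
		.extid_end   = SBI_EXT_PMU,
		.handler     = kvm_sbi_ext_pmu_handler,	/* defined in a later patch */
		.probe       = kvm_sbi_ext_pmu_probe,
	};

With a probe hook registered, the base extension's probe path returns the hook's result instead of unconditionally reporting 1 for every registered extension.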
Signed-off-by: Atish Patra Reviewed-by: Andrew Jones --- arch/riscv/include/asm/kvm_vcpu_sbi.h | 3 +++ arch/riscv/kvm/vcpu_sbi_base.c | 13 +++++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h index 83d6d4d2b1df..5853a1ef71ea 100644 --- a/arch/riscv/include/asm/kvm_vcpu_sbi.h +++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h @@ -25,6 +25,9 @@ struct kvm_vcpu_sbi_extension { int (*handler)(struct kvm_vcpu *vcpu, struct kvm_run *run, unsigned long *out_val, struct kvm_cpu_trap *utrap, bool *exit); + + /* Extension specific probe function */ + unsigned long (*probe)(unsigned long extid); }; void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run); diff --git a/arch/riscv/kvm/vcpu_sbi_base.c b/arch/riscv/kvm/vcpu_sbi_base.c index 48f431091cdb..14be1a819588 100644 --- a/arch/riscv/kvm/vcpu_sbi_base.c +++ b/arch/riscv/kvm/vcpu_sbi_base.c @@ -22,6 +22,7 @@ static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, int ret = 0; struct kvm_cpu_context *cp = &vcpu->arch.guest_context; struct sbiret ecall_ret; + const struct kvm_vcpu_sbi_extension *sbi_ext; switch (cp->a6) { case SBI_EXT_BASE_GET_SPEC_VERSION: @@ -46,8 +47,16 @@ static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, */ kvm_riscv_vcpu_sbi_forward(vcpu, run); *exit = true; - } else - *out_val = kvm_vcpu_sbi_find_ext(cp->a0) ? 1 : 0; + } else { + sbi_ext = kvm_vcpu_sbi_find_ext(cp->a0); + if (sbi_ext) { + if (sbi_ext->probe) + *out_val = sbi_ext->probe(cp->a0); + else + *out_val = 1; + } else + *out_val = 0; + } break; case SBI_EXT_BASE_GET_MVENDORID: case SBI_EXT_BASE_GET_MARCHID: From patchwork Mon Jul 18 17:02:00 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Atish Kumar Patra X-Patchwork-Id: 12921482 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 81C9DC43334 for ; Mon, 18 Jul 2022 17:05:59 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235809AbiGRRF6 (ORCPT ); Mon, 18 Jul 2022 13:05:58 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:57404 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S234464AbiGRRFu (ORCPT ); Mon, 18 Jul 2022 13:05:50 -0400 Received: from mail-pl1-x62e.google.com (mail-pl1-x62e.google.com [IPv6:2607:f8b0:4864:20::62e]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 4411D2C11C for ; Mon, 18 Jul 2022 10:05:48 -0700 (PDT) Received: by mail-pl1-x62e.google.com with SMTP id j12so9588203plj.8 for ; Mon, 18 Jul 2022 10:05:48 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=rivosinc-com.20210112.gappssmtp.com; s=20210112; h=from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-transfer-encoding; bh=/3vtOzL7H/3FhOEqt+TCzYFt3aLP8b6PmqU5Yxo7Fj8=; b=4uEa5LBy0DKcY8uwawFXfJDcyP2Fi+jae6+kTt2b2wiPbEE7wbhzRSXq/FBGMgOi/f hw86VLNxjdNtEI0yE51+u//XmslDWbju8DkFTny20OYQvzxqBoeGLkiEKPgITsrsNH0h qQRnpeE8Hwj4q3l6kiYjSyvG3r9D1tENLERXnC58erWdoBU1yy9KVWDXVVuCJbNyZVMu sq8BZOG2Ng3wxgfDGCyKf5yxkMdcvfdb+2Bf7FYJ4fM5MEmBHRL43SS/HZ+TFCiGQNl/ eungSt9TWAodD9oHT7M2KCyNnNMMLzfGz5/ts/D2ADF68OzVyKx2zafdBTLeTq5jeMw+ oYUw== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; 
s=20210112; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references:mime-version:content-transfer-encoding; bh=/3vtOzL7H/3FhOEqt+TCzYFt3aLP8b6PmqU5Yxo7Fj8=; b=w8zqKgIaNi0MZXCjAWTVP+AoCBTlFRtkNQpwFTZ3p3cKPZbBgjWLYJyMk39+udbNsj m3ekrVVTAcmmXW0vN+uV5TIUve0JQv3gRI+hGbedwjU323HK4FwDGA5UnQ7toWE1gyhY 7p6oK6GPwu5V2tnCjQgGOZXs2RK35SY49NUc2sKTzuTtr5WDmzycufjmnVLufg+EVciK /LJhWYO4cnJBde4N7X7R3DtmY1smjQ3rS7MoeNXCvI+Rj20ZZwR89pkkjRcySjcNC0hY 52bW3W9kaARNpP+0gRn8yhEeQgOqrY0YTANbWZVy1hfGBEPUQbbEINEW3lLLuhCDP1bb TqhA== X-Gm-Message-State: AJIora99mQKBmvP/8tqW3mdUjmkjf0dujuPEJpQEXvgZy7RXlSsFVfCg rXAaPz3p11ia3wydnGb+IRdFIg== X-Google-Smtp-Source: AGRyM1uSWzV58VCzHYwLZkTFFYZNxcIXLVpIo0ybe2iNKjofq3fVvwFRP5hLtK6LTf38a8cYPsHKQw== X-Received: by 2002:a17:90b:388e:b0:1f0:3d7f:e620 with SMTP id mu14-20020a17090b388e00b001f03d7fe620mr34153286pjb.31.1658163948380; Mon, 18 Jul 2022 10:05:48 -0700 (PDT) Received: from atishp.ba.rivosinc.com ([66.220.2.162]) by smtp.gmail.com with ESMTPSA id r10-20020a170902be0a00b0016bc947c5b7sm9733402pls.38.2022.07.18.10.05.47 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Mon, 18 Jul 2022 10:05:48 -0700 (PDT) From: Atish Patra To: linux-kernel@vger.kernel.org Cc: Atish Patra , Albert Ou , Anup Patel , Atish Patra , Guo Ren , kvm-riscv@lists.infradead.org, kvm@vger.kernel.org, linux-riscv@lists.infradead.org, Mark Rutland , Palmer Dabbelt , Paul Walmsley , Will Deacon Subject: [RFC 4/9] RISC-V: KVM: Improve privilege mode filtering for perf Date: Mon, 18 Jul 2022 10:02:00 -0700 Message-Id: <20220718170205.2972215-5-atishp@rivosinc.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220718170205.2972215-1-atishp@rivosinc.com> References: <20220718170205.2972215-1-atishp@rivosinc.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org Currently, the host driver doesn't have any method to identify if the requested perf event is from kvm or bare metal. As KVM runs in HS mode, there are no separate hypervisor privilege mode to distinguish between the attributes for guest/host. Improve the privilege mode filtering by using the event specific config1 field. Signed-off-by: Atish Patra Reviewed-by: Andrew Jones --- drivers/perf/riscv_pmu_sbi.c | 27 ++++++++++++++++++++++----- include/linux/perf/riscv_pmu.h | 2 ++ 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index 5d0eef3ef136..34f9fcc221a8 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -275,6 +275,27 @@ int riscv_pmu_sbi_hpmc_width(void) } EXPORT_SYMBOL(riscv_pmu_sbi_hpmc_width); +static unsigned long pmu_sbi_get_filter_flags(struct perf_event *event) +{ + unsigned long cflags = 0; + bool guest_events = false; + + if (event->attr.config1 & RISCV_KVM_PMU_CONFIG1_GUEST_EVENTS) + guest_events = true; + if (event->attr.exclude_kernel) + cflags |= guest_events ? SBI_PMU_CFG_FLAG_SET_VSINH : SBI_PMU_CFG_FLAG_SET_SINH; + if (event->attr.exclude_user) + cflags |= guest_events ? 
SBI_PMU_CFG_FLAG_SET_VUINH : SBI_PMU_CFG_FLAG_SET_UINH; + if (guest_events && event->attr.exclude_hv) + cflags |= SBI_PMU_CFG_FLAG_SET_SINH; + if (event->attr.exclude_host) + cflags |= SBI_PMU_CFG_FLAG_SET_UINH | SBI_PMU_CFG_FLAG_SET_SINH; + if (event->attr.exclude_guest) + cflags |= SBI_PMU_CFG_FLAG_SET_VSINH | SBI_PMU_CFG_FLAG_SET_VUINH; + + return cflags; +} + static int pmu_sbi_ctr_get_idx(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; @@ -286,11 +307,7 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event) uint64_t cmask = GENMASK_ULL(rvpmu->num_counters - 1, 0); unsigned long cflags = 0; - if (event->attr.exclude_kernel) - cflags |= SBI_PMU_CFG_FLAG_SET_SINH; - if (event->attr.exclude_user) - cflags |= SBI_PMU_CFG_FLAG_SET_UINH; - + cflags = pmu_sbi_get_filter_flags(event); /* retrieve the available counter index */ #if defined(CONFIG_32BIT) ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask, diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h index 6fee211c27b5..825b95253bc5 100644 --- a/include/linux/perf/riscv_pmu.h +++ b/include/linux/perf/riscv_pmu.h @@ -26,6 +26,8 @@ #define RISCV_PMU_STOP_FLAG_RESET 1 +#define RISCV_KVM_PMU_CONFIG1_GUEST_EVENTS 0x1 + struct cpu_hw_events { /* currently enabled events */ int n_events; From patchwork Mon Jul 18 17:02:01 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Atish Kumar Patra X-Patchwork-Id: 12921484 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 4C85BCCA485 for ; Mon, 18 Jul 2022 17:06:05 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235845AbiGRRGE (ORCPT ); Mon, 18 Jul 2022 13:06:04 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:57622 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235777AbiGRRF5 (ORCPT ); Mon, 18 Jul 2022 13:05:57 -0400 Received: from mail-pl1-x62f.google.com (mail-pl1-x62f.google.com [IPv6:2607:f8b0:4864:20::62f]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 457B92C121 for ; Mon, 18 Jul 2022 10:05:50 -0700 (PDT) Received: by mail-pl1-x62f.google.com with SMTP id 5so9580149plk.9 for ; Mon, 18 Jul 2022 10:05:50 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=rivosinc-com.20210112.gappssmtp.com; s=20210112; h=from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-transfer-encoding; bh=g0GXt84vLJgSh96R08zPlPoGq5zRKrVm2Ojh33nPRNc=; b=HgvjExTpPKlEjTRtSNLbdzm9LKmieF9qEs4xJP2LurrKxwYLXTas/D1PhqWPwMtd17 uXNpptix6gFoMRy6B5gqVQdxRTc4tB8ag+7kUYjb4odhRT9dPaA5Lc2kvgVSzyf5meKI W/tfu4dz7K9rPUjRI4dHd+CRVB8u+x8nEqgRSaK+YgC5zDoy41GtLW5AMij16DZqhZBD UN7HC11UkbvvCvj1rgEwWpDwaeva3Ou9kmA11c49GJg83V2o0TpHm2tHQ3EgVIL40njI ny3LFt5kRDkD+YLUDrZyeMC0Pn9/wYDY8mosBvrqoaUEhGWWf7jYhMliDxX1ZNq9sryw vAbg== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references:mime-version:content-transfer-encoding; bh=g0GXt84vLJgSh96R08zPlPoGq5zRKrVm2Ojh33nPRNc=; b=u+XhT12fghqZlHoIIa6S7C0kZ1Y7KF4j0h7lDmm5R4IiB4dWsL+sOdTpjUF4wXsXty V2d/fLHqdesmItv3p/nwJ4XcYH9J3EiikxJPOgdf0f7VC0i/0s6C7BqRDMK67KeSt+pq OnipFt8fkH5kFhXOTHr6huiTM1BW2bJbKQ+YDfldgFeOBTrCVZZ6UksixdSlflaNqmBS 
TRjVI9XVkCG0K4ihzHKqCExG1qSXh3qhp7qt6eT+/N/ZZamkiIFpWZJuxP8WeIZFBtRB oNwYgU/RMGkM0jcAsGVMJ9FXkOwtSbXXbxyrqnux9HsdDLRXFIgPTE8smlwwxhQZcUuu SmRg== X-Gm-Message-State: AJIora8b/FAwg8RIA50ffgh4PADF4DkyfW8Pi7MHur0kvCwNJ/Wbi7sM JTU4ydh62CEm5ZsxITPQLWfbSA== X-Google-Smtp-Source: AGRyM1tS6zC84MRMFbRJz9tvWCxV+RPsnhP4oYyrw87iUd3saJSSLC62JLntxczMfiMlDUf7vVWivA== X-Received: by 2002:a17:90b:33ce:b0:1ef:e5f4:f8e2 with SMTP id lk14-20020a17090b33ce00b001efe5f4f8e2mr41166287pjb.70.1658163949636; Mon, 18 Jul 2022 10:05:49 -0700 (PDT) Received: from atishp.ba.rivosinc.com ([66.220.2.162]) by smtp.gmail.com with ESMTPSA id r10-20020a170902be0a00b0016bc947c5b7sm9733402pls.38.2022.07.18.10.05.48 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Mon, 18 Jul 2022 10:05:49 -0700 (PDT) From: Atish Patra To: linux-kernel@vger.kernel.org Cc: Atish Patra , Albert Ou , Anup Patel , Atish Patra , Guo Ren , kvm-riscv@lists.infradead.org, kvm@vger.kernel.org, linux-riscv@lists.infradead.org, Mark Rutland , Palmer Dabbelt , Paul Walmsley , Will Deacon Subject: [RFC 5/9] RISC-V: KVM: Add skeleton support for perf Date: Mon, 18 Jul 2022 10:02:01 -0700 Message-Id: <20220718170205.2972215-6-atishp@rivosinc.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220718170205.2972215-1-atishp@rivosinc.com> References: <20220718170205.2972215-1-atishp@rivosinc.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org This patch only adds barebore structure of perf implementation. Most of the function returns zero at this point and will be implemented fully in the future. Signed-off-by: Atish Patra --- arch/riscv/include/asm/kvm_host.h | 3 + arch/riscv/include/asm/kvm_vcpu_pmu.h | 70 +++++++++++++ arch/riscv/kvm/Makefile | 1 + arch/riscv/kvm/main.c | 3 +- arch/riscv/kvm/vcpu.c | 5 + arch/riscv/kvm/vcpu_insn.c | 3 +- arch/riscv/kvm/vcpu_pmu.c | 136 ++++++++++++++++++++++++++ 7 files changed, 219 insertions(+), 2 deletions(-) create mode 100644 arch/riscv/include/asm/kvm_vcpu_pmu.h create mode 100644 arch/riscv/kvm/vcpu_pmu.c diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 59a0cf2ca7b9..5d2312828bb2 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -18,6 +18,7 @@ #include #include #include +#include #define KVM_MAX_VCPUS 1024 @@ -226,6 +227,8 @@ struct kvm_vcpu_arch { /* Don't run the VCPU (blocked) */ bool pause; + + struct kvm_pmu pmu; }; static inline void kvm_arch_hardware_unsetup(void) {} diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h new file mode 100644 index 000000000000..bffee052f2ae --- /dev/null +++ b/arch/riscv/include/asm/kvm_vcpu_pmu.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Rivos Inc + * + * Authors: + * Atish Patra + */ + +#ifndef _KVM_VCPU_RISCV_PMU_H +#define _KVM_VCPU_RISCV_PMU_H + +#include +#include + +#ifdef CONFIG_RISCV_PMU_SBI +#define RISCV_KVM_MAX_FW_CTRS 32 + +/* Per virtual pmu counter data */ +struct kvm_pmc { + u8 idx; + struct kvm_vcpu *vcpu; + struct perf_event *perf_event; + uint64_t counter_val; + union sbi_pmu_ctr_info cinfo; +}; + +/* PMU data structure per vcpu */ +struct kvm_pmu { + struct kvm_pmc pmc[RISCV_MAX_COUNTERS]; + /* Number of the virtual firmware counters available */ + int num_fw_ctrs; + /* Number of the virtual hardware counters available */ + int num_hw_ctrs; + /* Bit map of all the virtual counter used */ + DECLARE_BITMAP(used_pmc, RISCV_MAX_COUNTERS); +}; + +#define 
vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu) +#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu)) +#define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu) + +int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, unsigned long *out_val); +int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx, + unsigned long *ctr_info); + +int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base, + unsigned long ctr_mask, unsigned long flag, uint64_t ival); +int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base, + unsigned long ctr_mask, unsigned long flag); +int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base, + unsigned long ctr_mask, unsigned long flag, + unsigned long eidx, uint64_t edata); +int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx, + unsigned long *out_val); +int kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu); + +#else +struct kvm_pmu { +}; + +static inline int kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) +{ + return 0; +} +static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {} +static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {} +#endif +#endif diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile index 019df9208bdd..342d7199e89d 100644 --- a/arch/riscv/kvm/Makefile +++ b/arch/riscv/kvm/Makefile @@ -25,3 +25,4 @@ kvm-y += vcpu_sbi_base.o kvm-y += vcpu_sbi_replace.o kvm-y += vcpu_sbi_hsm.o kvm-y += vcpu_timer.o +kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_sbi_pmu.o vcpu_pmu.o diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c index 1549205fe5fe..d41ab6d1987d 100644 --- a/arch/riscv/kvm/main.c +++ b/arch/riscv/kvm/main.c @@ -49,7 +49,8 @@ int kvm_arch_hardware_enable(void) hideleg |= (1UL << IRQ_VS_EXT); csr_write(CSR_HIDELEG, hideleg); - csr_write(CSR_HCOUNTEREN, -1UL); + /* VS should access only TM bit. 
Everything else should trap */ + csr_write(CSR_HCOUNTEREN, 0x02); csr_write(CSR_HVIP, 0); diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 3c95924d38c7..4cc964aaf2ad 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -122,6 +122,7 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu) WRITE_ONCE(vcpu->arch.irqs_pending, 0); WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0); + kvm_riscv_vcpu_pmu_reset(vcpu); vcpu->arch.hfence_head = 0; vcpu->arch.hfence_tail = 0; @@ -174,6 +175,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) /* Setup VCPU timer */ kvm_riscv_vcpu_timer_init(vcpu); + /* setup performance monitoring */ + kvm_riscv_vcpu_pmu_init(vcpu); + /* Reset VCPU */ kvm_riscv_reset_vcpu(vcpu); @@ -196,6 +200,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) /* Cleanup VCPU timer */ kvm_riscv_vcpu_timer_deinit(vcpu); + kvm_riscv_vcpu_pmu_deinit(vcpu); /* Free unused pages pre-allocated for G-stage page table mappings */ kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); } diff --git a/arch/riscv/kvm/vcpu_insn.c b/arch/riscv/kvm/vcpu_insn.c index 7eb90a47b571..0aa334f853c8 100644 --- a/arch/riscv/kvm/vcpu_insn.c +++ b/arch/riscv/kvm/vcpu_insn.c @@ -214,7 +214,8 @@ struct csr_func { unsigned long wr_mask); }; -static const struct csr_func csr_funcs[] = { }; +static const struct csr_func csr_funcs[] = { +}; /** * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c new file mode 100644 index 000000000000..3168ed740bdd --- /dev/null +++ b/arch/riscv/kvm/vcpu_pmu.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 Rivos Inc + * + * Authors: + * Atish Patra + */ + +#include +#include +#include +#include +#include +#include +#include + +int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, unsigned long *out_val) +{ + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + + if (!kvpmu) + return -EINVAL; + + *out_val = kvpmu->num_fw_ctrs + kvpmu->num_hw_ctrs; + return 0; +} + +int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx, + unsigned long *ctr_info) +{ + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + + if (!kvpmu || (cidx > RISCV_MAX_COUNTERS) || (cidx == 1)) + return -EINVAL; + + *ctr_info = kvpmu->pmc[cidx].cinfo.value; + + return 0; +} + +int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base, + unsigned long ctr_mask, unsigned long flag, uint64_t ival) +{ + /* TODO */ + return 0; +} + +int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base, + unsigned long ctr_mask, unsigned long flag) +{ + /* TODO */ + return 0; +} + +int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base, + unsigned long ctr_mask, unsigned long flag, + unsigned long eidx, uint64_t edata) +{ + /* TODO */ + return 0; +} + +int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx, + unsigned long *out_val) +{ + /* TODO */ + return 0; +} + +int kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) +{ + int i = 0, num_hw_ctrs, num_fw_ctrs, hpm_width; + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + + if (!kvpmu) + return -EINVAL; + + num_hw_ctrs = riscv_pmu_sbi_get_num_hw_ctrs(); + if ((num_hw_ctrs + RISCV_KVM_MAX_FW_CTRS) > RISCV_MAX_COUNTERS) + num_fw_ctrs = RISCV_MAX_COUNTERS - num_hw_ctrs; + else + num_fw_ctrs = RISCV_KVM_MAX_FW_CTRS; + + hpm_width = riscv_pmu_sbi_hpmc_width(); + if (hpm_width <= 0) { + pr_err("Can not initialize PMU for vcpu as hpmcounter width is not 
available\n"); + return -EINVAL; + } + + kvpmu->num_hw_ctrs = num_hw_ctrs; + kvpmu->num_fw_ctrs = num_fw_ctrs; + /* + * There is no corelation betwen the logical hardware counter and virtual counters. + * However, we need to encode a hpmcounter CSR in the counter info field so that + * KVM can trap n emulate the read. This works well in the migraiton usecase as well + * KVM doesn't care if the actual hpmcounter is available in the hardware or not. + */ + for (i = 0; i < num_hw_ctrs + num_fw_ctrs; i++) { + /* TIME CSR shouldn't be read from perf interface */ + if (i == 1) + continue; + kvpmu->pmc[i].idx = i; + kvpmu->pmc[i].vcpu = vcpu; + if (i < kvpmu->num_hw_ctrs) { + kvpmu->pmc[i].cinfo.type = SBI_PMU_CTR_TYPE_HW; + if (i < 3) + /* CY, IR counters */ + kvpmu->pmc[i].cinfo.width = 63; + else + kvpmu->pmc[i].cinfo.width = hpm_width; + /* + * The CSR number doesn't have any relation with the logical + * hardware counters. The CSR numbers are encoded sequentially + * to avoid maintaining a map between the virtual counter + * and CSR number. + */ + kvpmu->pmc[i].cinfo.csr = CSR_CYCLE + i; + } else { + kvpmu->pmc[i].cinfo.type = SBI_PMU_CTR_TYPE_FW; + kvpmu->pmc[i].cinfo.width = BITS_PER_LONG - 1; + } + } + + return 0; +} + +void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) +{ + /* TODO */ +} + +void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) +{ + /* TODO */ +} + From patchwork Mon Jul 18 17:02:02 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Atish Kumar Patra X-Patchwork-Id: 12921483 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id B64ACC43334 for ; Mon, 18 Jul 2022 17:06:03 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235775AbiGRRGB (ORCPT ); Mon, 18 Jul 2022 13:06:01 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:57608 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235792AbiGRRF6 (ORCPT ); Mon, 18 Jul 2022 13:05:58 -0400 Received: from mail-pj1-x1035.google.com (mail-pj1-x1035.google.com [IPv6:2607:f8b0:4864:20::1035]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id A62B22C127 for ; Mon, 18 Jul 2022 10:05:51 -0700 (PDT) Received: by mail-pj1-x1035.google.com with SMTP id z12-20020a17090a7b8c00b001ef84000b8bso18831508pjc.1 for ; Mon, 18 Jul 2022 10:05:51 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=rivosinc-com.20210112.gappssmtp.com; s=20210112; h=from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-transfer-encoding; bh=6NyzMxmt/WAE6+0kMZIP7wK2TvVq8SGkQD4SMxCtamA=; b=rE2p2vnI0/LzOurYbXKj4a1dOYnbseYzbfsMdemkPv4As2iWMSTt2DB6SveWWZ83bE Tk9XPTQiBtGl6zi8QHUbOgHAaWFo189CV6uO9dPD2VMSxoS+PCn3rGrPakfP7DUDkotq kcYB4LiERhIDMCqNxqoHIZvTKepp323ApuWpGfOTSPo09TedaCwpzo5VqXF1t8psbA/h BYIk+vIJz+xRlek2rg4cIzEodJRQQan/1Mc/KaIisB/nVHTMfNIeW8pGXUwe3MMuKCAw LlCcf9/S3UkVz/oLdgk5vIIOdiDkiRq4geG4fQwQ9kVhYvFln1fahMBqkktEb1awnhh2 RcIw== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references:mime-version:content-transfer-encoding; bh=6NyzMxmt/WAE6+0kMZIP7wK2TvVq8SGkQD4SMxCtamA=; b=CUwvAalBvA+qW5r/VJb82s5JLDgY9BtQMYwy9pxLKSSR3oK8OVOFmk3x1dmqyituXa 
gFK22oHZBT+NKRl19yseF3UNqNheGQ79Ro/0ZDF8hr4l5XZZq6l5WZWrFNtPxcwMkjLK +BmUMvtpkm85Qrxm5ByenhakYoOjPjbhv5tz69L/I6fPj2CxTtgvXfh82lbMjeJsY/Pw Av5w38XhECvCld3UPDmm8wAJ9G4x8MK+efV/SUrTvMJLlBT4dfjcIoPEmgt0gsl4vAx2 oB/QbdLhdvklU5j8Rzd6o4gPPlhTCLTQTL9HHifuahjit256H9AD/nBvAcTvnSTwCmkq Hh/Q== X-Gm-Message-State: AJIora8R/ZKQDVS9KcKnGlLGmdrfBE1vcwVC0wJphIzXCcsE0WbZDOru ABMlH0QxjE7+4gCxy8Ky+77+lA== X-Google-Smtp-Source: AGRyM1uH69PKvvBNxNCme8+NWvuUJNuL8CI0U/0FZX6mfAo3vPZF1+2AA8FuSPDj+/ikwZ2KkGTmsw== X-Received: by 2002:a17:90a:c088:b0:1ef:b85c:576b with SMTP id o8-20020a17090ac08800b001efb85c576bmr39888216pjs.237.1658163950929; Mon, 18 Jul 2022 10:05:50 -0700 (PDT) Received: from atishp.ba.rivosinc.com ([66.220.2.162]) by smtp.gmail.com with ESMTPSA id r10-20020a170902be0a00b0016bc947c5b7sm9733402pls.38.2022.07.18.10.05.49 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Mon, 18 Jul 2022 10:05:50 -0700 (PDT) From: Atish Patra To: linux-kernel@vger.kernel.org Cc: Atish Patra , Albert Ou , Anup Patel , Atish Patra , Guo Ren , kvm-riscv@lists.infradead.org, kvm@vger.kernel.org, linux-riscv@lists.infradead.org, Mark Rutland , Palmer Dabbelt , Paul Walmsley , Will Deacon Subject: [RFC 6/9] RISC-V: KVM: Add SBI PMU extension support Date: Mon, 18 Jul 2022 10:02:02 -0700 Message-Id: <20220718170205.2972215-7-atishp@rivosinc.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220718170205.2972215-1-atishp@rivosinc.com> References: <20220718170205.2972215-1-atishp@rivosinc.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org SBI PMU extension allows KVM guests to configure/start/stop/query about the PMU counters in virtualized enviornment as well. In order to allow that, KVM implements the entire SBI PMU extension. Signed-off-by: Atish Patra --- arch/riscv/kvm/vcpu_sbi.c | 11 +++++ arch/riscv/kvm/vcpu_sbi_pmu.c | 81 +++++++++++++++++++++++++++++++++++ 2 files changed, 92 insertions(+) create mode 100644 arch/riscv/kvm/vcpu_sbi_pmu.c diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index d45e7da3f0d3..da9f7959340e 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -50,6 +50,16 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm; extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental; extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor; +#ifdef CONFIG_RISCV_PMU_SBI +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu; +#else +static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = { + .extid_start = -1UL, + .extid_end = -1UL, + .handler = NULL, +}; +#endif + static const struct kvm_vcpu_sbi_extension *sbi_ext[] = { &vcpu_sbi_ext_v01, &vcpu_sbi_ext_base, @@ -58,6 +68,7 @@ static const struct kvm_vcpu_sbi_extension *sbi_ext[] = { &vcpu_sbi_ext_rfence, &vcpu_sbi_ext_srst, &vcpu_sbi_ext_hsm, + &vcpu_sbi_ext_pmu, &vcpu_sbi_ext_experimental, &vcpu_sbi_ext_vendor, }; diff --git a/arch/riscv/kvm/vcpu_sbi_pmu.c b/arch/riscv/kvm/vcpu_sbi_pmu.c new file mode 100644 index 000000000000..90c51a95d4f4 --- /dev/null +++ b/arch/riscv/kvm/vcpu_sbi_pmu.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 Rivos Inc + * + * Authors: + * Atish Patra + */ + +#include +#include +#include +#include +#include +#include + +static int kvm_sbi_ext_pmu_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, + unsigned long *out_val, + struct kvm_cpu_trap *utrap, + bool *exit) +{ + int ret = -EOPNOTSUPP; + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + unsigned long funcid 
= cp->a6; + uint64_t temp; + + switch (funcid) { + case SBI_EXT_PMU_NUM_COUNTERS: + ret = kvm_riscv_vcpu_pmu_num_ctrs(vcpu, out_val); + break; + case SBI_EXT_PMU_COUNTER_GET_INFO: + ret = kvm_riscv_vcpu_pmu_ctr_info(vcpu, cp->a0, out_val); + break; + case SBI_EXT_PMU_COUNTER_CFG_MATCH: +#if defined(CONFIG_32BIT) + temp = ((uint64_t)cp->a5 << 32) | cp->a4; +#else + temp = cp->a4; +#endif + ret = kvm_riscv_vcpu_pmu_ctr_cfg_match(vcpu, cp->a0, cp->a1, cp->a2, cp->a3, temp); + if (ret >= 0) { + *out_val = ret; + ret = 0; + } + break; + case SBI_EXT_PMU_COUNTER_START: +#if defined(CONFIG_32BIT) + temp = ((uint64_t)cp->a4 << 32) | cp->a3; +#else + temp = cp->a3; +#endif + ret = kvm_riscv_vcpu_pmu_ctr_start(vcpu, cp->a0, cp->a1, cp->a2, temp); + break; + case SBI_EXT_PMU_COUNTER_STOP: + ret = kvm_riscv_vcpu_pmu_ctr_stop(vcpu, cp->a0, cp->a1, cp->a2); + break; + case SBI_EXT_PMU_COUNTER_FW_READ: + ret = kvm_riscv_vcpu_pmu_ctr_read(vcpu, cp->a0, out_val); + break; + default: + ret = -EOPNOTSUPP; + } + + return ret; +} + +unsigned long kvm_sbi_ext_pmu_probe(unsigned long extid) +{ + /* + * PMU Extension is only available to guests if privilege mode filtering + * is available. Otherwise, guest will always count events while the + * execution is in hypervisor mode. + */ + return riscv_isa_extension_available(NULL, SSCOFPMF); +} + +const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = { + .extid_start = SBI_EXT_PMU, + .extid_end = SBI_EXT_PMU, + .handler = kvm_sbi_ext_pmu_handler, + .probe = kvm_sbi_ext_pmu_probe, +}; From patchwork Mon Jul 18 17:02:03 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Atish Kumar Patra X-Patchwork-Id: 12921485 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id E5259CCA482 for ; Mon, 18 Jul 2022 17:06:06 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235877AbiGRRGF (ORCPT ); Mon, 18 Jul 2022 13:06:05 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:57660 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235800AbiGRRF6 (ORCPT ); Mon, 18 Jul 2022 13:05:58 -0400 Received: from mail-pg1-x529.google.com (mail-pg1-x529.google.com [IPv6:2607:f8b0:4864:20::529]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 7BA152C130 for ; Mon, 18 Jul 2022 10:05:52 -0700 (PDT) Received: by mail-pg1-x529.google.com with SMTP id h132so11122254pgc.10 for ; Mon, 18 Jul 2022 10:05:52 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=rivosinc-com.20210112.gappssmtp.com; s=20210112; h=from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-transfer-encoding; bh=5MMCsELdRAdJh5aLmNFcZOyRr/D0RiLtBOcd/jSRJiU=; b=SZ6A4CByNWqPAfqTGaz70lPNVZuptERZ0n0u8v6E97djjISARvexx57xy8GGUdpF+7 r/wz4gME1iPiN5m7T/2MCqyMwwdb3xeM41QcjwrrZmA5tMKBIkMLySe/imOL/raWFmnG xKa2mhJFZi0teVbY57hPs2+Bzzc0QSh8qG7kXumoXK9+hpWi6PD4chu1ayhJkh+miktd kXl5bOQIqIO/rE0/ExIZcBvmUHepnI2gs3SEIrxc3DXGAcDopac/yjWfOFX1XQdiPV6v 0OgNbjia5UFMdtMEtdDpfKRXrszArD3bfhiVSlVxJie93lXnVZuvy7mPDqFJzLTsWe0j j/mg== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references:mime-version:content-transfer-encoding; bh=5MMCsELdRAdJh5aLmNFcZOyRr/D0RiLtBOcd/jSRJiU=; 
From: Atish Patra
To: linux-kernel@vger.kernel.org
Cc: Atish Patra, Albert Ou, Anup Patel, Atish Patra, Guo Ren, kvm-riscv@lists.infradead.org, kvm@vger.kernel.org, linux-riscv@lists.infradead.org, Mark Rutland, Palmer Dabbelt, Paul Walmsley, Will Deacon
Subject: [RFC 7/9] RISC-V: KVM: Implement trap & emulate for hpmcounters
Date: Mon, 18 Jul 2022 10:02:03 -0700
Message-Id: <20220718170205.2972215-8-atishp@rivosinc.com>
In-Reply-To: <20220718170205.2972215-1-atishp@rivosinc.com>
References: <20220718170205.2972215-1-atishp@rivosinc.com>

Since KVM guests only see the virtual PMU counters, all hpmcounter accesses should trap, and KVM emulates the read access on behalf of the guest.
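For context, a minimal guest-side sketch (illustrative only, not part of this patch; the function name is hypothetical): because an earlier patch in this series leaves only the TM bit set in HCOUNTEREN, a guest read of a hardware performance counter CSR traps to KVM instead of reading the host counter, and the handler added below services it by translating the CSR number into a virtual counter index (csr_num - CSR_CYCLE).

	/*
	 * Guest-side view (sketch): this csrr does not read a physical counter;
	 * it traps to the hypervisor, which returns the virtual counter value
	 * via kvm_riscv_vcpu_pmu_read_hpm() and resumes the guest.
	 */
	static inline unsigned long guest_read_hpmcounter3(void)
	{
		unsigned long val;

		asm volatile("csrr %0, hpmcounter3" : "=r" (val));
		return val;
	}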
Signed-off-by: Atish Patra --- arch/riscv/include/asm/kvm_vcpu_pmu.h | 16 +++++++++ arch/riscv/kvm/vcpu_insn.c | 1 + arch/riscv/kvm/vcpu_pmu.c | 47 +++++++++++++++++++++++---- 3 files changed, 57 insertions(+), 7 deletions(-) diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h index bffee052f2ae..5410236b62a8 100644 --- a/arch/riscv/include/asm/kvm_vcpu_pmu.h +++ b/arch/riscv/include/asm/kvm_vcpu_pmu.h @@ -39,6 +39,19 @@ struct kvm_pmu { #define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu)) #define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu) +#if defined(CONFIG_32BIT) +#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ +{ .base = CSR_CYCLEH, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, \ +{ .base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, +#else +#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ +{ .base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, +#endif + +int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num, + unsigned long *val, unsigned long new_val, + unsigned long wr_mask); + int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, unsigned long *out_val); int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx, unsigned long *ctr_info); @@ -59,6 +72,9 @@ void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu); #else struct kvm_pmu { }; +#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ +{ .base = 0, .count = 0, .func = NULL }, + static inline int kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) { diff --git a/arch/riscv/kvm/vcpu_insn.c b/arch/riscv/kvm/vcpu_insn.c index 0aa334f853c8..7c2a4b1a69f7 100644 --- a/arch/riscv/kvm/vcpu_insn.c +++ b/arch/riscv/kvm/vcpu_insn.c @@ -215,6 +215,7 @@ struct csr_func { }; static const struct csr_func csr_funcs[] = { + KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS }; /** diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c index 3168ed740bdd..5434051f495d 100644 --- a/arch/riscv/kvm/vcpu_pmu.c +++ b/arch/riscv/kvm/vcpu_pmu.c @@ -14,6 +14,46 @@ #include #include +int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx, + unsigned long *out_val) +{ + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + struct kvm_pmc *pmc; + u64 enabled, running; + + if (!kvpmu) + return -EINVAL; + + pmc = &kvpmu->pmc[cidx]; + if (!pmc->perf_event) + return -EINVAL; + + pmc->counter_val += perf_event_read_value(pmc->perf_event, &enabled, &running); + *out_val = pmc->counter_val; + + return 0; +} + +int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num, + unsigned long *val, unsigned long new_val, + unsigned long wr_mask) +{ + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + int cidx, ret = KVM_INSN_CONTINUE_NEXT_SEPC; + + if (!kvpmu) + return KVM_INSN_EXIT_TO_USER_SPACE; + //TODO: Should we check if vcpu pmu is initialized or not! 
+ if (wr_mask) + return KVM_INSN_ILLEGAL_TRAP; + cidx = csr_num - CSR_CYCLE; + + if (kvm_riscv_vcpu_pmu_ctr_read(vcpu, cidx, val) < 0) + return KVM_INSN_EXIT_TO_USER_SPACE; + + return ret; +} + int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, unsigned long *out_val) { struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); @@ -60,13 +100,6 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba return 0; } -int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx, - unsigned long *out_val) -{ - /* TODO */ - return 0; -} - int kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) { int i = 0, num_hw_ctrs, num_fw_ctrs, hpm_width; From patchwork Mon Jul 18 17:02:04 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Atish Kumar Patra X-Patchwork-Id: 12921487 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 179BFC43334 for ; Mon, 18 Jul 2022 17:06:37 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235878AbiGRRGL (ORCPT ); Mon, 18 Jul 2022 13:06:11 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:57232 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235810AbiGRRF6 (ORCPT ); Mon, 18 Jul 2022 13:05:58 -0400 Received: from mail-pg1-x52d.google.com (mail-pg1-x52d.google.com [IPv6:2607:f8b0:4864:20::52d]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 36BC62C135 for ; Mon, 18 Jul 2022 10:05:54 -0700 (PDT) Received: by mail-pg1-x52d.google.com with SMTP id s27so11113643pga.13 for ; Mon, 18 Jul 2022 10:05:54 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=rivosinc-com.20210112.gappssmtp.com; s=20210112; h=from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-transfer-encoding; bh=DudDSxwzmUDI8860lqzmSl3O3ezUAmmtuRpFvcpbiqM=; b=JMzb1okk1veZl1WbSRPR3z17BIsd9O28rSDVH2XWQZCNRxgtk2tCWrlaeK5NxJTDEb FJWxY4TJmoF3uHGy9WZrm9GYkZ36LMHqAlVR2tm8j+c2ZoCZDQrPIBdkGgR+QgAH1Ohi z7tYM8RpqGojLgmmp0e0PRXY6gyP8SzP2CCBeQRg95QP6tk4fxllVfcixW1Rm5j/2Zna XGs69YvPA2xMlFeY76U7OYenE5lcyVzTa5y98YGTtRBKYt94Azob+tJYzUlSxIseK/ob esOf2ChMSWIasebDWTQgiuQoUzqQc/CzzoPjCeNeUQFhr5ygWelbNFItPI9EZr7XrFHo 8zsA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references:mime-version:content-transfer-encoding; bh=DudDSxwzmUDI8860lqzmSl3O3ezUAmmtuRpFvcpbiqM=; b=Fh81Gg4wcwqvN0TiPDel5IjjBbqv9BOOS+lCNBLuohPPJzSsr+6clSsCLhiF1wFKtZ kioVKIBXIl86ucytWosiE266sDrgDFErsLOnWlzCCGewwHuHROSm1OHokEzpy6dqg54V 5tfzAXdEOFjwRbcMyPOKre6C2CFWmUcPx3ZWdnuHt0cOcJNqgloCU95fjOH/9a4NaVWy z8bcVjkEzVnGRdXK4SA0FVSdq88vtOXOiZsLMNlgSaGnf8t4vJnRbHZzjbKviGWyOrFg jQ4n7Uy2q/YkXZ7/D7iR2+Q3pmqlY6HooZeWtT61Pg1Z41tFqtGdaVIjUKlH+bvSlS4m huPA== X-Gm-Message-State: AJIora+Ei+oD24BYuwZyF/qkBRSkrj9qI/wIulZ1hTnM/RElOOX6XILP 69eetN9bbi3UppwY+qOI3mEDJw== X-Google-Smtp-Source: AGRyM1uaEuvYAKQDRhJRtgw3nQLKDv5Q+LFtC+ezT9Le6ONfC43Kv6qU/4zj/oJlbv8mg+IvdwsCug== X-Received: by 2002:a63:5c42:0:b0:412:b2e9:97e4 with SMTP id n2-20020a635c42000000b00412b2e997e4mr26596777pgm.36.1658163953563; Mon, 18 Jul 2022 10:05:53 -0700 (PDT) Received: from atishp.ba.rivosinc.com ([66.220.2.162]) by smtp.gmail.com with ESMTPSA id 
r10-20020a170902be0a00b0016bc947c5b7sm9733402pls.38.2022.07.18.10.05.52 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Mon, 18 Jul 2022 10:05:53 -0700 (PDT) From: Atish Patra To: linux-kernel@vger.kernel.org Cc: Atish Patra , Albert Ou , Anup Patel , Atish Patra , Guo Ren , kvm-riscv@lists.infradead.org, kvm@vger.kernel.org, linux-riscv@lists.infradead.org, Mark Rutland , Palmer Dabbelt , Paul Walmsley , Will Deacon Subject: [RFC 8/9] RISC-V: KVM: Implement perf support Date: Mon, 18 Jul 2022 10:02:04 -0700 Message-Id: <20220718170205.2972215-9-atishp@rivosinc.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220718170205.2972215-1-atishp@rivosinc.com> References: <20220718170205.2972215-1-atishp@rivosinc.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: kvm@vger.kernel.org RISC-V SBI PMU & Sscofpmf ISA extension allows supporting perf in the virtualization enviornment as well. KVM implementation relies on SBI PMU extension for most of the part while traps & emulates the CSRs read for counter access. Signed-off-by: Atish Patra --- arch/riscv/kvm/vcpu_pmu.c | 318 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 301 insertions(+), 17 deletions(-) diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c index 5434051f495d..278c261efad3 100644 --- a/arch/riscv/kvm/vcpu_pmu.c +++ b/arch/riscv/kvm/vcpu_pmu.c @@ -11,9 +11,163 @@ #include #include #include +#include #include #include +#define get_event_type(x) ((x & SBI_PMU_EVENT_IDX_TYPE_MASK) >> 16) +#define get_event_code(x) (x & SBI_PMU_EVENT_IDX_CODE_MASK) + +static inline u64 pmu_get_sample_period(struct kvm_pmc *pmc) +{ + u64 counter_val_mask = GENMASK(pmc->cinfo.width, 0); + u64 sample_period; + + if (!pmc->counter_val) + sample_period = counter_val_mask; + else + sample_period = pmc->counter_val & counter_val_mask; + + return sample_period; +} + +static u32 pmu_get_perf_event_type(unsigned long eidx) +{ + enum sbi_pmu_event_type etype = get_event_type(eidx); + u32 type; + + if (etype == SBI_PMU_EVENT_TYPE_HW) + type = PERF_TYPE_HARDWARE; + else if (etype == SBI_PMU_EVENT_TYPE_CACHE) + type = PERF_TYPE_HW_CACHE; + else if (etype == SBI_PMU_EVENT_TYPE_RAW || etype == SBI_PMU_EVENT_TYPE_FW) + type = PERF_TYPE_RAW; + else + type = PERF_TYPE_MAX; + + return type; +} + +static inline bool pmu_is_fw_event(unsigned long eidx) +{ + enum sbi_pmu_event_type etype = get_event_type(eidx); + + return (etype == SBI_PMU_EVENT_TYPE_FW) ? true : false; +} + +static void pmu_release_perf_event(struct kvm_pmc *pmc) +{ + if (pmc->perf_event) { + perf_event_disable(pmc->perf_event); + perf_event_release_kernel(pmc->perf_event); + pmc->perf_event = NULL; + } +} + +static u64 pmu_get_perf_event_hw_config(u32 sbi_event_code) +{ + /* SBI PMU HW event code is offset by 1 from perf hw event codes */ + return (u64)sbi_event_code - 1; +} + +static u64 pmu_get_perf_event_cache_config(u32 sbi_event_code) +{ + u64 config = U64_MAX; + unsigned int cache_type, cache_op, cache_result; + + /* All the cache event masks lie within 0xFF. 
No separate masking is necessary */ + cache_type = (sbi_event_code & SBI_PMU_EVENT_CACHE_ID_CODE_MASK) >> 3; + cache_op = (sbi_event_code & SBI_PMU_EVENT_CACHE_OP_ID_CODE_MASK) >> 1; + cache_result = sbi_event_code & SBI_PMU_EVENT_CACHE_RESULT_ID_CODE_MASK; + + if (cache_type >= PERF_COUNT_HW_CACHE_MAX || + cache_op >= PERF_COUNT_HW_CACHE_OP_MAX || + cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + goto out; + config = cache_type | (cache_op << 8) | (cache_result << 16); +out: + return config; +} + +static u64 pmu_get_perf_event_config(unsigned long eidx, uint64_t edata) +{ + enum sbi_pmu_event_type etype = get_event_type(eidx); + u32 ecode = get_event_code(eidx); + u64 config = U64_MAX; + + if (etype == SBI_PMU_EVENT_TYPE_HW) + config = pmu_get_perf_event_hw_config(ecode); + else if (etype == SBI_PMU_EVENT_TYPE_CACHE) + config = pmu_get_perf_event_cache_config(ecode); + else if (etype == SBI_PMU_EVENT_TYPE_RAW) + config = edata & RISCV_PMU_RAW_EVENT_MASK; + else if ((etype == SBI_PMU_EVENT_TYPE_FW) && (ecode < SBI_PMU_FW_MAX)) + config = (1ULL << 63) | ecode; + + return config; +} + +static int pmu_get_fixed_pmc_index(unsigned long eidx) +{ + u32 etype = pmu_get_perf_event_type(eidx); + u32 ecode = get_event_code(eidx); + int ctr_idx; + + if (etype != SBI_PMU_EVENT_TYPE_HW) + return -EINVAL; + + if (ecode == SBI_PMU_HW_CPU_CYCLES) + ctr_idx = 0; + else if (ecode == SBI_PMU_HW_INSTRUCTIONS) + ctr_idx = 2; + else + return -EINVAL; + + return ctr_idx; +} + +static int pmu_get_programmable_pmc_index(struct kvm_pmu *kvpmu, unsigned long eidx, + unsigned long cbase, unsigned long cmask) +{ + int ctr_idx = -1; + int i, pmc_idx; + int min, max; + + if (pmu_is_fw_event(eidx)) { + /* Firmware counters are mapped 1:1 starting from num_hw_ctrs for simplicity */ + min = kvpmu->num_hw_ctrs; + max = min + kvpmu->num_fw_ctrs; + } else { + /* First 3 counters are reserved for fixed counters */ + min = 3; + max = kvpmu->num_hw_ctrs; + } + + for_each_set_bit(i, &cmask, BITS_PER_LONG) { + pmc_idx = i + cbase; + if ((pmc_idx >= min && pmc_idx < max) && + !test_bit(pmc_idx, kvpmu->used_pmc)) { + ctr_idx = pmc_idx; + break; + } + } + + return ctr_idx; +} + +static int pmu_get_pmc_index(struct kvm_pmu *pmu, unsigned long eidx, + unsigned long cbase, unsigned long cmask) +{ + int ret; + + /* Fixed counters need to have a fixed mapping as they have a different width */ + ret = pmu_get_fixed_pmc_index(eidx); + if (ret >= 0) + return ret; + + return pmu_get_programmable_pmc_index(pmu, eidx, cbase, cmask); +} + int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx, unsigned long *out_val) { @@ -43,7 +197,6 @@ int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num, if (!kvpmu) return KVM_INSN_EXIT_TO_USER_SPACE; - //TODO: Should we check if vcpu pmu is initialized or not!
if (wr_mask) return KVM_INSN_ILLEGAL_TRAP; cidx = csr_num - CSR_CYCLE; @@ -81,14 +234,62 @@ int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx, int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base, unsigned long ctr_mask, unsigned long flag, uint64_t ival) { - /* TODO */ + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + int i, num_ctrs, pmc_index; + struct kvm_pmc *pmc; + + num_ctrs = kvpmu->num_fw_ctrs + kvpmu->num_hw_ctrs; + if (ctr_base + __fls(ctr_mask) >= num_ctrs) + return -EINVAL; + + /* Start the counters that have been configured and requested by the guest */ + for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) { + pmc_index = i + ctr_base; + if (!test_bit(pmc_index, kvpmu->used_pmc)) + continue; + pmc = &kvpmu->pmc[pmc_index]; + if (flag & SBI_PMU_START_FLAG_SET_INIT_VALUE) + pmc->counter_val = ival; + if (pmc->perf_event) { + perf_event_period(pmc->perf_event, pmu_get_sample_period(pmc)); + perf_event_enable(pmc->perf_event); + } + } + return 0; } int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base, unsigned long ctr_mask, unsigned long flag) { - /* TODO */ + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + int i, num_ctrs, pmc_index; + u64 enabled, running; + struct kvm_pmc *pmc; + + num_ctrs = kvpmu->num_fw_ctrs + kvpmu->num_hw_ctrs; + if ((ctr_base + __fls(ctr_mask)) >= num_ctrs) + return -EINVAL; + + /* Stop the counters that have been configured and requested by the guest */ + for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) { + pmc_index = i + ctr_base; + if (!test_bit(pmc_index, kvpmu->used_pmc)) + continue; + pmc = &kvpmu->pmc[pmc_index]; + if (pmc->perf_event) { + /* Stop counting the counter */ + perf_event_disable(pmc->perf_event); + if (flag & SBI_PMU_STOP_FLAG_RESET) { + /* Release the counter if this is a reset request */ + pmc->counter_val += perf_event_read_value(pmc->perf_event, + &enabled, &running); + pmu_release_perf_event(pmc); + clear_bit(pmc_index, kvpmu->used_pmc); + } + } + } + return 0; } @@ -96,14 +297,85 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba unsigned long ctr_mask, unsigned long flag, unsigned long eidx, uint64_t edata) { - /* TODO */ - return 0; + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + struct perf_event *event; + struct perf_event_attr attr; + int num_ctrs, ctr_idx; + u32 etype = pmu_get_perf_event_type(eidx); + u64 config; + struct kvm_pmc *pmc; + + num_ctrs = kvpmu->num_fw_ctrs + kvpmu->num_hw_ctrs; + if ((etype == PERF_TYPE_MAX) || ((ctr_base + __fls(ctr_mask)) >= num_ctrs)) + return -EINVAL; + + if (pmu_is_fw_event(eidx)) + return -EOPNOTSUPP; + /* + * SKIP_MATCH flag indicates the caller is aware of the assigned counter + * for this event. Just do a sanity check if it is already marked used. + */ + if (flag & SBI_PMU_CFG_FLAG_SKIP_MATCH) { + if (!test_bit(ctr_base, kvpmu->used_pmc)) + return -EINVAL; + ctr_idx = ctr_base; + goto match_done; + } + + ctr_idx = pmu_get_pmc_index(kvpmu, eidx, ctr_base, ctr_mask); + if (ctr_idx < 0) + return -EOPNOTSUPP; + +match_done: + pmc = &kvpmu->pmc[ctr_idx]; + pmu_release_perf_event(pmc); + pmc->idx = ctr_idx; + + config = pmu_get_perf_event_config(eidx, edata); + memset(&attr, 0, sizeof(struct perf_event_attr)); + attr.type = etype; + attr.size = sizeof(attr); + attr.pinned = true; + + /* + * It should never reach here if the platform doesn't support the sscofpmf extension + * as mode filtering won't work without it.
+ */ + attr.exclude_host = true; + attr.exclude_hv = true; + attr.exclude_user = flag & SBI_PMU_CFG_FLAG_SET_UINH ? 1 : 0; + attr.exclude_kernel = flag & SBI_PMU_CFG_FLAG_SET_SINH ? 1 : 0; + attr.config = config; + attr.config1 = RISCV_KVM_PMU_CONFIG1_GUEST_EVENTS; + if (flag & SBI_PMU_CFG_FLAG_CLEAR_VALUE) { + //TODO: Do we really want to clear the value in hardware counter + pmc->counter_val = 0; + } + /* + * Set the default sample_period for now. The guest specified value + * will be updated in the start call. + */ + attr.sample_period = pmu_get_sample_period(pmc); + + event = perf_event_create_kernel_counter(&attr, -1, current, NULL, pmc); + if (IS_ERR(event)) { + pr_err("kvm pmu event creation failed event %pe for eidx %lx\n", event, eidx); + return -EOPNOTSUPP; + } + + set_bit(ctr_idx, kvpmu->used_pmc); + pmc->perf_event = event; + if (flag & SBI_PMU_CFG_FLAG_AUTO_START) + perf_event_enable(pmc->perf_event); + + return ctr_idx; } int kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) { int i = 0, num_hw_ctrs, num_fw_ctrs, hpm_width; struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + struct kvm_pmc *pmc; if (!kvpmu) return -EINVAL; @@ -120,6 +392,7 @@ int kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) return -EINVAL; } + bitmap_zero(kvpmu->used_pmc, RISCV_MAX_COUNTERS); kvpmu->num_hw_ctrs = num_hw_ctrs; kvpmu->num_fw_ctrs = num_fw_ctrs; /* @@ -132,38 +405,49 @@ int kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) /* TIME CSR shouldn't be read from perf interface */ if (i == 1) continue; - kvpmu->pmc[i].idx = i; - kvpmu->pmc[i].vcpu = vcpu; + pmc = &kvpmu->pmc[i]; + pmc->idx = i; + pmc->counter_val = 0; + pmc->vcpu = vcpu; if (i < kvpmu->num_hw_ctrs) { kvpmu->pmc[i].cinfo.type = SBI_PMU_CTR_TYPE_HW; if (i < 3) /* CY, IR counters */ - kvpmu->pmc[i].cinfo.width = 63; + pmc->cinfo.width = 63; else - kvpmu->pmc[i].cinfo.width = hpm_width; + pmc->cinfo.width = hpm_width; /* * The CSR number doesn't have any relation with the logical * hardware counters. The CSR numbers are encoded sequentially * to avoid maintaining a map between the virtual counter * and CSR number. 
*/ - kvpmu->pmc[i].cinfo.csr = CSR_CYCLE + i; + pmc->cinfo.csr = CSR_CYCLE + i; } else { - kvpmu->pmc[i].cinfo.type = SBI_PMU_CTR_TYPE_FW; - kvpmu->pmc[i].cinfo.width = BITS_PER_LONG - 1; + pmc->cinfo.type = SBI_PMU_CTR_TYPE_FW; + pmc->cinfo.width = BITS_PER_LONG - 1; } } return 0; } -void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) +void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) { - /* TODO */ + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + struct kvm_pmc *pmc; + int i; + + if (!kvpmu) + return; + + for_each_set_bit(i, kvpmu->used_pmc, RISCV_MAX_COUNTERS) { + pmc = &kvpmu->pmc[i]; + pmu_release_perf_event(pmc); + } } -void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) +void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) { - /* TODO */ + kvm_riscv_vcpu_pmu_deinit(vcpu); } -
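Patch 8 above derives everything perf needs from the SBI event index: cache events are repacked into perf's documented id | (op << 8) | (result << 16) config layout, and the initial sample period is taken from the guest counter value masked to the counter width (GENMASK(width, 0)). The standalone sketch below is not part of the posted patches; the cache-field handling and width clamping are assumptions mirroring the masks and shifts shown in the patch:

/* Standalone illustration only -- not part of the posted patches. */
#include <stdint.h>
#include <stdio.h>

/* perf's hw_cache config layout: cache id | (op << 8) | (result << 16) */
static uint64_t cache_config(unsigned int id, unsigned int op, unsigned int result)
{
	return (uint64_t)id | ((uint64_t)op << 8) | ((uint64_t)result << 16);
}

/* Sample period derived from the guest counter value, clamped to the
 * counter width; a zero value selects the full-range period, as in the patch. */
static uint64_t sample_period(uint64_t counter_val, unsigned int width)
{
	uint64_t mask = (width >= 63) ? UINT64_MAX : ((1ULL << (width + 1)) - 1);

	return counter_val ? (counter_val & mask) : mask;
}

int main(void)
{
	/* e.g. L1D read miss: id = 0, op = 0, result = 1 -> 0x10000 */
	printf("cache config  = 0x%llx\n", (unsigned long long)cache_config(0, 0, 1));
	printf("sample period = 0x%llx\n", (unsigned long long)sample_period(0, 63));
	return 0;
}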
From patchwork Mon Jul 18 17:02:05 2022 X-Patchwork-Submitter: Atish Kumar Patra X-Patchwork-Id: 12921486 From: Atish Patra To: linux-kernel@vger.kernel.org Cc: Atish Patra , Albert Ou , Anup Patel , Atish Patra , Guo Ren , kvm-riscv@lists.infradead.org, kvm@vger.kernel.org, linux-riscv@lists.infradead.org, Mark Rutland , Palmer Dabbelt , Paul Walmsley , Will Deacon Subject: [RFC 9/9] RISC-V: KVM: Implement firmware events Date: Mon, 18 Jul 2022 10:02:05 -0700 Message-Id: <20220718170205.2972215-10-atishp@rivosinc.com> In-Reply-To: <20220718170205.2972215-1-atishp@rivosinc.com> References: <20220718170205.2972215-1-atishp@rivosinc.com> List-ID: X-Mailing-List: kvm@vger.kernel.org The SBI PMU extension defines a set of firmware events which can provide useful information to guests about the number of SBI calls. As the hypervisor implements the SBI PMU extension, these firmware events correspond to ecall invocations between VS->HS mode. All other firmware events will always report zero if monitored, as KVM doesn't implement them. Signed-off-by: Atish Patra --- arch/riscv/include/asm/kvm_vcpu_pmu.h | 16 +++++ arch/riscv/include/asm/sbi.h | 2 +- arch/riscv/kvm/tlb.c | 6 +- arch/riscv/kvm/vcpu_pmu.c | 90 +++++++++++++++++++++++---- arch/riscv/kvm/vcpu_sbi_replace.c | 7 +++ 5 files changed, 106 insertions(+), 15 deletions(-) diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h index 5410236b62a8..d68b17ea796b 100644 --- a/arch/riscv/include/asm/kvm_vcpu_pmu.h +++ b/arch/riscv/include/asm/kvm_vcpu_pmu.h @@ -15,6 +15,14 @@ #ifdef CONFIG_RISCV_PMU_SBI #define RISCV_KVM_MAX_FW_CTRS 32 +struct kvm_fw_event { + /* Current value of the event */ + unsigned long value; + + /* Event monitoring status */ + bool started; +}; + /* Per virtual pmu counter data */ struct kvm_pmc { u8 idx; @@ -22,11 +30,14 @@ struct kvm_pmc { struct perf_event *perf_event; uint64_t counter_val; union sbi_pmu_ctr_info cinfo; + /* Monitoring event ID */ + unsigned long event_idx; }; /* PMU data structure per vcpu */ struct kvm_pmu { struct kvm_pmc pmc[RISCV_MAX_COUNTERS]; + struct kvm_fw_event fw_event[RISCV_KVM_MAX_FW_CTRS]; /* Number of the virtual firmware counters available */ int num_fw_ctrs; /* Number of the virtual hardware counters available */ @@ -48,6 +59,7 @@ struct kvm_pmu { { .base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, #endif +int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid); int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num, unsigned long *val, unsigned long new_val, unsigned long wr_mask); @@ -75,6 +87,10 @@ struct kvm_pmu { #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ { .base = 0, .count = 0, .func = NULL }, +static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid) +{ + return 0; +} static inline int kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) { diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 2a0ef738695e..a192a95a34eb 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -171,7 +171,7 @@ enum sbi_pmu_fw_generic_events_t { SBI_PMU_FW_IPI_SENT = 6, SBI_PMU_FW_IPI_RECVD = 7, SBI_PMU_FW_FENCE_I_SENT = 8, - SBI_PMU_FW_FENCE_I_RECVD = 9, + SBI_PMU_FW_FENCE_I_RCVD = 9, SBI_PMU_FW_SFENCE_VMA_SENT = 10, SBI_PMU_FW_SFENCE_VMA_RCVD
= 11, SBI_PMU_FW_SFENCE_VMA_ASID_SENT = 12, diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c index 1a76d0b1907d..0793d39e8ff7 100644 --- a/arch/riscv/kvm/tlb.c +++ b/arch/riscv/kvm/tlb.c @@ -240,6 +240,7 @@ void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu) void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu) { + kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD); local_flush_icache_all(); } @@ -323,15 +324,18 @@ void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu) d.addr, d.size, d.order); break; case KVM_RISCV_HFENCE_VVMA_ASID_GVA: + kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD); kvm_riscv_local_hfence_vvma_asid_gva( READ_ONCE(v->vmid), d.asid, d.addr, d.size, d.order); break; case KVM_RISCV_HFENCE_VVMA_ASID_ALL: + kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD); kvm_riscv_local_hfence_vvma_asid_all( READ_ONCE(v->vmid), d.asid); break; case KVM_RISCV_HFENCE_VVMA_GVA: + kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD); kvm_riscv_local_hfence_vvma_gva( READ_ONCE(v->vmid), d.addr, d.size, d.order); @@ -382,7 +386,7 @@ void kvm_riscv_fence_i(struct kvm *kvm, unsigned long hbase, unsigned long hmask) { make_xfence_request(kvm, hbase, hmask, KVM_REQ_FENCE_I, - KVM_REQ_FENCE_I, NULL); + KVM_REQ_FENCE_I, NULL); } void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm, diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c index 278c261efad3..f451d7ac2608 100644 --- a/arch/riscv/kvm/vcpu_pmu.c +++ b/arch/riscv/kvm/vcpu_pmu.c @@ -168,21 +168,39 @@ static int pmu_get_pmc_index(struct kvm_pmu *pmu, unsigned long eidx, return pmu_get_programmable_pmc_index(pmu, eidx, cbase, cmask); } +int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid) +{ + struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); + struct kvm_fw_event *fevent; + + if (!kvpmu || fid >= SBI_PMU_FW_MAX) + return -EINVAL; + + fevent = &kvpmu->fw_event[fid]; + if (fevent->started) + fevent->value++; + + return 0; +} + int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx, unsigned long *out_val) { struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); struct kvm_pmc *pmc; u64 enabled, running; + int fevent_code; if (!kvpmu) return -EINVAL; pmc = &kvpmu->pmc[cidx]; - if (!pmc->perf_event) - return -EINVAL; - pmc->counter_val += perf_event_read_value(pmc->perf_event, &enabled, &running); + if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { + fevent_code = get_event_code(pmc->event_idx); + pmc->counter_val = kvpmu->fw_event[fevent_code].value; + } else if (pmc->perf_event) + pmc->counter_val += perf_event_read_value(pmc->perf_event, &enabled, &running); *out_val = pmc->counter_val; return 0; @@ -237,6 +255,7 @@ int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base, struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); int i, num_ctrs, pmc_index; struct kvm_pmc *pmc; + int fevent_code; num_ctrs = kvpmu->num_fw_ctrs + kvpmu->num_hw_ctrs; if (ctr_base + __fls(ctr_mask) >= num_ctrs) @@ -250,7 +269,14 @@ int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base, pmc = &kvpmu->pmc[pmc_index]; if (flag & SBI_PMU_START_FLAG_SET_INIT_VALUE) pmc->counter_val = ival; - if (pmc->perf_event) { + if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { + fevent_code = get_event_code(pmc->event_idx); + if (fevent_code >= SBI_PMU_FW_MAX) + return -EINVAL; + + kvpmu->fw_event[fevent_code].started = true; + kvpmu->fw_event[fevent_code].value = pmc->counter_val; + } else if (pmc->perf_event) { perf_event_period(pmc->perf_event, 
pmu_get_sample_period(pmc)); perf_event_enable(pmc->perf_event); } @@ -266,6 +292,7 @@ int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base, int i, num_ctrs, pmc_index; u64 enabled, running; struct kvm_pmc *pmc; + int fevent_code; num_ctrs = kvpmu->num_fw_ctrs + kvpmu->num_hw_ctrs; if ((ctr_base + __fls(ctr_mask)) >= num_ctrs) @@ -277,7 +304,12 @@ int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base, if (!test_bit(pmc_index, kvpmu->used_pmc)) continue; pmc = &kvpmu->pmc[pmc_index]; - if (pmc->perf_event) { + if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { + fevent_code = get_event_code(pmc->event_idx); + if (fevent_code >= SBI_PMU_FW_MAX) + return -EINVAL; + kvpmu->fw_event[fevent_code].started = false; + } else if (pmc->perf_event) { /* Stop counting the counter */ perf_event_disable(pmc->perf_event); if (flag & SBI_PMU_STOP_FLAG_RESET) { @@ -285,9 +317,12 @@ int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base, pmc->counter_val += perf_event_read_value(pmc->perf_event, &enabled, &running); pmu_release_perf_event(pmc); - clear_bit(pmc_index, kvpmu->used_pmc); } } + if (flag & SBI_PMU_STOP_FLAG_RESET) { + pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; + clear_bit(pmc_index, kvpmu->used_pmc); + } } return 0; @@ -303,14 +338,19 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba int num_ctrs, ctr_idx; u32 etype = pmu_get_perf_event_type(eidx); u64 config; - struct kvm_pmc *pmc; + struct kvm_pmc *pmc = NULL; + bool is_fevent; + unsigned long event_code; num_ctrs = kvpmu->num_fw_ctrs + kvpmu->num_hw_ctrs; if ((etype == PERF_TYPE_MAX) || ((ctr_base + __fls(ctr_mask)) >= num_ctrs)) return -EINVAL; - if (pmu_is_fw_event(eidx)) + event_code = get_event_code(eidx); + is_fevent = pmu_is_fw_event(eidx); + if (is_fevent && event_code >= SBI_PMU_FW_MAX) return -EOPNOTSUPP; + /* * SKIP_MATCH flag indicates the caller is aware of the assigned counter * for this event. Just do a sanity check if it already marked used. @@ -319,13 +359,23 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba if (!test_bit(ctr_base, kvpmu->used_pmc)) return -EINVAL; ctr_idx = ctr_base; - goto match_done; + if (is_fevent) + goto perf_event_done; + else + goto match_done; } ctr_idx = pmu_get_pmc_index(kvpmu, eidx, ctr_base, ctr_mask); if (ctr_idx < 0) return -EOPNOTSUPP; + /* + * No need to create perf events for firmware events as the firmware counter + * is supposed to return the measurement of VS->HS mode invocations. 
+ */ + if (is_fevent) + goto perf_event_done; + match_done: pmc = &kvpmu->pmc[ctr_idx]; pmu_release_perf_event(pmc); @@ -363,17 +413,26 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba return -EOPNOTSUPP; } - set_bit(ctr_idx, kvpmu->used_pmc); pmc->perf_event = event; - if (flag & SBI_PMU_CFG_FLAG_AUTO_START) - perf_event_enable(pmc->perf_event); +perf_event_done: + if (flag & SBI_PMU_CFG_FLAG_AUTO_START) { + if (is_fevent) + kvpmu->fw_event[event_code].started = true; + else + perf_event_enable(pmc->perf_event); + } + /* This should be only true for firmware events */ + if (!pmc) + pmc = &kvpmu->pmc[ctr_idx]; + pmc->event_idx = eidx; + set_bit(ctr_idx, kvpmu->used_pmc); return ctr_idx; } int kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) { - int i = 0, num_hw_ctrs, num_fw_ctrs, hpm_width; + int i, num_hw_ctrs, num_fw_ctrs, hpm_width; struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); struct kvm_pmc *pmc; @@ -395,6 +454,7 @@ int kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) bitmap_zero(kvpmu->used_pmc, RISCV_MAX_COUNTERS); kvpmu->num_hw_ctrs = num_hw_ctrs; kvpmu->num_fw_ctrs = num_fw_ctrs; + memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event)); /* * There is no corelation betwen the logical hardware counter and virtual counters. * However, we need to encode a hpmcounter CSR in the counter info field so that @@ -409,6 +469,7 @@ int kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) pmc->idx = i; pmc->counter_val = 0; pmc->vcpu = vcpu; + pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; if (i < kvpmu->num_hw_ctrs) { kvpmu->pmc[i].cinfo.type = SBI_PMU_CTR_TYPE_HW; if (i < 3) @@ -444,7 +505,10 @@ void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) for_each_set_bit(i, kvpmu->used_pmc, RISCV_MAX_COUNTERS) { pmc = &kvpmu->pmc[i]; pmu_release_perf_event(pmc); + pmc->counter_val = 0; + pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; } + memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event)); } void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c index 4c034d8a606a..614ae127e102 100644 --- a/arch/riscv/kvm/vcpu_sbi_replace.c +++ b/arch/riscv/kvm/vcpu_sbi_replace.c @@ -12,6 +12,7 @@ #include #include #include +#include #include static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, @@ -25,6 +26,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, if (cp->a6 != SBI_EXT_TIME_SET_TIMER) return -EINVAL; + kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_SET_TIMER); #if __riscv_xlen == 32 next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0; #else @@ -55,6 +57,7 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, if (cp->a6 != SBI_EXT_IPI_SEND_IPI) return -EINVAL; + kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_IPI_SENT); kvm_for_each_vcpu(i, tmp, vcpu->kvm) { if (hbase != -1UL) { if (tmp->vcpu_id < hbase) @@ -65,6 +68,7 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT); if (ret < 0) break; + kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RECVD); } return ret; @@ -89,6 +93,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run switch (funcid) { case SBI_EXT_RFENCE_REMOTE_FENCE_I: kvm_riscv_fence_i(vcpu->kvm, hbase, hmask); + kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT); break; case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA: if (cp->a2 == 0 && cp->a3 == 0) @@ -96,6 +101,7 @@ 
static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run else kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask, cp->a2, cp->a3, PAGE_SHIFT); + kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT); break; case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID: if (cp->a2 == 0 && cp->a3 == 0) @@ -106,6 +112,7 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run hbase, hmask, cp->a2, cp->a3, PAGE_SHIFT, cp->a4); + kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_SENT); break; case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA: case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
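Patch 9 above counts firmware events purely in software: kvm_riscv_vcpu_pmu_incr_fw() bumps fw_event[fid].value only while the event has been started, and counter reads return that accumulated value for SBI_PMU_CTR_TYPE_FW counters. The standalone sketch below is not part of the posted patches; the array size and the example event id are illustrative stand-ins for SBI_PMU_FW_MAX and the SBI firmware event codes:

/* Standalone illustration only -- not part of the posted patches. */
#include <stdbool.h>
#include <stdio.h>

#define FW_EVENT_MAX 32 /* stand-in for an SBI_PMU_FW_MAX-sized array */

struct fw_event {
	unsigned long value; /* current value of the event */
	bool started;        /* event monitoring status */
};

static struct fw_event fw_events[FW_EVENT_MAX];

/* Mirrors the increment path: out-of-range ids fail, stopped events are ignored */
static int fw_event_incr(unsigned long fid)
{
	if (fid >= FW_EVENT_MAX)
		return -1;
	if (fw_events[fid].started)
		fw_events[fid].value++;
	return 0;
}

int main(void)
{
	unsigned long fid = 6; /* e.g. an "IPI sent" style firmware event */

	fw_event_incr(fid);            /* ignored: counter not started yet */
	fw_events[fid].started = true; /* what counter_start does for FW counters */
	fw_event_incr(fid);
	fw_event_incr(fid);
	printf("fw event %lu = %lu\n", fid, fw_events[fid].value); /* prints 2 */
	return 0;
}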