From patchwork Mon Mar 21 22:43:50 2022
From: Ben Gardon
Date: Mon, 21 Mar 2022 15:43:50 -0700
In-Reply-To: <20220321224358.1305530-1-bgardon@google.com>
Message-Id: <20220321224358.1305530-2-bgardon@google.com>
Subject: [PATCH v2 1/9] KVM: x86/mmu: Move implementation of make_spte to a helper
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: Paolo Bonzini, Peter Xu, Sean Christopherson, David Matlack,
    Jim Mattson, David Dunn, Jing Zhang, Junaid Shahid, Ben Gardon

Move the implementation of make_spte() to a helper function.
This will facilitate factoring out all uses of the vCPU pointer from
the helper in subsequent commits.

No functional change intended.

Signed-off-by: Ben Gardon
---
 arch/x86/kvm/mmu/spte.c | 20 +++++++++++++++-----
 arch/x86/kvm/mmu/spte.h |  4 ++++
 2 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 4739b53c9734..d3da0d3d41cb 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -90,11 +90,10 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 			     E820_TYPE_RAM);
 }
 
-bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-	       const struct kvm_memory_slot *slot,
-	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
-	       u64 old_spte, bool prefetch, bool can_unsync,
-	       bool host_writable, u64 *new_spte)
+bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+		 const struct kvm_memory_slot *slot, unsigned int pte_access,
+		 gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch,
+		 bool can_unsync, bool host_writable, u64 *new_spte)
 {
 	int level = sp->role.level;
 	u64 spte = SPTE_MMU_PRESENT_MASK;
@@ -192,6 +191,17 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	return wrprot;
 }
 
+bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+	       const struct kvm_memory_slot *slot,
+	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
+	       u64 old_spte, bool prefetch, bool can_unsync,
+	       bool host_writable, u64 *new_spte)
+{
+	return __make_spte(vcpu, sp, slot, pte_access, gfn, pfn, old_spte,
+			   prefetch, can_unsync, host_writable, new_spte);
+
+}
+
 static u64 make_spte_executable(u64 spte)
 {
 	bool is_access_track = is_access_track_spte(spte);
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 73f12615416f..3fae3c3124f7 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -410,6 +410,10 @@ static inline u64 get_mmio_spte_generation(u64 spte)
 	return gen;
 }
 
+bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+		 const struct kvm_memory_slot *slot, unsigned int pte_access,
+		 gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch,
+		 bool can_unsync, bool host_writable, u64 *new_spte);
 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	       const struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
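The mechanical pattern used here (and repeated through patch 5) is
wrapper extraction: move the body into a helper that takes everything
it needs as explicit arguments, and keep the old entry point as a thin
shim. A self-contained toy version of the pattern, with hypothetical
names that are not kernel code:

#include <stdbool.h>
#include <stdio.h>

struct ctx { int bias; };	/* stand-in for the vCPU */

/* Core logic: every input is an explicit argument, no context object. */
static bool __compute(int bias, int x, int *out)
{
	*out = x + bias;
	return true;
}

/*
 * The original entry point becomes a thin wrapper that derives its
 * arguments from the context object, so later callers that have no
 * such object can call __compute() directly.
 */
static bool compute(struct ctx *ctx, int x, int *out)
{
	return __compute(ctx->bias, x, out);
}

int main(void)
{
	struct ctx c = { .bias = 3 };
	int r;

	compute(&c, 4, &r);	/* via the wrapper */
	printf("%d\n", r);	/* prints 7 */
	return 0;
}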
From patchwork Mon Mar 21 22:43:51 2022
From: Ben Gardon
Date: Mon, 21 Mar 2022 15:43:51 -0700
In-Reply-To: <20220321224358.1305530-1-bgardon@google.com>
Message-Id: <20220321224358.1305530-3-bgardon@google.com>
Subject: [PATCH v2 2/9] KVM: x86/mmu: Factor mt_mask out of __make_spte
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: Paolo Bonzini, Peter Xu, Sean Christopherson, David Matlack,
    Jim Mattson, David Dunn, Jing Zhang, Junaid Shahid, Ben Gardon

In service of removing the vCPU pointer from __make_spte(), factor the
memory type mask calculation out of the function and pass the mask in
as an argument.
Signed-off-by: Ben Gardon
---
 arch/x86/kvm/mmu/spte.c | 12 ++++++++----
 arch/x86/kvm/mmu/spte.h |  3 ++-
 2 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index d3da0d3d41cb..931cf93c3b7e 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -93,7 +93,8 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		 const struct kvm_memory_slot *slot, unsigned int pte_access,
 		 gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch,
-		 bool can_unsync, bool host_writable, u64 *new_spte)
+		 bool can_unsync, bool host_writable, u64 mt_mask,
+		 u64 *new_spte)
 {
 	int level = sp->role.level;
 	u64 spte = SPTE_MMU_PRESENT_MASK;
@@ -130,8 +131,7 @@ bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	if (level > PG_LEVEL_4K)
 		spte |= PT_PAGE_SIZE_MASK;
 	if (tdp_enabled)
-		spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
-			kvm_is_mmio_pfn(pfn));
+		spte |= mt_mask;
 
 	if (host_writable)
 		spte |= shadow_host_writable_mask;
@@ -197,8 +197,12 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	       u64 old_spte, bool prefetch, bool can_unsync,
 	       bool host_writable, u64 *new_spte)
 {
+	u64 mt_mask = static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
+						       kvm_is_mmio_pfn(pfn));
+
 	return __make_spte(vcpu, sp, slot, pte_access, gfn, pfn, old_spte,
-			   prefetch, can_unsync, host_writable, new_spte);
+			   prefetch, can_unsync, host_writable, mt_mask,
+			   new_spte);
 
 }
 
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 3fae3c3124f7..d051f955699e 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -413,7 +413,8 @@ static inline u64 get_mmio_spte_generation(u64 spte)
 bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		 const struct kvm_memory_slot *slot, unsigned int pte_access,
 		 gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch,
-		 bool can_unsync, bool host_writable, u64 *new_spte);
+		 bool can_unsync, bool host_writable, u64 mt_mask,
+		 u64 *new_spte);
 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	       const struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
From patchwork Mon Mar 21 22:43:52 2022
From: Ben Gardon
Date: Mon, 21 Mar 2022 15:43:52 -0700
In-Reply-To: <20220321224358.1305530-1-bgardon@google.com>
Message-Id: <20220321224358.1305530-4-bgardon@google.com>
Subject: [PATCH v2 3/9] KVM: x86/mmu: Factor shadow_zero_check out of __make_spte
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: Paolo Bonzini, Peter Xu, Sean Christopherson, David Matlack,
    Jim Mattson, David Dunn, Jing Zhang, Junaid Shahid, Ben Gardon

In the interest of developing a version of __make_spte() that can
function without a vCPU pointer, factor out the shadow_zero_check to
be an additional argument to the function.

No functional change intended.

Signed-off-by: Ben Gardon
---
 arch/x86/kvm/mmu/spte.c | 10 ++++++----
 arch/x86/kvm/mmu/spte.h |  2 +-
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 931cf93c3b7e..ef2d85577abb 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -94,7 +94,7 @@ bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		 const struct kvm_memory_slot *slot, unsigned int pte_access,
 		 gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch,
 		 bool can_unsync, bool host_writable, u64 mt_mask,
-		 u64 *new_spte)
+		 struct rsvd_bits_validate *shadow_zero_check, u64 *new_spte)
 {
 	int level = sp->role.level;
 	u64 spte = SPTE_MMU_PRESENT_MASK;
@@ -177,9 +177,9 @@ bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	if (prefetch)
 		spte = mark_spte_for_access_track(spte);
 
-	WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
+	WARN_ONCE(is_rsvd_spte(shadow_zero_check, spte, level),
 		  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
-		  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));
+		  get_rsvd_bits(shadow_zero_check, spte, level));
 
 	if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
 		/* Enforced by kvm_mmu_hugepage_adjust. */
@@ -199,10 +199,12 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 {
 	u64 mt_mask = static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
 						       kvm_is_mmio_pfn(pfn));
+	struct rsvd_bits_validate *shadow_zero_check =
+			&vcpu->arch.mmu->shadow_zero_check;
 
 	return __make_spte(vcpu, sp, slot, pte_access, gfn, pfn, old_spte,
 			   prefetch, can_unsync, host_writable, mt_mask,
-			   new_spte);
+			   shadow_zero_check, new_spte);
 
 }
 
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index d051f955699e..e8a051188eb6 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -414,7 +414,7 @@ bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		 const struct kvm_memory_slot *slot, unsigned int pte_access,
 		 gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch,
 		 bool can_unsync, bool host_writable, u64 mt_mask,
-		 u64 *new_spte);
+		 struct rsvd_bits_validate *shadow_zero_check, u64 *new_spte);
 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	       const struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
From patchwork Mon Mar 21 22:43:53 2022
From: Ben Gardon
Date: Mon, 21 Mar 2022 15:43:53 -0700
In-Reply-To: <20220321224358.1305530-1-bgardon@google.com>
Message-Id: <20220321224358.1305530-5-bgardon@google.com>
Subject: [PATCH v2 4/9] KVM: x86/mmu: Replace vcpu argument with kvm pointer in make_spte
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: Paolo Bonzini, Peter Xu, Sean Christopherson, David Matlack,
    Jim Mattson, David Dunn, Jing Zhang, Junaid Shahid, Ben Gardon

Now that nothing in __make_spte() actually needs the vCPU argument,
just pass in a pointer to the struct kvm. This allows the function to
be used in situations where there is no relevant vCPU.

No functional change intended.

Signed-off-by: Ben Gardon
---
 arch/x86/kvm/mmu/spte.c | 9 ++++-----
 arch/x86/kvm/mmu/spte.h | 2 +-
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index ef2d85577abb..45e9c0c3932e 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -90,7 +90,7 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 			     E820_TYPE_RAM);
 }
 
-bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+bool __make_spte(struct kvm *kvm, struct kvm_mmu_page *sp,
 		 const struct kvm_memory_slot *slot, unsigned int pte_access,
 		 gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch,
 		 bool can_unsync, bool host_writable, u64 mt_mask,
@@ -161,7 +161,7 @@ bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	 * e.g. it's write-tracked (upper-level SPs) or has one or more
 	 * shadow pages and unsync'ing pages is not allowed.
 	 */
-	if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, can_unsync, prefetch)) {
+	if (mmu_try_to_unsync_pages(kvm, slot, gfn, can_unsync, prefetch)) {
 		pgprintk("%s: found shadow page for %llx, marking ro\n",
 			 __func__, gfn);
 		wrprot = true;
@@ -184,7 +184,7 @@ bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
 		/* Enforced by kvm_mmu_hugepage_adjust. */
 		WARN_ON(level > PG_LEVEL_4K);
-		mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
+		mark_page_dirty_in_slot(kvm, slot, gfn);
 	}
 
 	*new_spte = spte;
@@ -202,10 +202,9 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	struct rsvd_bits_validate *shadow_zero_check =
 			&vcpu->arch.mmu->shadow_zero_check;
 
-	return __make_spte(vcpu, sp, slot, pte_access, gfn, pfn, old_spte,
+	return __make_spte(vcpu->kvm, sp, slot, pte_access, gfn, pfn, old_spte,
 			   prefetch, can_unsync, host_writable, mt_mask,
 			   shadow_zero_check, new_spte);
-
 }
 
 static u64 make_spte_executable(u64 spte)
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index e8a051188eb6..cee02fe63429 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -410,7 +410,7 @@ static inline u64 get_mmio_spte_generation(u64 spte)
 	return gen;
 }
 
-bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+bool __make_spte(struct kvm *kvm, struct kvm_mmu_page *sp,
 		 const struct kvm_memory_slot *slot, unsigned int pte_access,
 		 gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch,
 		 bool can_unsync, bool host_writable, u64 mt_mask,
From patchwork Mon Mar 21 22:43:54 2022
From: Ben Gardon
Date: Mon, 21 Mar 2022 15:43:54 -0700
In-Reply-To: <20220321224358.1305530-1-bgardon@google.com>
Message-Id: <20220321224358.1305530-6-bgardon@google.com>
Subject: [PATCH v2 5/9] KVM: x86/mmu: Factor out the meat of reset_tdp_shadow_zero_bits_mask
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: Paolo Bonzini, Peter Xu, Sean Christopherson, David Matlack,
    Jim Mattson, David Dunn, Jing Zhang, Junaid Shahid, Ben Gardon

Factor out the implementation of reset_tdp_shadow_zero_bits_mask() to
a helper function which does not require a vCPU pointer. The only
element of the struct kvm_mmu context used by the function is the
shadow root level, so pass that in too instead of the mmu context.

No functional change intended.

Signed-off-by: Ben Gardon
---
 arch/x86/kvm/mmu/mmu.c | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 3b8da8b0745e..6f98111f8f8b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4487,16 +4487,14 @@ static inline bool boot_cpu_is_amd(void)
  * possible, however, kvm currently does not do execution-protection.
  */
 static void
-reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
+build_tdp_shadow_zero_bits_mask(struct rsvd_bits_validate *shadow_zero_check,
+				int shadow_root_level)
 {
-	struct rsvd_bits_validate *shadow_zero_check;
 	int i;
 
-	shadow_zero_check = &context->shadow_zero_check;
-
 	if (boot_cpu_is_amd())
 		__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
-					context->shadow_root_level, false,
+					shadow_root_level, false,
 					boot_cpu_has(X86_FEATURE_GBPAGES),
 					false, true);
 	else
@@ -4507,12 +4505,19 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
 	if (!shadow_me_mask)
 		return;
 
-	for (i = context->shadow_root_level; --i >= 0;) {
+	for (i = shadow_root_level; --i >= 0;) {
 		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
 		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
 	}
 }
 
+static void
+reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
+{
+	build_tdp_shadow_zero_bits_mask(&context->shadow_zero_check,
+					context->shadow_root_level);
+}
+
 /*
  * as the comments in reset_shadow_zero_bits_mask() except it
  * is the shadow page table for intel nested guest.
From patchwork Mon Mar 21 22:43:55 2022
From: Ben Gardon
Date: Mon, 21 Mar 2022 15:43:55 -0700
In-Reply-To: <20220321224358.1305530-1-bgardon@google.com>
Message-Id: <20220321224358.1305530-7-bgardon@google.com>
Subject: [PATCH v2 6/9] KVM: x86/mmu: Factor out part of vmx_get_mt_mask which does not depend on vcpu
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: Paolo Bonzini, Peter Xu, Sean Christopherson, David Matlack,
    Jim Mattson, David Dunn, Jing Zhang, Junaid Shahid, Ben Gardon

Factor out the parts of vmx_get_mt_mask() which do not depend on the
vCPU argument.
This also requires adding some error reporting to the helper function
to say whether it was possible to generate the MT mask without a vCPU
argument. This refactoring will allow the MT mask to be computed
without a vCPU whenever noncoherent DMA is not enabled on a VM.

No functional change intended.

Signed-off-by: Ben Gardon
---
 arch/x86/kvm/vmx/vmx.c | 24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index e8963f5af618..69c654567475 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7149,9 +7149,26 @@ static int __init vmx_check_processor_compat(void)
 	return 0;
 }
 
+static bool vmx_try_get_mt_mask(struct kvm *kvm, gfn_t gfn,
+				bool is_mmio, u64 *mask)
+{
+	if (is_mmio) {
+		*mask = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
+		return true;
+	}
+
+	if (!kvm_arch_has_noncoherent_dma(kvm)) {
+		*mask = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
+		return true;
+	}
+
+	return false;
+}
+
 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 {
 	u8 cache;
+	u64 mask;
 
 	/* We wanted to honor guest CD/MTRR/PAT, but doing so could result in
 	 * memory aliases with conflicting memory types and sometimes MCEs.
@@ -7171,11 +7188,8 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 	 * EPT memory type is used to emulate guest CD/MTRR.
 	 */
 
-	if (is_mmio)
-		return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
-
-	if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
-		return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
+	if (vmx_try_get_mt_mask(vcpu->kvm, gfn, is_mmio, &mask))
+		return mask;
 
 	if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
 		if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
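The error-reporting shape is a plain try-pattern: return true and fill
an out-parameter when the answer is decidable from VM-wide state,
return false when per-vCPU state would be needed. A minimal standalone
illustration (hypothetical names and constants, not the VMX
encodings):

#include <stdbool.h>
#include <stdint.h>

#define TOY_MT_UC	0x0ULL	/* hypothetical "uncachable" encoding */
#define TOY_MT_WB	0x6ULL	/* hypothetical "write-back" encoding */

/*
 * Return true and fill *mask when the memory type can be derived from
 * VM-wide state alone; return false when per-vCPU state (CR0.CD,
 * guest MTRRs, ...) would be needed, so the caller can fall back to
 * the vCPU-aware path.
 */
static bool toy_try_get_mt_mask(bool is_mmio, bool noncoherent_dma,
				uint64_t *mask)
{
	if (is_mmio) {
		*mask = TOY_MT_UC;
		return true;
	}

	if (!noncoherent_dma) {
		*mask = TOY_MT_WB;
		return true;
	}

	return false;
}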
From patchwork Mon Mar 21 22:43:56 2022
From: Ben Gardon
Date: Mon, 21 Mar 2022 15:43:56 -0700
In-Reply-To: <20220321224358.1305530-1-bgardon@google.com>
Message-Id: <20220321224358.1305530-8-bgardon@google.com>
Subject: [PATCH v2 7/9] KVM: x86/mmu: Add try_get_mt_mask to x86_ops
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: Paolo Bonzini, Peter Xu, Sean Christopherson, David Matlack,
    Jim Mattson, David Dunn, Jing Zhang, Junaid Shahid, Ben Gardon

Add another function for getting the memory type mask to x86_ops.
This version of the function can fail, but it does not require a vCPU
pointer. It will be used in a subsequent commit for in-place large
page promotion when disabling dirty logging.

No functional change intended.
Signed-off-by: Ben Gardon
Signed-off-by: Sean Christopherson
---
 arch/x86/include/asm/kvm-x86-ops.h | 1 +
 arch/x86/include/asm/kvm_host.h    | 2 ++
 arch/x86/kvm/svm/svm.c             | 9 +++++++++
 arch/x86/kvm/vmx/vmx.c             | 1 +
 4 files changed, 13 insertions(+)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 29affccb353c..29880363b5ed 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -88,6 +88,7 @@ KVM_X86_OP_OPTIONAL(sync_pir_to_irr)
 KVM_X86_OP_OPTIONAL_RET0(set_tss_addr)
 KVM_X86_OP_OPTIONAL_RET0(set_identity_map_addr)
 KVM_X86_OP_OPTIONAL_RET0(get_mt_mask)
+KVM_X86_OP(try_get_mt_mask)
 KVM_X86_OP(load_mmu_pgd)
 KVM_X86_OP(has_wbinvd_exit)
 KVM_X86_OP(get_l2_tsc_offset)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f72e80178ffc..a114e4782702 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1422,6 +1422,8 @@ struct kvm_x86_ops {
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
+	bool (*try_get_mt_mask)(struct kvm *kvm, gfn_t gfn,
+				bool is_mmio, u64 *mask);
 	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
 			     int root_level);
 
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index b069493ad5c7..e73415dfcf52 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3944,6 +3944,13 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
 	return true;
 }
 
+static bool svm_try_get_mt_mask(struct kvm *kvm, gfn_t gfn,
+				bool is_mmio, u64 *mask)
+{
+	*mask = 0;
+	return true;
+}
+
 static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -4600,6 +4607,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.check_apicv_inhibit_reasons = avic_check_apicv_inhibit_reasons,
 	.apicv_post_state_restore = avic_apicv_post_state_restore,
 
+	.try_get_mt_mask = svm_try_get_mt_mask,
+
 	.get_exit_info = svm_get_exit_info,
 
 	.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 69c654567475..81e9805ed1d8 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7813,6 +7813,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.set_tss_addr = vmx_set_tss_addr,
 	.set_identity_map_addr = vmx_set_identity_map_addr,
 	.get_mt_mask = vmx_get_mt_mask,
+	.try_get_mt_mask = vmx_try_get_mt_mask,
 
 	.get_exit_info = vmx_get_exit_info,
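For orientation, the call shape a vCPU-less consumer is expected to
use; this is essentially the fragment patch 9 adds in
try_promote_lpage(), excerpted here rather than a standalone program:

	u64 mt_mask;

	/* Bail out of the vCPU-less path if the mask needs a vCPU. */
	if (!static_call(kvm_x86_try_get_mt_mask)(kvm, gfn,
						  kvm_is_mmio_pfn(pfn),
						  &mt_mask))
		return false;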
From patchwork Mon Mar 21 22:43:57 2022
From: Ben Gardon
Date: Mon, 21 Mar 2022 15:43:57 -0700
In-Reply-To: <20220321224358.1305530-1-bgardon@google.com>
Message-Id: <20220321224358.1305530-9-bgardon@google.com>
Subject: [PATCH v2 8/9] KVM: x86/mmu: Make kvm_is_mmio_pfn usable outside of spte.c
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: Paolo Bonzini, Peter Xu, Sean Christopherson, David Matlack,
    Jim Mattson, David Dunn, Jing Zhang, Junaid Shahid, Ben Gardon

Make kvm_is_mmio_pfn() visible outside of spte.c by dropping its
static qualifier and declaring it in spte.h. It will be used in a
subsequent commit for in-place lpage promotion when disabling dirty
logging.
Signed-off-by: Ben Gardon
---
 arch/x86/kvm/mmu/spte.c | 2 +-
 arch/x86/kvm/mmu/spte.h | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 45e9c0c3932e..8e9b827c4ed5 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -69,7 +69,7 @@ u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
 	return spte;
 }
 
-static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
+bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 {
 	if (pfn_valid(pfn))
 		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index cee02fe63429..e058a85e6c66 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -443,4 +443,5 @@ u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn);
 
 void kvm_mmu_reset_all_pte_masks(void);
 
+bool kvm_is_mmio_pfn(kvm_pfn_t pfn);
 #endif
From patchwork Mon Mar 21 22:43:58 2022
From: Ben Gardon
Date: Mon, 21 Mar 2022 15:43:58 -0700
In-Reply-To: <20220321224358.1305530-1-bgardon@google.com>
Message-Id: <20220321224358.1305530-10-bgardon@google.com>
Subject: [PATCH v2 9/9] KVM: x86/mmu: Promote pages in-place when disabling dirty logging
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: Paolo Bonzini, Peter Xu, Sean Christopherson, David Matlack,
    Jim Mattson, David Dunn, Jing Zhang, Junaid Shahid, Ben Gardon

When disabling dirty logging, the TDP MMU currently zaps each leaf
entry mapping memory in the relevant memslot. This is very slow. Doing
the zaps under the mmu read lock requires a TLB flush for every zap
and the zapping causes a storm of EPT/NPT violations.

Instead of zapping, replace the split large pages with large page
mappings directly. While this sort of operation has historically only
been done in the vCPU page fault handler context, refactorings earlier
in this series and the relative simplicity of the TDP MMU make it
possible here as well.

Running the dirty_log_perf_test on an Intel Skylake with 96 vCPUs and
1G of memory per vCPU, this reduces the time required to disable dirty
logging from over 45 seconds to just over 1 second. It also avoids
provoking page faults, improving vCPU performance while disabling
dirty logging.

Signed-off-by: Ben Gardon
---
 arch/x86/kvm/mmu/mmu.c          |  4 +-
 arch/x86/kvm/mmu/mmu_internal.h |  6 +++
 arch/x86/kvm/mmu/tdp_mmu.c      | 73 ++++++++++++++++++++++++++++++++-
 3 files changed, 79 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 6f98111f8f8b..a99c23ef90b6 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -100,7 +100,7 @@ module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
  */
 bool tdp_enabled = false;
 
-static int max_huge_page_level __read_mostly;
+int max_huge_page_level;
 static int tdp_root_level __read_mostly;
 static int max_tdp_level __read_mostly;
 
@@ -4486,7 +4486,7 @@ static inline bool boot_cpu_is_amd(void)
  * the direct page table on host, use as much mmu features as
  * possible, however, kvm currently does not do execution-protection.
  */
-static void
+void
 build_tdp_shadow_zero_bits_mask(struct rsvd_bits_validate *shadow_zero_check,
 				int shadow_root_level)
 {
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 1bff453f7cbe..6c08a5731fcb 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -171,4 +171,10 @@ void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
 void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
 void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
 
+void
+build_tdp_shadow_zero_bits_mask(struct rsvd_bits_validate *shadow_zero_check,
+				int shadow_root_level);
+
+extern int max_huge_page_level __read_mostly;
+
 #endif /* __KVM_X86_MMU_INTERNAL_H */
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index af60922906ef..eb8929e394ec 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1709,6 +1709,66 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
 }
 
+static bool try_promote_lpage(struct kvm *kvm,
+			      const struct kvm_memory_slot *slot,
+			      struct tdp_iter *iter)
+{
+	struct kvm_mmu_page *sp = sptep_to_sp(iter->sptep);
+	struct rsvd_bits_validate shadow_zero_check;
+	bool map_writable;
+	kvm_pfn_t pfn;
+	u64 new_spte;
+	u64 mt_mask;
+
+	/*
+	 * If addresses are being invalidated, don't do in-place promotion to
+	 * avoid accidentally mapping an invalidated address.
+	 */
+	if (unlikely(kvm->mmu_notifier_count))
+		return false;
+
+	if (iter->level > max_huge_page_level || iter->gfn < slot->base_gfn ||
+	    iter->gfn >= slot->base_gfn + slot->npages)
+		return false;
+
+	pfn = __gfn_to_pfn_memslot(slot, iter->gfn, true, NULL, true,
+				   &map_writable, NULL);
+	if (is_error_noslot_pfn(pfn))
+		return false;
+
+	/*
+	 * Can't reconstitute an lpage if the constituent pages can't be
+	 * mapped higher.
+	 */
+	if (iter->level > kvm_mmu_max_mapping_level(kvm, slot, iter->gfn,
+						    pfn, PG_LEVEL_NUM))
+		return false;
+
+	build_tdp_shadow_zero_bits_mask(&shadow_zero_check, iter->root_level);
+
+	/*
+	 * In some cases, a vCPU pointer is required to get the MT mask,
+	 * however in most cases it can be generated without one. If a
+	 * vCPU pointer is needed kvm_x86_try_get_mt_mask will fail.
+	 * In that case, bail on in-place promotion.
+	 */
+	if (unlikely(!static_call(kvm_x86_try_get_mt_mask)(kvm, iter->gfn,
+							   kvm_is_mmio_pfn(pfn),
+							   &mt_mask)))
+		return false;
+
+	__make_spte(kvm, sp, slot, ACC_ALL, iter->gfn, pfn, 0, false, true,
+		    map_writable, mt_mask, &shadow_zero_check, &new_spte);
+
+	if (tdp_mmu_set_spte_atomic(kvm, iter, new_spte))
+		return true;
+
+	/* Re-read the SPTE as it must have been changed by another thread. */
+	iter->old_spte = READ_ONCE(*rcu_dereference(iter->sptep));
+
+	return false;
+}
+
 /*
  * Clear leaf entries which could be replaced by large mappings, for
  * GFNs within the slot.
@@ -1729,8 +1789,17 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
 			continue;
 
-		if (!is_shadow_present_pte(iter.old_spte) ||
-		    !is_last_spte(iter.old_spte, iter.level))
+		if (iter.level > max_huge_page_level ||
+		    iter.gfn < slot->base_gfn ||
+		    iter.gfn >= slot->base_gfn + slot->npages)
+			continue;
+
+		if (!is_shadow_present_pte(iter.old_spte))
+			continue;
+
+		/* Try to promote the constituent pages to an lpage. */
+		if (!is_last_spte(iter.old_spte, iter.level) &&
+		    try_promote_lpage(kvm, slot, &iter))
 			continue;
 
 		pfn = spte_to_pfn(iter.old_spte);
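One detail worth calling out in try_promote_lpage() is the failure
path of tdp_mmu_set_spte_atomic(): on a lost race with another thread,
the iterator's old_spte is re-read and the promotion is abandoned
rather than retried. The same compare-and-exchange contract, reduced
to a self-contained C11 toy with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Install new_val only if the entry still holds *old_val. On failure,
 * compare_exchange writes the entry's current value back into
 * *old_val, which is exactly the "re-read the SPTE" step the caller
 * needs before bailing out or retrying.
 */
static bool set_entry_atomic(_Atomic uint64_t *entry, uint64_t *old_val,
			     uint64_t new_val)
{
	return atomic_compare_exchange_strong(entry, old_val, new_val);
}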