Message ID | 20221221222418.3307832-5-bgardon@google.com (mailing list archive) |
---|---|
State | New, archived |
Series | KVM: x86/MMU: Formalize the Shadow MMU |
On Wed, Dec 21, 2022 at 10:24:08PM +0000, Ben Gardon wrote:
> In preparation for moving paging_tmpl.h to shadow_mmu.c, expose various
> functions it needs through mmu_internal.h. This includes modifying the
> BUILD_MMU_ROLE_ACCESSOR macro so that it does not automatically include
> the static label, since some but not all of the accessors are needed by
> paging_tmpl.h.
>
> No functional change intended.
>
> Signed-off-by: Ben Gardon <bgardon@google.com>
> ---
>  arch/x86/kvm/mmu/mmu.c          | 32 ++++++++++++++++----------------
>  arch/x86/kvm/mmu/mmu_internal.h | 16 ++++++++++++++++
>  2 files changed, 32 insertions(+), 16 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index bf14e181eb12..a17e8a79e4df 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -153,18 +153,18 @@ BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
>   * and the vCPU may be incorrect/irrelevant.
>   */
>  #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
> -static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu) \
> +inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu)	\
>  {									\
>  	return !!(mmu->cpu_role. base_or_ext . reg##_##name);		\
>  }
>  BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
> -BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse);
> +static BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse);
>  BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smep);
> -BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap);
> -BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke);
> -BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57);
> +static BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap);
> +static BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke);
> +static BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57);
>  BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
> -BUILD_MMU_ROLE_ACCESSOR(ext, efer, lma);
> +static BUILD_MMU_ROLE_ACCESSOR(ext, efer, lma);

Suggest moving all the BUILD_MMU_ROLE*() macros to mmu_internal.h, since
they are already static inline. That would be a cleaner patch and reduce
future churn if shadow_mmu.c ever needs to use a different role accessor
at some point.

>
>  static inline bool is_cr0_pg(struct kvm_mmu *mmu)
>  {
> @@ -210,7 +210,7 @@ void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
>  	kvm_flush_remote_tlbs_with_range(kvm, &range);
>  }
>
> -static gfn_t get_mmio_spte_gfn(u64 spte)
> +gfn_t get_mmio_spte_gfn(u64 spte)
>  {
>  	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
>
> @@ -240,7 +240,7 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
>  	return likely(kvm_gen == spte_gen);
>  }
>
> -static int is_cpuid_PSE36(void)
> +int is_cpuid_PSE36(void)
>  {
>  	return 1;
>  }

Can we just drop is_cpuid_PSE36(), e.g. as a precursor patch? It just
returns 1...
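For reference, a rough sketch of the alternative suggested above: the accessor macro stays static inline and is defined in mmu_internal.h, so any accessor becomes usable from every file that includes the header. The macro body is copied from the hunk above; the placement and the subset of accessors shown are illustrative, not part of the posted patch.

/* Sketch only: hypothetical placement in arch/x86/kvm/mmu/mmu_internal.h */
#define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)			\
static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu) \
{									\
	return !!(mmu->cpu_role. base_or_ext . reg##_##name);		\
}

/*
 * Because the accessors stay static inline (and __maybe_unused), each
 * includer simply ignores the ones it never calls, so no per-file
 * static/non-static split of the instantiations is needed.
 */
BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse);
BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smep);
/* ... remaining accessors unchanged ... */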
On Fri, Jan 6, 2023 at 11:49 AM David Matlack <dmatlack@google.com> wrote:
>
> On Wed, Dec 21, 2022 at 10:24:08PM +0000, Ben Gardon wrote:
> > In preparation for moving paging_tmpl.h to shadow_mmu.c, expose various
> > functions it needs through mmu_internal.h. This includes modifying the
> > BUILD_MMU_ROLE_ACCESSOR macro so that it does not automatically include
> > the static label, since some but not all of the accessors are needed by
> > paging_tmpl.h.
> >
> > No functional change intended.
> >
> > Signed-off-by: Ben Gardon <bgardon@google.com>
> > ---
> >  arch/x86/kvm/mmu/mmu.c          | 32 ++++++++++++++++----------------
> >  arch/x86/kvm/mmu/mmu_internal.h | 16 ++++++++++++++++
> >  2 files changed, 32 insertions(+), 16 deletions(-)
> >
> > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > index bf14e181eb12..a17e8a79e4df 100644
> > --- a/arch/x86/kvm/mmu/mmu.c
> > +++ b/arch/x86/kvm/mmu/mmu.c
> > @@ -153,18 +153,18 @@ BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
> >   * and the vCPU may be incorrect/irrelevant.
> >   */
> >  #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
> > -static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu) \
> > +inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu)	\
> >  {									\
> >  	return !!(mmu->cpu_role. base_or_ext . reg##_##name);		\
> >  }
> >  BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
> > -BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse);
> > +static BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse);
> >  BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smep);
> > -BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap);
> > -BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke);
> > -BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57);
> > +static BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap);
> > +static BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke);
> > +static BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57);
> >  BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
> > -BUILD_MMU_ROLE_ACCESSOR(ext, efer, lma);
> > +static BUILD_MMU_ROLE_ACCESSOR(ext, efer, lma);
>
> Suggest moving all the BUILD_MMU_ROLE*() macros to mmu_internal.h, since
> they are already static inline. That would be a cleaner patch and reduce
> future churn if shadow_mmu.c ever needs to use a different role accessor
> at some point.

That sounds reasonable. Will do in V1.

> >
> >
> >  static inline bool is_cr0_pg(struct kvm_mmu *mmu)
> >  {
> > @@ -210,7 +210,7 @@ void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
> >  	kvm_flush_remote_tlbs_with_range(kvm, &range);
> >  }
> >
> > -static gfn_t get_mmio_spte_gfn(u64 spte)
> > +gfn_t get_mmio_spte_gfn(u64 spte)
> >  {
> >  	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
> >
> > @@ -240,7 +240,7 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
> >  	return likely(kvm_gen == spte_gen);
> >  }
> >
> > -static int is_cpuid_PSE36(void)
> > +int is_cpuid_PSE36(void)
> >  {
> >  	return 1;
> >  }
>
> Can we just drop is_cpuid_PSE36(), e.g. as a precursor patch? It just
> returns 1...

Yeah, good idea. Looks like we can eliminate a little dead code doing that too.
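To illustrate the agreed-on precursor cleanup: is_cpuid_PSE36() unconditionally returns 1, so removing it only simplifies its caller(s). The call site below (the 32-bit guest-PTE walk in paging_tmpl.h) is shown from memory for illustration and may not match the tree exactly.

/* Before: the helper always evaluates to true, so the check is dead. */
if (PTTYPE == 32 && walker->level > PG_LEVEL_4K && is_cpuid_PSE36())
	gfn += pse36_gfn_delta(pte);

/* After dropping the always-true helper: */
if (PTTYPE == 32 && walker->level > PG_LEVEL_4K)
	gfn += pse36_gfn_delta(pte);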
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index bf14e181eb12..a17e8a79e4df 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -153,18 +153,18 @@ BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
  * and the vCPU may be incorrect/irrelevant.
  */
 #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
-static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu) \
+inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu)	\
 {									\
 	return !!(mmu->cpu_role. base_or_ext . reg##_##name);		\
 }
 BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
-BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse);
+static BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse);
 BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smep);
-BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap);
-BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke);
-BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57);
+static BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap);
+static BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke);
+static BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57);
 BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
-BUILD_MMU_ROLE_ACCESSOR(ext, efer, lma);
+static BUILD_MMU_ROLE_ACCESSOR(ext, efer, lma);
 
 static inline bool is_cr0_pg(struct kvm_mmu *mmu)
 {
@@ -210,7 +210,7 @@ void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 	kvm_flush_remote_tlbs_with_range(kvm, &range);
 }
 
-static gfn_t get_mmio_spte_gfn(u64 spte)
+gfn_t get_mmio_spte_gfn(u64 spte)
 {
 	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
 
@@ -240,7 +240,7 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
 	return likely(kvm_gen == spte_gen);
 }
 
-static int is_cpuid_PSE36(void)
+int is_cpuid_PSE36(void)
 {
 	return 1;
 }
@@ -279,7 +279,7 @@ void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
 	}
 }
 
-static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
+int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
 {
 	int r;
 
@@ -818,8 +818,8 @@ static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
 	return -EFAULT;
 }
 
-static int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
-			       unsigned int access)
+int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
+			unsigned int access)
 {
 	/* The pfn is invalid, report the error! */
 	if (unlikely(is_error_pfn(fault->pfn)))
@@ -1275,8 +1275,8 @@ static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 	return RET_PF_RETRY;
 }
 
-static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
-					 struct kvm_page_fault *fault)
+bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
+				  struct kvm_page_fault *fault)
 {
 	if (unlikely(fault->rsvd))
 		return false;
@@ -1338,7 +1338,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
 }
 
-static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	struct kvm_memory_slot *slot = fault->slot;
 	bool async;
@@ -1403,8 +1403,8 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
  * Returns true if the page fault is stale and needs to be retried, i.e. if the
  * root was invalidated by a memslot update or a relevant mmu_notifier fired.
  */
-static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
-				struct kvm_page_fault *fault, int mmu_seq)
+bool is_page_fault_stale(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
+			 int mmu_seq)
 {
 	struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa);
 
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 74a99b67f09e..957376fcb333 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -341,6 +341,22 @@ bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
 void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu);
 void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu);
 
+int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect);
 bool need_topup_split_caches_or_resched(struct kvm *kvm);
 int topup_split_caches(struct kvm *kvm);
+
+bool is_page_fault_stale(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
+			 int mmu_seq);
+bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
+				  struct kvm_page_fault *fault);
+int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
+int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
+			unsigned int access);
+
+gfn_t get_mmio_spte_gfn(u64 spte);
+
+bool is_efer_nx(struct kvm_mmu *mmu);
+bool is_cr4_smep(struct kvm_mmu *mmu);
+bool is_cr0_wp(struct kvm_mmu *mmu);
+int is_cpuid_PSE36(void);
 
 #endif /* __KVM_X86_MMU_INTERNAL_H */
In preparation for moving paging_tmpl.h to shadow_mmu.c, expose various
functions it needs through mmu_internal.h. This includes modifying the
BUILD_MMU_ROLE_ACCESSOR macro so that it does not automatically include
the static label, since some but not all of the accessors are needed by
paging_tmpl.h.

No functional change intended.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c          | 32 ++++++++++++++++----------------
 arch/x86/kvm/mmu/mmu_internal.h | 16 ++++++++++++++++
 2 files changed, 32 insertions(+), 16 deletions(-)
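To make the motivation concrete: once paging_tmpl.h is compiled from shadow_mmu.c, fault-path code of roughly the following shape must resolve against the declarations newly added to mmu_internal.h instead of file-local statics in mmu.c. This is a simplified sketch loosely modeled on the guest page-fault path, not code from this series; the function name and the ACC_ALL access value are placeholders.

#include "mmu.h"
#include "mmu_internal.h"

/* Sketch: a shadow-MMU page-fault path using the newly exposed helpers. */
static int shadow_page_fault_sketch(struct kvm_vcpu *vcpu,
				    struct kvm_page_fault *fault)
{
	int r;

	/* Bail to the emulator if the gfn is write-tracked. */
	if (page_fault_handle_page_track(vcpu, fault))
		return RET_PF_EMULATE;

	/* Callable from outside mmu.c thanks to the new declaration. */
	r = mmu_topup_memory_caches(vcpu, true);
	if (r)
		return r;

	/* Resolve the pfn backing the faulting gfn. */
	r = kvm_faultin_pfn(vcpu, fault);
	if (r != RET_PF_CONTINUE)
		return r;

	r = handle_abnormal_pfn(vcpu, fault, ACC_ALL);
	if (r != RET_PF_CONTINUE)
		return r;

	/* ... map the pfn under mmu_lock, rechecking is_page_fault_stale() ... */
	return RET_PF_RETRY;
}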