Message ID | 20190731150813.26289-48-kirill.shutemov@linux.intel.com (mailing list archive) |
---|---|
State | New, archived |
Series | Intel MKTME enabling |
On 7/31/19 10:08 AM, Kirill A. Shutemov wrote:
> From: Kai Huang <kai.huang@linux.intel.com>
>
> Setup keyID to SPTE, which will be eventually programmed to shadow MMU
> or EPT table, according to page's associated keyID, so that guest is
> able to use correct keyID to access guest memory.
>
> Note current shadow_me_mask doesn't suit MKTME's needs, since for MKTME
> there's no fixed memory encryption mask, but can vary from keyID 1 to
> maximum keyID, therefore shadow_me_mask remains 0 for MKTME.
>
> Signed-off-by: Kai Huang <kai.huang@linux.intel.com>
> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> ---
>  arch/x86/kvm/mmu.c | 18 +++++++++++++++++-
>  1 file changed, 17 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 8f72526e2f68..b8742e6219f6 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -2936,6 +2936,22 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
>  #define SET_SPTE_WRITE_PROTECTED_PT BIT(0)
>  #define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1)
>
> +static u64 get_phys_encryption_mask(kvm_pfn_t pfn)
> +{
> +#ifdef CONFIG_X86_INTEL_MKTME
> +        struct page *page;
> +
> +        if (!pfn_valid(pfn))
> +                return 0;
> +
> +        page = pfn_to_page(pfn);
> +
> +        return ((u64)page_keyid(page)) << mktme_keyid_shift();
> +#else
> +        return shadow_me_mask;
> +#endif
> +}

This patch breaks AMD virtualization (SVM) in general (non-SEV and SEV
guests) when SME is active. Shouldn't this be a run time, vs build time,
check for MKTME being active?

Thanks,
Tom

> +
>  static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
>                      unsigned pte_access, int level,
>                      gfn_t gfn, kvm_pfn_t pfn, bool speculative,
> @@ -2982,7 +2998,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
>                  pte_access &= ~ACC_WRITE_MASK;
>
>          if (!kvm_is_mmio_pfn(pfn))
> -                spte |= shadow_me_mask;
> +                spte |= get_phys_encryption_mask(pfn);
>
>          spte |= (u64)pfn << PAGE_SHIFT;
>
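To make Tom's point concrete: with CONFIG_X86_INTEL_MKTME=y the #ifdef branch above is compiled in even when the kernel ends up running on an AMD host with SME active, so the C-bit that shadow_me_mask used to contribute never reaches the SPTE. A small stand-alone sketch of that failure mode follows; the bit positions and the page_keyid() stub are illustrative assumptions, not kernel code.

/*
 * Sketch of the SME breakage: on AMD there are no MKTME keyIDs, so the
 * build-time MKTME path yields a zero mask instead of the C-bit.
 */
#include <stdint.h>
#include <stdio.h>

#define SME_C_BIT          (1ULL << 47)  /* hypothetical sme_me_mask */
#define MKTME_KEYID_SHIFT  46            /* hypothetical keyID field position */

static uint64_t shadow_me_mask = SME_C_BIT; /* what an SME host sets up at boot */

static int page_keyid(void) { return 0; }  /* stub: no keyIDs exist on AMD */

int main(void)
{
        /* Mask the pre-patch code used vs. what the #ifdef MKTME path returns: */
        uint64_t old_mask = shadow_me_mask;
        uint64_t new_mask = (uint64_t)page_keyid() << MKTME_KEYID_SHIFT;

        printf("old mask %#llx, new mask %#llx\n",
               (unsigned long long)old_mask, (unsigned long long)new_mask);
        /* new mask is 0: guest SPTEs lose the C-bit and SME guests break. */
        return 0;
}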
On Tue, Aug 06, 2019 at 08:26:52PM +0000, Lendacky, Thomas wrote:
> On 7/31/19 10:08 AM, Kirill A. Shutemov wrote:
> > From: Kai Huang <kai.huang@linux.intel.com>
> >
> > Setup keyID to SPTE, which will be eventually programmed to shadow MMU
> > or EPT table, according to page's associated keyID, so that guest is
> > able to use correct keyID to access guest memory.
> >
> > Note current shadow_me_mask doesn't suit MKTME's needs, since for MKTME
> > there's no fixed memory encryption mask, but can vary from keyID 1 to
> > maximum keyID, therefore shadow_me_mask remains 0 for MKTME.
> >
> > Signed-off-by: Kai Huang <kai.huang@linux.intel.com>
> > Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> > ---
> >  arch/x86/kvm/mmu.c | 18 +++++++++++++++++-
> >  1 file changed, 17 insertions(+), 1 deletion(-)
> >
> > diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> > index 8f72526e2f68..b8742e6219f6 100644
> > --- a/arch/x86/kvm/mmu.c
> > +++ b/arch/x86/kvm/mmu.c
> > @@ -2936,6 +2936,22 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
> >  #define SET_SPTE_WRITE_PROTECTED_PT BIT(0)
> >  #define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1)
> >
> > +static u64 get_phys_encryption_mask(kvm_pfn_t pfn)
> > +{
> > +#ifdef CONFIG_X86_INTEL_MKTME
> > +        struct page *page;
> > +
> > +        if (!pfn_valid(pfn))
> > +                return 0;
> > +
> > +        page = pfn_to_page(pfn);
> > +
> > +        return ((u64)page_keyid(page)) << mktme_keyid_shift();
> > +#else
> > +        return shadow_me_mask;
> > +#endif
> > +}
>
> This patch breaks AMD virtualization (SVM) in general (non-SEV and SEV
> guests) when SME is active. Shouldn't this be a run time, vs build time,
> check for MKTME being active?

Thanks, I've missed this. This fixup should help:

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 00d17bdfec0f..54931acf260e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2947,18 +2947,17 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 
 static u64 get_phys_encryption_mask(kvm_pfn_t pfn)
 {
-#ifdef CONFIG_X86_INTEL_MKTME
         struct page *page;
 
+        if (!mktme_enabled())
+                return shadow_me_mask;
+
         if (!pfn_valid(pfn))
                 return 0;
 
         page = pfn_to_page(pfn);
 
         return ((u64)page_keyid(page)) << mktme_keyid_shift();
-#else
-        return shadow_me_mask;
-#endif
 }
 
 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
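For reference, one way such a run-time check stays cheap on the set_spte() path is a static key. Below is a minimal sketch of what an mktme_enabled() helper of that shape could look like; the key name, header placement and boot-time enablement are assumptions for illustration, not taken from this thread.

/* Sketch of a run-time MKTME check, e.g. in arch/x86/include/asm/mktme.h. */
#include <linux/jump_label.h>

#ifdef CONFIG_X86_INTEL_MKTME
/* Defined in one .c file with DEFINE_STATIC_KEY_FALSE() and switched on at
 * boot once the MKTME activation MSR reports usable keyIDs. */
DECLARE_STATIC_KEY_FALSE(mktme_enabled_key);

static inline bool mktme_enabled(void)
{
        /* Patched to a plain branch, so the SPTE fast path pays almost nothing. */
        return static_branch_unlikely(&mktme_enabled_key);
}
#else
static inline bool mktme_enabled(void)
{
        return false;   /* lets the compiler drop the MKTME path entirely */
}
#endif

With a helper of that shape, an SME or SEV host running a CONFIG_X86_INTEL_MKTME=y kernel falls back to shadow_me_mask at run time, which is what the fixup above relies on.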
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8f72526e2f68..b8742e6219f6 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2936,6 +2936,22 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 #define SET_SPTE_WRITE_PROTECTED_PT BIT(0)
 #define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1)
 
+static u64 get_phys_encryption_mask(kvm_pfn_t pfn)
+{
+#ifdef CONFIG_X86_INTEL_MKTME
+        struct page *page;
+
+        if (!pfn_valid(pfn))
+                return 0;
+
+        page = pfn_to_page(pfn);
+
+        return ((u64)page_keyid(page)) << mktme_keyid_shift();
+#else
+        return shadow_me_mask;
+#endif
+}
+
 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                     unsigned pte_access, int level,
                     gfn_t gfn, kvm_pfn_t pfn, bool speculative,
@@ -2982,7 +2998,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                 pte_access &= ~ACC_WRITE_MASK;
 
         if (!kvm_is_mmio_pfn(pfn))
-                spte |= shadow_me_mask;
+                spte |= get_phys_encryption_mask(pfn);
 
         spte |= (u64)pfn << PAGE_SHIFT;
 
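To illustrate what the new helper contributes: set_spte() ORs the keyID mask into the same 64-bit word as the page frame address, so the keyID lands in the upper physical-address bits of the leaf entry and the guest's accesses to that page use the corresponding key. A stand-alone sketch of that composition follows; the keyID field position and the sample values are illustrative, while on real hardware the shift comes from mktme_keyid_shift(), i.e. the MKTME activation MSR.

#include <stdint.h>
#include <stdio.h>

/* Illustrative values; a real system reads the keyID field position from MSRs. */
#define MKTME_KEYID_SHIFT  46  /* first physical-address bit of the keyID field */
#define PAGE_SHIFT         12

int main(void)
{
        uint64_t pfn = 0x123456; /* host page frame backing the guest page */
        int keyid = 3;           /* keyID associated with that page */

        /* What the patched set_spte() effectively builds for a valid pfn: */
        uint64_t spte = ((uint64_t)keyid << MKTME_KEYID_SHIFT) |
                        (pfn << PAGE_SHIFT);

        printf("spte = %#llx\n", (unsigned long long)spte);
        /* -> 0xc00123456000: keyID 3 in bits 47:46, frame address below it. */
        return 0;
}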