Message ID: c73b5d5f902bb6d21a784bed2904fc1860aaf571.1699368363.git.isaku.yamahata@intel.com
State: New, archived
Series: KVM TDX: TDP MMU: large page support
On 11/7/2023 11:00 PM, isaku.yamahata@intel.com wrote:
> From: Xiaoyao Li <xiaoyao.li@intel.com>
>
> tdh_mem_page_aug() will support 2MB large page in the near future. Cache
> flush also needs to be 2MB instead of 4KB in such cases. Introduce a
> helper function to flush cache with page size info in preparation for large
> pages.
>
> Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>

Nit: About the shortlog, is it clearer to say "Flush cache for a page
based on page size before TDX SEAMCALL"?

Reviewed-by: Binbin Wu <binbin.wu@linux.intel.com>

> ---
>  arch/x86/kvm/vmx/tdx_ops.h | 22 ++++++++++++++--------
>  1 file changed, 14 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/tdx_ops.h b/arch/x86/kvm/vmx/tdx_ops.h
> index fd73a1731bf8..e726102d3523 100644
> --- a/arch/x86/kvm/vmx/tdx_ops.h
> +++ b/arch/x86/kvm/vmx/tdx_ops.h
> @@ -6,6 +6,7 @@
>
>  #include <linux/compiler.h>
>
> +#include <asm/pgtable_types.h>
>  #include <asm/archrandom.h>
>  #include <asm/cacheflush.h>
>  #include <asm/asm.h>
> @@ -62,6 +63,11 @@ static inline u64 tdx_seamcall(u64 op, u64 rcx, u64 rdx, u64 r8, u64 r9,
>  void pr_tdx_error(u64 op, u64 error_code, const struct tdx_module_args *out);
>  #endif
>
> +static inline void tdx_clflush_page(hpa_t addr, enum pg_level level)
> +{
> +        clflush_cache_range(__va(addr), KVM_HPAGE_SIZE(level));
> +}
> +
>  /*
>   * TDX module acquires its internal lock for resources. It doesn't spin to get
>   * locks because of its restrictions of allowed execution time. Instead, it
> @@ -94,21 +100,21 @@ static inline u64 tdx_seamcall_sept(u64 op, u64 rcx, u64 rdx, u64 r8, u64 r9,
>
>  static inline u64 tdh_mng_addcx(hpa_t tdr, hpa_t addr)
>  {
> -        clflush_cache_range(__va(addr), PAGE_SIZE);
> +        tdx_clflush_page(addr, PG_LEVEL_4K);
>          return tdx_seamcall(TDH_MNG_ADDCX, addr, tdr, 0, 0, NULL);
>  }
>
>  static inline u64 tdh_mem_page_add(hpa_t tdr, gpa_t gpa, hpa_t hpa, hpa_t source,
>                                     struct tdx_module_args *out)
>  {
> -        clflush_cache_range(__va(hpa), PAGE_SIZE);
> +        tdx_clflush_page(hpa, PG_LEVEL_4K);
>          return tdx_seamcall_sept(TDH_MEM_PAGE_ADD, gpa, tdr, hpa, source, out);
>  }
>
>  static inline u64 tdh_mem_sept_add(hpa_t tdr, gpa_t gpa, int level, hpa_t page,
>                                     struct tdx_module_args *out)
>  {
> -        clflush_cache_range(__va(page), PAGE_SIZE);
> +        tdx_clflush_page(page, PG_LEVEL_4K);
>          return tdx_seamcall_sept(TDH_MEM_SEPT_ADD, gpa | level, tdr, page, 0, out);
>  }
>
> @@ -126,21 +132,21 @@ static inline u64 tdh_mem_sept_remove(hpa_t tdr, gpa_t gpa, int level,
>
>  static inline u64 tdh_vp_addcx(hpa_t tdvpr, hpa_t addr)
>  {
> -        clflush_cache_range(__va(addr), PAGE_SIZE);
> +        tdx_clflush_page(addr, PG_LEVEL_4K);
>          return tdx_seamcall(TDH_VP_ADDCX, addr, tdvpr, 0, 0, NULL);
>  }
>
>  static inline u64 tdh_mem_page_relocate(hpa_t tdr, gpa_t gpa, hpa_t hpa,
>                                          struct tdx_module_args *out)
>  {
> -        clflush_cache_range(__va(hpa), PAGE_SIZE);
> +        tdx_clflush_page(hpa, PG_LEVEL_4K);
>          return tdx_seamcall_sept(TDH_MEM_PAGE_RELOCATE, gpa, tdr, hpa, 0, out);
>  }
>
>  static inline u64 tdh_mem_page_aug(hpa_t tdr, gpa_t gpa, hpa_t hpa,
>                                     struct tdx_module_args *out)
>  {
> -        clflush_cache_range(__va(hpa), PAGE_SIZE);
> +        tdx_clflush_page(hpa, PG_LEVEL_4K);
>          return tdx_seamcall_sept(TDH_MEM_PAGE_AUG, gpa, tdr, hpa, 0, out);
>  }
>
> @@ -157,13 +163,13 @@ static inline u64 tdh_mng_key_config(hpa_t tdr)
>
>  static inline u64 tdh_mng_create(hpa_t tdr, int hkid)
>  {
> -        clflush_cache_range(__va(tdr), PAGE_SIZE);
> +        tdx_clflush_page(tdr, PG_LEVEL_4K);
>          return tdx_seamcall(TDH_MNG_CREATE, tdr, hkid, 0, 0, NULL);
>  }
>
>  static inline u64 tdh_vp_create(hpa_t tdr, hpa_t tdvpr)
>  {
> -        clflush_cache_range(__va(tdvpr), PAGE_SIZE);
> +        tdx_clflush_page(tdvpr, PG_LEVEL_4K);
>          return tdx_seamcall(TDH_VP_CREATE, tdvpr, tdr, 0, 0, NULL);
>  }
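
For context, the direction the commit message describes can be sketched as follows.
This is only an illustration of how tdx_clflush_page() would be used once
TDH.MEM.PAGE.AUG accepts 2MB pages: the level parameter on the AUG wrapper, its name,
and the way the level is folded into the GPA operand are assumptions modeled on
tdh_mem_sept_add(), not part of this patch or the TDX module ABI.

/*
 * Illustrative sketch only, not part of this patch: once TDH.MEM.PAGE.AUG
 * accepts 2MB pages, the caller is expected to pass the mapping level so
 * that tdx_clflush_page() flushes KVM_HPAGE_SIZE(level) bytes (4KB for
 * PG_LEVEL_4K, 2MB for PG_LEVEL_2M) instead of a fixed PAGE_SIZE.
 */
static inline u64 tdh_mem_page_aug_at_level(hpa_t tdr, gpa_t gpa,
                                            enum pg_level level, hpa_t hpa,
                                            struct tdx_module_args *out)
{
        /* Flush the entire backing page before handing it to the TDX module. */
        tdx_clflush_page(hpa, level);
        /*
         * Folding the S-EPT level into the GPA operand mirrors
         * tdh_mem_sept_add(); "level - 1" converts KVM's PG_LEVEL_* (4K == 1)
         * to the TDX S-EPT level (4K == 0).  The real encoding is defined by
         * the large-page series, not by this sketch.
         */
        return tdx_seamcall_sept(TDH_MEM_PAGE_AUG, gpa | (level - 1), tdr, hpa, 0, out);
}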