Message ID: 20221214194056.161492-12-michael.roth@amd.com (mailing list archive)
State: New
Series: Add AMD Secure Nested Paging (SEV-SNP) Hypervisor Support
On Wed, Dec 14, 2022 at 01:40:03PM -0600, Michael Roth wrote:
> From: Nikunj A Dadhania <nikunj@amd.com>
>
> Pre-boot guest payload needs to be encrypted and VMM has copied it
> over to the private-fd. Add support to get the pfn from the memfile fd
> for encrypting the payload in-place.
>
> Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
> Signed-off-by: Michael Roth <michael.roth@amd.com>
> ---
>  arch/x86/kvm/svm/sev.c | 79 ++++++++++++++++++++++++++++++++++--------
>  1 file changed, 64 insertions(+), 15 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index a7e4e3005786..ae4920aeb281 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c

[...]

> +static int sev_get_memfile_pfn(struct kvm *kvm, unsigned long addr,
> +                               unsigned long size, unsigned long npages,
> +                               struct page **pages)
> +{
> +        return kvm_vm_do_hva_range_op(kvm, addr, size,
> +                                      sev_get_memfile_pfn_handler, pages);
> +}

The third argument for the kvm_vm_do_hva_range_op call should be
addr+size; the function expects the end of the range not the size of
the range.

[...]

> +        if (kvm_is_upm_enabled(kvm)) {
> +                /* Get the PFN from memfile */
> +                if (sev_get_memfile_pfn(kvm, uaddr, ulen, npages, pages)) {
> +                        pr_err("%s: ERROR: unable to find slot for uaddr %lx", __func__, uaddr);
> +                        ret = -ENOMEM;
> +                        goto err;
> +                }

This branch doesn't initialize npinned. If sev_get_memfile_pfn fails, the
code following the err label passes the uninitialized value to
unpin_user_pages.

[...]

Regards,
Tom
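For reference, the fix Tom is asking for would look something like the
sketch below (an illustration only, assuming, as this series does, that
kvm_vm_do_hva_range_op() takes the start and exclusive end HVA of the
range rather than a length):

static int sev_get_memfile_pfn(struct kvm *kvm, unsigned long addr,
                               unsigned long size, unsigned long npages,
                               struct page **pages)
{
        /* Pass the end of the HVA range, not its length. */
        return kvm_vm_do_hva_range_op(kvm, addr, addr + size,
                                      sev_get_memfile_pfn_handler, pages);
}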
On 22/12/22 23:54, erbse.13@gmx.de wrote:
> On Wed, Dec 14, 2022 at 01:40:03PM -0600, Michael Roth wrote:
>> From: Nikunj A Dadhania <nikunj@amd.com>
>>
>> Pre-boot guest payload needs to be encrypted and VMM has copied it
>> over to the private-fd. Add support to get the pfn from the memfile fd
>> for encrypting the payload in-place.

[...]

>> +static int sev_get_memfile_pfn(struct kvm *kvm, unsigned long addr,
>> +                               unsigned long size, unsigned long npages,
>> +                               struct page **pages)
>> +{
>> +        return kvm_vm_do_hva_range_op(kvm, addr, size,
>> +                                      sev_get_memfile_pfn_handler, pages);
>> +}
>
> The third argument for the kvm_vm_do_hva_range_op call should be
> addr+size; the function expects the end of the range not the size of
> the range.

Good catch, will fix.

[...]

>> +        if (kvm_is_upm_enabled(kvm)) {
>> +                /* Get the PFN from memfile */
>> +                if (sev_get_memfile_pfn(kvm, uaddr, ulen, npages, pages)) {
>> +                        pr_err("%s: ERROR: unable to find slot for uaddr %lx", __func__, uaddr);
>> +                        ret = -ENOMEM;
>> +                        goto err;
>> +                }
>
> This branch doesn't initialize npinned. If sev_get_memfile_pfn fails, the
> code following the err label passes the uninitialized value to
> unpin_user_pages.

Sure, will fix.

[...]

Thanks
Nikunj
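One minimal way to address the uninitialized npinned that Tom points out
(a sketch of a possible fix, not the change that was actually posted) is
to start from a known-zero pin count, so that the err label's
unpin_user_pages() call becomes a no-op when the UPM branch bails out
before anything was pinned:

        int npinned = 0;        /* zero-init: the UPM path never pins */
        ...
        if (kvm_is_upm_enabled(kvm)) {
                /* Get the PFN from memfile */
                if (sev_get_memfile_pfn(kvm, uaddr, ulen, npages, pages)) {
                        pr_err("%s: ERROR: unable to find slot for uaddr %lx",
                               __func__, uaddr);
                        ret = -ENOMEM;
                        /* npinned is still 0: the err path unpins nothing */
                        goto err;
                }
        }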
On Wed, Dec 14, 2022 at 01:40:03PM -0600, Michael Roth wrote:
> From: Nikunj A Dadhania <nikunj@amd.com>
>
> Pre-boot guest payload needs to be encrypted and VMM has copied it
> over to the private-fd. Add support to get the pfn from the memfile fd
> for encrypting the payload in-place.

[...]

> @@ -609,9 +659,8 @@ static int sev_launch_update_priv_gfn_handler(struct kvm *kvm,
>                          goto e_ret;
>                  kvm_release_pfn_clean(pfn);
>          }
> -        kvm_vm_set_region_attr(kvm, range->start, range->end,
> -                               true /* priv_attr */);
>
> +        kvm_vm_set_region_attr(kvm, range->start, range->end, KVM_MEMORY_ATTRIBUTE_PRIVATE);
>  e_ret:
>          return ret;
>  }
> --
> 2.25.1

kvm_vm_set_region_attr() should be fixed already in:

https://lore.kernel.org/all/20221214194056.161492-11-michael.roth@amd.com/

BR, Jarkko
On 18/01/23 05:00, Jarkko Sakkinen wrote:
> On Wed, Dec 14, 2022 at 01:40:03PM -0600, Michael Roth wrote:
>> From: Nikunj A Dadhania <nikunj@amd.com>

[...]

>> @@ -609,9 +659,8 @@ static int sev_launch_update_priv_gfn_handler(struct kvm *kvm,
>>                          goto e_ret;
>>                  kvm_release_pfn_clean(pfn);
>>          }
>> -        kvm_vm_set_region_attr(kvm, range->start, range->end,
>> -                               true /* priv_attr */);
>>
>> +        kvm_vm_set_region_attr(kvm, range->start, range->end, KVM_MEMORY_ATTRIBUTE_PRIVATE);

As the memory attribute is no longer a boolean in the UPM series, I had
this change.

>>  e_ret:
>>          return ret;
>>  }
>> --
>> 2.25.1
>
> kvm_vm_set_region_attr() should be fixed already in:
>
> https://lore.kernel.org/all/20221214194056.161492-11-michael.roth@amd.com/

Will discuss with Mike and move this hunk to the above patch.

Regards
Nikunj
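For context, the interface change being discussed: with UPM, memory
attributes are a flags field rather than a single private/shared boolean,
so the caller passes an explicit attribute bit. Roughly (a sketch based
on the UPM series as posted, not a finalized API):

        /* Before: a boolean meaning "private" */
        kvm_vm_set_region_attr(kvm, range->start, range->end,
                               true /* priv_attr */);

        /* After: attributes are flags; mark the range private explicitly */
        kvm_vm_set_region_attr(kvm, range->start, range->end,
                               KVM_MEMORY_ATTRIBUTE_PRIVATE);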
On Wed, Dec 14, 2022 at 01:40:03PM -0600, Michael Roth wrote:
> From: Nikunj A Dadhania <nikunj@amd.com>
>
> Pre-boot guest payload needs to be encrypted and VMM has copied it

"has to have copied it over" I presume?

> over to the private-fd. Add support to get the pfn from the memfile fd
> for encrypting the payload in-place.

Why is that a good thing?

I guess with UPM you're supposed to get the PFN of that encrypted guest
payload from that memslot.

IOW, such commit messages are too laconic for my taste and you could try
to explain more why this is happening instead of me having to
"reverse-deduce" what you're doing from the code...

Thx.
On 01/02/23 23:52, Borislav Petkov wrote:
> On Wed, Dec 14, 2022 at 01:40:03PM -0600, Michael Roth wrote:
>> From: Nikunj A Dadhania <nikunj@amd.com>
>>
>> Pre-boot guest payload needs to be encrypted and VMM has copied it
>
> "has to have copied it over" I presume?

True, the payload is being copied in patch 10/64 now.

>> over to the private-fd. Add support to get the pfn from the memfile fd
>> for encrypting the payload in-place.
>
> Why is that a good thing?
>
> I guess with UPM you're supposed to get the PFN of that encrypted guest
> payload from that memslot.
>
> IOW, such commit messages are too laconic for my taste and you could try
> to explain more why this is happening instead of me having to
> "reverse-deduce" what you're doing from the code...

I am updating the SEV related patches and will add more details to the
commit message when I send them.

Regards
Nikunj
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index a7e4e3005786..ae4920aeb281 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -107,6 +107,11 @@ static inline bool is_mirroring_enc_context(struct kvm *kvm)
         return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
 }
 
+static bool kvm_is_upm_enabled(struct kvm *kvm)
+{
+        return kvm->arch.upm_mode;
+}
+
 /* Must be called with the sev_bitmap_lock held */
 static bool __sev_recycle_asids(int min_asid, int max_asid)
 {
@@ -382,6 +387,38 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
         return ret;
 }
 
+static int sev_get_memfile_pfn_handler(struct kvm *kvm, struct kvm_gfn_range *range, void *data)
+{
+        struct kvm_memory_slot *memslot = range->slot;
+        struct page **pages = data;
+        int ret = 0, i = 0;
+        kvm_pfn_t pfn;
+        gfn_t gfn;
+
+        for (gfn = range->start; gfn < range->end; gfn++) {
+                int order;
+
+                ret = kvm_restricted_mem_get_pfn(memslot, gfn, &pfn, &order);
+                if (ret)
+                        return ret;
+
+                if (is_error_noslot_pfn(pfn))
+                        return -EFAULT;
+
+                pages[i++] = pfn_to_page(pfn);
+        }
+
+        return ret;
+}
+
+static int sev_get_memfile_pfn(struct kvm *kvm, unsigned long addr,
+                               unsigned long size, unsigned long npages,
+                               struct page **pages)
+{
+        return kvm_vm_do_hva_range_op(kvm, addr, size,
+                                      sev_get_memfile_pfn_handler, pages);
+}
+
 static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
                                     unsigned long ulen, unsigned long *n,
                                     int write)
@@ -424,16 +461,25 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
         if (!pages)
                 return ERR_PTR(-ENOMEM);
 
-        /* Pin the user virtual address. */
-        npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
-        if (npinned != npages) {
-                pr_err("SEV: Failure locking %lu pages.\n", npages);
-                ret = -ENOMEM;
-                goto err;
+        if (kvm_is_upm_enabled(kvm)) {
+                /* Get the PFN from memfile */
+                if (sev_get_memfile_pfn(kvm, uaddr, ulen, npages, pages)) {
+                        pr_err("%s: ERROR: unable to find slot for uaddr %lx", __func__, uaddr);
+                        ret = -ENOMEM;
+                        goto err;
+                }
+        } else {
+                /* Pin the user virtual address. */
+                npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
+                if (npinned != npages) {
+                        pr_err("SEV: Failure locking %lu pages.\n", npages);
+                        ret = -ENOMEM;
+                        goto err;
+                }
+                sev->pages_locked = locked;
         }
 
         *n = npages;
-        sev->pages_locked = locked;
 
         return pages;
 
@@ -514,6 +560,7 @@ static int sev_launch_update_shared_gfn_handler(struct kvm *kvm,
 
         size = (range->end - range->start) << PAGE_SHIFT;
         vaddr_end = vaddr + size;
+        WARN_ON(size < PAGE_SIZE);
 
         /* Lock the user memory. */
         inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
@@ -554,13 +601,16 @@ static int sev_launch_update_shared_gfn_handler(struct kvm *kvm,
         }
 
 e_unpin:
-        /* content of memory is updated, mark pages dirty */
-        for (i = 0; i < npages; i++) {
-                set_page_dirty_lock(inpages[i]);
-                mark_page_accessed(inpages[i]);
+        if (!kvm_is_upm_enabled(kvm)) {
+                /* content of memory is updated, mark pages dirty */
+                for (i = 0; i < npages; i++) {
+                        set_page_dirty_lock(inpages[i]);
+                        mark_page_accessed(inpages[i]);
+                }
+                /* unlock the user pages */
+                sev_unpin_memory(kvm, inpages, npages);
         }
-        /* unlock the user pages */
-        sev_unpin_memory(kvm, inpages, npages);
+
         return ret;
 }
 
@@ -609,9 +659,8 @@ static int sev_launch_update_priv_gfn_handler(struct kvm *kvm,
                         goto e_ret;
                 kvm_release_pfn_clean(pfn);
         }
-        kvm_vm_set_region_attr(kvm, range->start, range->end,
-                               true /* priv_attr */);
 
+        kvm_vm_set_region_attr(kvm, range->start, range->end, KVM_MEMORY_ATTRIBUTE_PRIVATE);
 e_ret:
         return ret;
 }
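Taken together, and with the two review fixes above folded in, the UPM
path for encrypting the pre-boot payload in place reduces to roughly the
sketch below. Function names follow this patch series;
sev_launch_update_data_for_pages() is a hypothetical helper standing in
for the SEV LAUNCH_UPDATE_DATA firmware call, uaddr is assumed to be
page-aligned, and error handling is trimmed:

/*
 * Sketch: in-place encryption of the pre-boot payload under UPM.
 * The pages come straight from the restricted memfd, so there is no
 * pin_user_pages_fast()/unpin_user_pages() bookkeeping on this path.
 */
static int sev_encrypt_payload_upm(struct kvm *kvm, unsigned long uaddr,
                                   unsigned long ulen)
{
        unsigned long npages = (ulen + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct page **pages;
        int ret;

        pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL_ACCOUNT);
        if (!pages)
                return -ENOMEM;

        /* Resolve restricted-memfd PFNs; note addr + size, the range end. */
        ret = kvm_vm_do_hva_range_op(kvm, uaddr, uaddr + ulen,
                                     sev_get_memfile_pfn_handler, pages);
        if (!ret)
                ret = sev_launch_update_data_for_pages(kvm, pages, npages); /* hypothetical */

        kvfree(pages);
        return ret;
}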