Message ID | eeacea5ff4c2c5ba16a14dfdb86869dc5b17520a.1588711355.git.ashish.kalra@amd.com |
---|---|
State | New, archived |
Series | Add AMD SEV guest live migration support |
On Tue, May 5, 2020 at 2:18 PM Ashish Kalra <Ashish.Kalra@amd.com> wrote:
>
> From: Brijesh Singh <Brijesh.Singh@amd.com>
>
> The ioctl can be used to set page encryption bitmap for an
> incoming guest.
>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Ingo Molnar <mingo@redhat.com>
> Cc: "H. Peter Anvin" <hpa@zytor.com>
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> Cc: "Radim Krčmář" <rkrcmar@redhat.com>
> Cc: Joerg Roedel <joro@8bytes.org>
> Cc: Borislav Petkov <bp@suse.de>
> Cc: Tom Lendacky <thomas.lendacky@amd.com>
> Cc: x86@kernel.org
> Cc: kvm@vger.kernel.org
> Cc: linux-kernel@vger.kernel.org
> Reviewed-by: Venu Busireddy <venu.busireddy@oracle.com>
> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
> Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
> ---
>  Documentation/virt/kvm/api.rst  | 44 +++++++++++++++++++++++++++++
>  arch/x86/include/asm/kvm_host.h |  2 ++
>  arch/x86/kvm/svm/sev.c          | 50 +++++++++++++++++++++++++++++++++
>  arch/x86/kvm/svm/svm.c          |  1 +
>  arch/x86/kvm/svm/svm.h          |  1 +
>  arch/x86/kvm/x86.c              | 12 ++++++++
>  include/uapi/linux/kvm.h        |  1 +
>  7 files changed, 111 insertions(+)
>
> diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
> index ecad84086892..fa70017ee693 100644
> --- a/Documentation/virt/kvm/api.rst
> +++ b/Documentation/virt/kvm/api.rst
> @@ -4663,6 +4663,28 @@ or shared. The bitmap can be used during the guest migration. If the page
>  is private then the userspace need to use SEV migration commands to transmit
>  the page.
>
> +4.126 KVM_SET_PAGE_ENC_BITMAP (vm ioctl)
> +---------------------------------------
> +
> +:Capability: basic
> +:Architectures: x86
> +:Type: vm ioctl
> +:Parameters: struct kvm_page_enc_bitmap (in/out)
> +:Returns: 0 on success, -1 on error
> +
> +/* for KVM_SET_PAGE_ENC_BITMAP */
> +struct kvm_page_enc_bitmap {
> +	__u64 start_gfn;
> +	__u64 num_pages;
> +	union {
> +		void __user *enc_bitmap; /* one bit per page */
> +		__u64 padding2;
> +	};
> +};
> +
> +During the guest live migration the outgoing guest exports its page encryption
> +bitmap, the KVM_SET_PAGE_ENC_BITMAP can be used to build the page encryption
> +bitmap for an incoming guest.
>
>  4.125 KVM_S390_PV_COMMAND
>  -------------------------
> @@ -4717,6 +4739,28 @@ KVM_PV_VM_VERIFY
>    Verify the integrity of the unpacked image. Only if this succeeds,
>    KVM is allowed to start protected VCPUs.
>
> +4.126 KVM_SET_PAGE_ENC_BITMAP (vm ioctl)
> +---------------------------------------
> +
> +:Capability: basic
> +:Architectures: x86
> +:Type: vm ioctl
> +:Parameters: struct kvm_page_enc_bitmap (in/out)
> +:Returns: 0 on success, -1 on error
> +
> +/* for KVM_SET_PAGE_ENC_BITMAP */
> +struct kvm_page_enc_bitmap {
> +	__u64 start_gfn;
> +	__u64 num_pages;
> +	union {
> +		void __user *enc_bitmap; /* one bit per page */
> +		__u64 padding2;
> +	};
> +};
> +
> +During the guest live migration the outgoing guest exports its page encryption
> +bitmap, the KVM_SET_PAGE_ENC_BITMAP can be used to build the page encryption
> +bitmap for an incoming guest.
>
>  5. The kvm_run structure
>  ========================
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 9e428befb6a4..fc74144d5ab0 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1258,6 +1258,8 @@ struct kvm_x86_ops {
> 				  unsigned long sz, unsigned long mode);
> 	int (*get_page_enc_bitmap)(struct kvm *kvm,
> 				struct kvm_page_enc_bitmap *bmap);
> +	int (*set_page_enc_bitmap)(struct kvm *kvm,
> +				struct kvm_page_enc_bitmap *bmap);
>  };
>
>  struct kvm_x86_init_ops {
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 387045902470..30efc1068707 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -1504,6 +1504,56 @@ int svm_get_page_enc_bitmap(struct kvm *kvm,
>  	return ret;
>  }
>
> +int svm_set_page_enc_bitmap(struct kvm *kvm,
> +			    struct kvm_page_enc_bitmap *bmap)
> +{
> +	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> +	unsigned long gfn_start, gfn_end;
> +	unsigned long *bitmap;
> +	unsigned long sz;
> +	int ret;
> +
> +	if (!sev_guest(kvm))
> +		return -ENOTTY;
> +	/* special case of resetting the complete bitmap */
> +	if (!bmap->enc_bitmap) {
> +		mutex_lock(&kvm->lock);
> +		/* by default all pages are marked encrypted */
> +		if (sev->page_enc_bmap_size)
> +			bitmap_fill(sev->page_enc_bmap,
> +				    sev->page_enc_bmap_size);
> +		mutex_unlock(&kvm->lock);
> +		return 0;
> +	}
> +
> +	gfn_start = bmap->start_gfn;
> +	gfn_end = gfn_start + bmap->num_pages;
> +
> +	sz = ALIGN(bmap->num_pages, BITS_PER_LONG) / 8;
> +	bitmap = kmalloc(sz, GFP_KERNEL);
> +	if (!bitmap)
> +		return -ENOMEM;
> +
> +	ret = -EFAULT;
> +	if (copy_from_user(bitmap, bmap->enc_bitmap, sz))
> +		goto out;
> +
> +	mutex_lock(&kvm->lock);
> +	ret = sev_resize_page_enc_bitmap(kvm, gfn_end);
> +	if (ret)
> +		goto unlock;
> +
> +	bitmap_copy(sev->page_enc_bmap + BIT_WORD(gfn_start), bitmap,
> +		    (gfn_end - gfn_start));

I *think* this assumes that gfn_start is a multiple of 8. I'm not certain
I have a clean suggestion for fixing this, other than advertising that this
is an expectation, and returning an error if that is not true.

If I'm reading bitmap_copy correctly, I also think it assumes all bitmaps
have lengths that are unsigned long aligned, which surprised me.
> +
> +	ret = 0;
> +unlock:
> +	mutex_unlock(&kvm->lock);
> +out:
> +	kfree(bitmap);
> +	return ret;
> +}
> +
>  int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
>  {
>  	struct kvm_sev_cmd sev_cmd;
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 588709a9f68e..501e82f5593c 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -4017,6 +4017,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
>
>  	.page_enc_status_hc = svm_page_enc_status_hc,
>  	.get_page_enc_bitmap = svm_get_page_enc_bitmap,
> +	.set_page_enc_bitmap = svm_set_page_enc_bitmap,
>  };
>
>  static struct kvm_x86_init_ops svm_init_ops __initdata = {
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index f087fa7b380c..2ebdcce50312 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -405,6 +405,7 @@ int nested_svm_exit_special(struct vcpu_svm *svm);
>  int svm_page_enc_status_hc(struct kvm *kvm, unsigned long gpa,
>  			   unsigned long npages, unsigned long enc);
>  int svm_get_page_enc_bitmap(struct kvm *kvm, struct kvm_page_enc_bitmap *bmap);
> +int svm_set_page_enc_bitmap(struct kvm *kvm, struct kvm_page_enc_bitmap *bmap);
>
>  /* avic.c */
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 937797cfaf9a..c4166d7a0493 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -5220,6 +5220,18 @@ long kvm_arch_vm_ioctl(struct file *filp,
>  		r = kvm_x86_ops.get_page_enc_bitmap(kvm, &bitmap);
>  		break;
>  	}
> +	case KVM_SET_PAGE_ENC_BITMAP: {
> +		struct kvm_page_enc_bitmap bitmap;
> +
> +		r = -EFAULT;
> +		if (copy_from_user(&bitmap, argp, sizeof(bitmap)))
> +			goto out;
> +
> +		r = -ENOTTY;
> +		if (kvm_x86_ops.set_page_enc_bitmap)
> +			r = kvm_x86_ops.set_page_enc_bitmap(kvm, &bitmap);
> +		break;
> +	}
>  	default:
>  		r = -ENOTTY;
>  	}
> diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
> index af62f2afaa5d..2798b17484d0 100644
> --- a/include/uapi/linux/kvm.h
> +++ b/include/uapi/linux/kvm.h
> @@ -1529,6 +1529,7 @@ struct kvm_pv_cmd {
>  #define KVM_S390_PV_COMMAND	_IOWR(KVMIO, 0xc5, struct kvm_pv_cmd)
>
>  #define KVM_GET_PAGE_ENC_BITMAP	_IOW(KVMIO, 0xc6, struct kvm_page_enc_bitmap)
> +#define KVM_SET_PAGE_ENC_BITMAP	_IOW(KVMIO, 0xc7, struct kvm_page_enc_bitmap)
>
>  /* Secure Encrypted Virtualization command */
>  enum sev_cmd_id {
> --
> 2.17.1
>

Otherwise, this looks good to me. Thanks for merging the ioctls together.

Reviewed-by: Steve Rutherford <srutherford@google.com>
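A minimal sketch of the kind of check the review asks for, not part of the
posted patch: since the destination offset passed to bitmap_copy() is derived
with BIT_WORD(), i.e. in units of BITS_PER_LONG bits, one way to advertise the
expectation is to reject unaligned start_gfn values right after gfn_start and
gfn_end are computed in svm_set_page_enc_bitmap():

	gfn_start = bmap->start_gfn;
	gfn_end = gfn_start + bmap->num_pages;

	/*
	 * Sketch only: bitmap_copy() places the user-supplied bits at a
	 * word-granular offset (BIT_WORD(gfn_start)), so an unaligned
	 * start_gfn would land the whole range at the wrong position.
	 * Make the alignment expectation explicit and fail early.
	 */
	if (!IS_ALIGNED(gfn_start, BITS_PER_LONG))
		return -EINVAL;

A similar check on num_pages would cover the length-alignment assumption
mentioned in the same comment.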
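For context, a minimal sketch of how a migration-target VMM might drive the
documented ioctl, assuming uapi headers from a kernel with this series
applied; vm_fd (an already-created KVM VM file descriptor) and
received_bitmap (a buffer holding the packed bitmap transferred from the
source) are hypothetical names, not part of the patch:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* needs a kernel that defines KVM_SET_PAGE_ENC_BITMAP */

/* Apply a page-encryption bitmap received from the migration source. */
static int set_page_enc_bitmap(int vm_fd, uint64_t start_gfn,
			       uint64_t num_pages, void *received_bitmap)
{
	struct kvm_page_enc_bitmap bmap;

	memset(&bmap, 0, sizeof(bmap));
	bmap.start_gfn = start_gfn;
	bmap.num_pages = num_pages;
	bmap.enc_bitmap = received_bitmap;	/* one bit per guest page */

	return ioctl(vm_fd, KVM_SET_PAGE_ENC_BITMAP, &bmap);
}

/* Per the patch, a NULL bitmap resets the whole bitmap to "all encrypted". */
static int reset_page_enc_bitmap(int vm_fd)
{
	struct kvm_page_enc_bitmap bmap;

	memset(&bmap, 0, sizeof(bmap));
	return ioctl(vm_fd, KVM_SET_PAGE_ENC_BITMAP, &bmap);
}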