Message ID | 20220428130102.230790-8-frankja@linux.ibm.com (mailing list archive) |
---|---|
State | New, archived |
Series | kvm: s390: Add PV dump support |
On Thu, 28 Apr 2022 13:01:00 +0000
Janosch Frank <frankja@linux.ibm.com> wrote:

> The previous patch introduced the per-VM dump functions now let's
> focus on dumping the VCPU state via the newly introduced
> KVM_S390_PV_CPU_COMMAND ioctl which mirrors the VM UV ioctl and can be
> extended with new commands later.
> 
> Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
> ---
>  arch/s390/kvm/kvm-s390.c | 73 ++++++++++++++++++++++++++++++++++++++++
>  arch/s390/kvm/kvm-s390.h | 1 +
>  arch/s390/kvm/pv.c | 16 +++++++++
>  include/uapi/linux/kvm.h | 5 +++
>  4 files changed, 95 insertions(+)
> 
> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> index 8984e8db33b4..d15ce38bef14 100644
> --- a/arch/s390/kvm/kvm-s390.c
> +++ b/arch/s390/kvm/kvm-s390.c
> @@ -5149,6 +5149,52 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
>  	return -ENOIOCTLCMD;
>  }
> 
> +static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
> +					struct kvm_pv_cmd *cmd)
> +{
> +	struct kvm_s390_pv_dmp dmp;
> +	void *data;
> +	int ret;
> +
> +	/* Dump initialization is a prerequisite */
> +	if (!vcpu->kvm->arch.pv.dumping)
> +		return -EINVAL;
> +
> +	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
> +		return -EFAULT;
> +
> +	/* We only handle this subcmd right now */
> +	if (dmp.subcmd != KVM_PV_DUMP_CPU)
> +		return -EINVAL;
> +
> +	/* CPU dump length is the same as create cpu storage donation. */
> +	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
> +		return -EINVAL;
> +
> +	data = vzalloc(uv_info.guest_cpu_stor_len);
> +	if (!data)
> +		return -ENOMEM;
> +
> +	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);
> +
> +	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
> +		   vcpu->vcpu_id, cmd->rc, cmd->rrc);
> +
> +	if (ret) {
> +		vfree(data);
> +		return -EINVAL;
> +	}
> +
> +	/* On success copy over the dump data */
> +	if (copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len)) {
> +		vfree(data);
> +		return -EFAULT;
> +	}
> +
> +	vfree(data);
> +	return 0;
> +}
> +
>  long kvm_arch_vcpu_ioctl(struct file *filp,
>  			 unsigned int ioctl, unsigned long arg)
>  {
> @@ -5313,6 +5359,33 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
>  					   irq_state.len);
>  		break;
>  	}
> +	case KVM_S390_PV_CPU_COMMAND: {
> +		struct kvm_pv_cmd cmd;
> +
> +		r = -EINVAL;
> +		if (!is_prot_virt_host())
> +			break;
> +
> +		r = -EFAULT;
> +		if (copy_from_user(&cmd, argp, sizeof(cmd)))
> +			break;
> +
> +		r = -EINVAL;
> +		if (cmd.flags)
> +			break;
> +
> +		/* We only handle this cmd right now */
> +		if (cmd.cmd != KVM_PV_DUMP)
> +			break;
> +
> +		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);
> +
> +		/* Always copy over UV rc / rrc data */
> +		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
> +				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
> +			r = -EFAULT;
> +		break;
> +	}
>  	default:
>  		r = -ENOTTY;
>  	}
> diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
> index 2868dd0bba25..a39815184350 100644
> --- a/arch/s390/kvm/kvm-s390.h
> +++ b/arch/s390/kvm/kvm-s390.h
> @@ -250,6 +250,7 @@ int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
>  int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
>  		       unsigned long tweak, u16 *rc, u16 *rrc);
>  int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state);
> +int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc);
>  int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
>  				u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc);
> 
> diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
> index d1635ed50078..9ab8192b9b23 100644
> --- a/arch/s390/kvm/pv.c
> +++ b/arch/s390/kvm/pv.c
> @@ -299,6 +299,22 @@ int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
>  	return 0;
>  }
> 
> +int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc)
> +{
> +	struct uv_cb_dump_cpu uvcb = {
> +		.header.cmd = UVC_CMD_DUMP_CPU,
> +		.header.len = sizeof(uvcb),
> +		.cpu_handle = vcpu->arch.pv.handle,
> +		.dump_area_origin = (u64)buff,
> +	};
> +	int cc;
> +
> +	cc = uv_call_sched(0, (u64)&uvcb);

it's a small amount of data, but you use the _sched variant?

and, why aren't you using the _sched variant in the previous patch (for
DUMP_COMPLETE)?

to be clear: I think the right thing is to always use the _sched
variant unless there is a good reason not to (so please fix the previous
patch)

> +	*rc = uvcb.header.rc;
> +	*rrc = uvcb.header.rrc;
> +	return cc;
> +}
> +
>  /* Size of the cache for the storage state dump data. 1MB for now */
>  #define DUMP_BUFF_LEN HPAGE_SIZE
> 
> diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
> index b34850907291..108bc7b7a71b 100644
> --- a/include/uapi/linux/kvm.h
> +++ b/include/uapi/linux/kvm.h
> @@ -1144,6 +1144,7 @@ struct kvm_ppc_resize_hpt {
>  #define KVM_CAP_S390_MEM_OP_EXTENSION 211
>  #define KVM_CAP_PMU_CAPABILITY 212
>  #define KVM_CAP_DISABLE_QUIRKS2 213
> +#define KVM_CAP_S390_PROTECTED_DUMP 214
> 
>  #ifdef KVM_CAP_IRQ_ROUTING
> 
> @@ -1649,6 +1650,7 @@ enum pv_cmd_dmp_id {
>  	KVM_PV_DUMP_INIT,
>  	KVM_PV_DUMP_CONFIG_STOR_STATE,
>  	KVM_PV_DUMP_COMPLETE,
> +	KVM_PV_DUMP_CPU,
>  };
> 
>  struct kvm_s390_pv_dmp {
> @@ -2110,4 +2112,7 @@ struct kvm_stats_desc {
>  /* Available with KVM_CAP_XSAVE2 */
>  #define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave)
> 
> +/* Available with KVM_CAP_S390_PROTECTED_DUMP */
> +#define KVM_S390_PV_CPU_COMMAND _IOWR(KVMIO, 0xd0, struct kvm_pv_cmd)
> +
>  #endif /* __LINUX_KVM_H */
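For context on the _sched question above: the two wrappers differ only in whether the retry loop for a busy Ultravisor gives up the CPU between attempts. A simplified sketch, paraphrased from arch/s390/include/asm/uv.h as of this series (__uv_call() stands for the low-level wrapper that issues the UV call instruction and returns its condition code; exact details may differ from the kernel source):

/* Retry as long as the Ultravisor reports a busy condition (cc > 1). */
static inline int uv_call(unsigned long r1, unsigned long r2)
{
	int cc;

	do {
		cc = __uv_call(r1, r2);
	} while (cc > 1);
	return cc;
}

/*
 * Same retry loop, but reschedule between attempts so a long-running
 * or busy UV command does not monopolize the CPU.
 */
static inline int uv_call_sched(unsigned long r1, unsigned long r2)
{
	int cc;

	do {
		cc = __uv_call(r1, r2);
		cond_resched();
	} while (cc > 1);
	return cc;
}

Hence the review comment: dump-related UV calls can take a while, so the rescheduling variant is the safer default.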
On 5/9/22 21:11, Claudio Imbrenda wrote:
> On Thu, 28 Apr 2022 13:01:00 +0000
> Janosch Frank <frankja@linux.ibm.com> wrote:
> 
>> The previous patch introduced the per-VM dump functions now let's
>> focus on dumping the VCPU state via the newly introduced
>> KVM_S390_PV_CPU_COMMAND ioctl which mirrors the VM UV ioctl and can be
>> extended with new commands later.
>>
>> Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
>> ---
>>  arch/s390/kvm/kvm-s390.c | 73 ++++++++++++++++++++++++++++++++++++++++
>>  arch/s390/kvm/kvm-s390.h | 1 +
>>  arch/s390/kvm/pv.c | 16 +++++++++
>>  include/uapi/linux/kvm.h | 5 +++
>>  4 files changed, 95 insertions(+)
>>
>> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
>> index 8984e8db33b4..d15ce38bef14 100644
>> --- a/arch/s390/kvm/kvm-s390.c
>> +++ b/arch/s390/kvm/kvm-s390.c
>> @@ -5149,6 +5149,52 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
>>  	return -ENOIOCTLCMD;
>>  }
>>
>> +static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
>> +					struct kvm_pv_cmd *cmd)
>> +{
>> +	struct kvm_s390_pv_dmp dmp;
>> +	void *data;
>> +	int ret;
>> +
>> +	/* Dump initialization is a prerequisite */
>> +	if (!vcpu->kvm->arch.pv.dumping)
>> +		return -EINVAL;
>> +
>> +	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
>> +		return -EFAULT;
>> +
>> +	/* We only handle this subcmd right now */
>> +	if (dmp.subcmd != KVM_PV_DUMP_CPU)
>> +		return -EINVAL;
>> +
>> +	/* CPU dump length is the same as create cpu storage donation. */
>> +	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
>> +		return -EINVAL;
>> +
>> +	data = vzalloc(uv_info.guest_cpu_stor_len);
>> +	if (!data)
>> +		return -ENOMEM;
>> +
>> +	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);
>> +
>> +	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
>> +		   vcpu->vcpu_id, cmd->rc, cmd->rrc);
>> +
>> +	if (ret) {
>> +		vfree(data);
>> +		return -EINVAL;
>> +	}
>> +
>> +	/* On success copy over the dump data */
>> +	if (copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len)) {
>> +		vfree(data);
>> +		return -EFAULT;
>> +	}
>> +
>> +	vfree(data);
>> +	return 0;
>> +}
>> +
>>  long kvm_arch_vcpu_ioctl(struct file *filp,
>>  			 unsigned int ioctl, unsigned long arg)
>>  {
>> @@ -5313,6 +5359,33 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
>>  					   irq_state.len);
>>  		break;
>>  	}
>> +	case KVM_S390_PV_CPU_COMMAND: {
>> +		struct kvm_pv_cmd cmd;
>> +
>> +		r = -EINVAL;
>> +		if (!is_prot_virt_host())
>> +			break;
>> +
>> +		r = -EFAULT;
>> +		if (copy_from_user(&cmd, argp, sizeof(cmd)))
>> +			break;
>> +
>> +		r = -EINVAL;
>> +		if (cmd.flags)
>> +			break;
>> +
>> +		/* We only handle this cmd right now */
>> +		if (cmd.cmd != KVM_PV_DUMP)
>> +			break;
>> +
>> +		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);
>> +
>> +		/* Always copy over UV rc / rrc data */
>> +		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
>> +				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
>> +			r = -EFAULT;
>> +		break;
>> +	}
>>  	default:
>>  		r = -ENOTTY;
>>  	}
>> diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
>> index 2868dd0bba25..a39815184350 100644
>> --- a/arch/s390/kvm/kvm-s390.h
>> +++ b/arch/s390/kvm/kvm-s390.h
>> @@ -250,6 +250,7 @@ int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
>>  int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
>>  		       unsigned long tweak, u16 *rc, u16 *rrc);
>>  int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state);
>> +int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc);
>>  int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
>>  				u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc);
>>
>> diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
>> index d1635ed50078..9ab8192b9b23 100644
>> --- a/arch/s390/kvm/pv.c
>> +++ b/arch/s390/kvm/pv.c
>> @@ -299,6 +299,22 @@ int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
>>  	return 0;
>>  }
>>
>> +int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc)
>> +{
>> +	struct uv_cb_dump_cpu uvcb = {
>> +		.header.cmd = UVC_CMD_DUMP_CPU,
>> +		.header.len = sizeof(uvcb),
>> +		.cpu_handle = vcpu->arch.pv.handle,
>> +		.dump_area_origin = (u64)buff,
>> +	};
>> +	int cc;
>> +
>> +	cc = uv_call_sched(0, (u64)&uvcb);
> 
> it's a small amount of data, but you use the _sched variant?

Who knows what FW will do :)

> 
> and, why aren't you using the _sched variant in the previous patch (for
> DUMP_COMPLETE)?
> 
> to be clear: I think the right thing is to always use the _sched
> variant unless there is a good reason not to (so please fix the previous
> patch)

Yep, will fix.

It might make sense to switch the functions around and make uv_call 
always do a sched and introduce uv_call_nosched() since that's the 
special case. But that's something for the future.

> 
>> +	*rc = uvcb.header.rc;
>> +	*rrc = uvcb.header.rrc;
>> +	return cc;
>> +}
>> +
>>  /* Size of the cache for the storage state dump data. 1MB for now */
>>  #define DUMP_BUFF_LEN HPAGE_SIZE
>>
>> diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
>> index b34850907291..108bc7b7a71b 100644
>> --- a/include/uapi/linux/kvm.h
>> +++ b/include/uapi/linux/kvm.h
>> @@ -1144,6 +1144,7 @@ struct kvm_ppc_resize_hpt {
>>  #define KVM_CAP_S390_MEM_OP_EXTENSION 211
>>  #define KVM_CAP_PMU_CAPABILITY 212
>>  #define KVM_CAP_DISABLE_QUIRKS2 213
>> +#define KVM_CAP_S390_PROTECTED_DUMP 214
>>
>>  #ifdef KVM_CAP_IRQ_ROUTING
>>
>> @@ -1649,6 +1650,7 @@ enum pv_cmd_dmp_id {
>>  	KVM_PV_DUMP_INIT,
>>  	KVM_PV_DUMP_CONFIG_STOR_STATE,
>>  	KVM_PV_DUMP_COMPLETE,
>> +	KVM_PV_DUMP_CPU,
>>  };
>>
>>  struct kvm_s390_pv_dmp {
>> @@ -2110,4 +2112,7 @@ struct kvm_stats_desc {
>>  /* Available with KVM_CAP_XSAVE2 */
>>  #define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave)
>>
>> +/* Available with KVM_CAP_S390_PROTECTED_DUMP */
>> +#define KVM_S390_PV_CPU_COMMAND _IOWR(KVMIO, 0xd0, struct kvm_pv_cmd)
>> +
>>  #endif /* __LINUX_KVM_H */
> 
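A rough sketch of the inversion Janosch mentions, purely for illustration: uv_call_nosched() does not exist at this point, the name and shape are hypothetical.

/* The non-yielding busy loop becomes the explicitly named special case... */
static inline int uv_call_nosched(unsigned long r1, unsigned long r2)
{
	int cc;

	do {
		cc = __uv_call(r1, r2);	/* issue the UV call */
	} while (cc > 1);		/* cc > 1: busy, retry */
	return cc;
}

/*
 * ...while plain uv_call() reschedules by default, so callers opt out
 * of being scheduler-friendly instead of opting in.
 */
static inline int uv_call(unsigned long r1, unsigned long r2)
{
	int cc;

	do {
		cc = __uv_call(r1, r2);
		cond_resched();
	} while (cc > 1);
	return cc;
}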
On Tue, 10 May 2022 09:26:50 +0200
Janosch Frank <frankja@linux.ibm.com> wrote:

> On 5/9/22 21:11, Claudio Imbrenda wrote:
> > On Thu, 28 Apr 2022 13:01:00 +0000
> > Janosch Frank <frankja@linux.ibm.com> wrote:
> > 
> >> The previous patch introduced the per-VM dump functions now let's
> >> focus on dumping the VCPU state via the newly introduced
> >> KVM_S390_PV_CPU_COMMAND ioctl which mirrors the VM UV ioctl and can be
> >> extended with new commands later.
> >>
> >> Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
> >> ---
> >>  arch/s390/kvm/kvm-s390.c | 73 ++++++++++++++++++++++++++++++++++++++++
> >>  arch/s390/kvm/kvm-s390.h | 1 +
> >>  arch/s390/kvm/pv.c | 16 +++++++++
> >>  include/uapi/linux/kvm.h | 5 +++
> >>  4 files changed, 95 insertions(+)
> >>
> >> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> >> index 8984e8db33b4..d15ce38bef14 100644
> >> --- a/arch/s390/kvm/kvm-s390.c
> >> +++ b/arch/s390/kvm/kvm-s390.c
> >> @@ -5149,6 +5149,52 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
> >>  	return -ENOIOCTLCMD;
> >>  }
> >>
> >> +static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
> >> +					struct kvm_pv_cmd *cmd)
> >> +{
> >> +	struct kvm_s390_pv_dmp dmp;
> >> +	void *data;
> >> +	int ret;
> >> +
> >> +	/* Dump initialization is a prerequisite */
> >> +	if (!vcpu->kvm->arch.pv.dumping)
> >> +		return -EINVAL;
> >> +
> >> +	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
> >> +		return -EFAULT;
> >> +
> >> +	/* We only handle this subcmd right now */
> >> +	if (dmp.subcmd != KVM_PV_DUMP_CPU)
> >> +		return -EINVAL;
> >> +
> >> +	/* CPU dump length is the same as create cpu storage donation. */
> >> +	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
> >> +		return -EINVAL;
> >> +
> >> +	data = vzalloc(uv_info.guest_cpu_stor_len);
> >> +	if (!data)
> >> +		return -ENOMEM;
> >> +
> >> +	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);
> >> +
> >> +	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
> >> +		   vcpu->vcpu_id, cmd->rc, cmd->rrc);
> >> +
> >> +	if (ret) {
> >> +		vfree(data);
> >> +		return -EINVAL;
> >> +	}
> >> +
> >> +	/* On success copy over the dump data */
> >> +	if (copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len)) {
> >> +		vfree(data);
> >> +		return -EFAULT;
> >> +	}
> >> +
> >> +	vfree(data);
> >> +	return 0;
> >> +}
> >> +
> >>  long kvm_arch_vcpu_ioctl(struct file *filp,
> >>  			 unsigned int ioctl, unsigned long arg)
> >>  {
> >> @@ -5313,6 +5359,33 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
> >>  					   irq_state.len);
> >>  		break;
> >>  	}
> >> +	case KVM_S390_PV_CPU_COMMAND: {
> >> +		struct kvm_pv_cmd cmd;
> >> +
> >> +		r = -EINVAL;
> >> +		if (!is_prot_virt_host())
> >> +			break;
> >> +
> >> +		r = -EFAULT;
> >> +		if (copy_from_user(&cmd, argp, sizeof(cmd)))
> >> +			break;
> >> +
> >> +		r = -EINVAL;
> >> +		if (cmd.flags)
> >> +			break;
> >> +
> >> +		/* We only handle this cmd right now */
> >> +		if (cmd.cmd != KVM_PV_DUMP)
> >> +			break;
> >> +
> >> +		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);
> >> +
> >> +		/* Always copy over UV rc / rrc data */
> >> +		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
> >> +				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
> >> +			r = -EFAULT;
> >> +		break;
> >> +	}
> >>  	default:
> >>  		r = -ENOTTY;
> >>  	}
> >> diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
> >> index 2868dd0bba25..a39815184350 100644
> >> --- a/arch/s390/kvm/kvm-s390.h
> >> +++ b/arch/s390/kvm/kvm-s390.h
> >> @@ -250,6 +250,7 @@ int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
> >>  int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
> >>  		       unsigned long tweak, u16 *rc, u16 *rrc);
> >>  int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state);
> >> +int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc);
> >>  int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
> >>  				u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc);
> >>
> >> diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
> >> index d1635ed50078..9ab8192b9b23 100644
> >> --- a/arch/s390/kvm/pv.c
> >> +++ b/arch/s390/kvm/pv.c
> >> @@ -299,6 +299,22 @@ int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
> >>  	return 0;
> >>  }
> >>
> >> +int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc)
> >> +{
> >> +	struct uv_cb_dump_cpu uvcb = {
> >> +		.header.cmd = UVC_CMD_DUMP_CPU,
> >> +		.header.len = sizeof(uvcb),
> >> +		.cpu_handle = vcpu->arch.pv.handle,
> >> +		.dump_area_origin = (u64)buff,
> >> +	};
> >> +	int cc;
> >> +
> >> +	cc = uv_call_sched(0, (u64)&uvcb);
> > 
> > it's a small amount of data, but you use the _sched variant?
> 
> Who knows what FW will do :)
> 
> > 
> > and, why aren't you using the _sched variant in the previous patch (for
> > DUMP_COMPLETE)?
> > 
> > to be clear: I think the right thing is to always use the _sched
> > variant unless there is a good reason not to (so please fix the previous
> > patch)
> 
> Yep, will fix.
> 
> It might make sense to switch the functions around and make uv_call 
> always do a sched and introduce uv_call_nosched() since that's the 
> special case. But that's something for the future.

I was actually thinking the same thing :)

> 
> > 
> >> +	*rc = uvcb.header.rc;
> >> +	*rrc = uvcb.header.rrc;
> >> +	return cc;
> >> +}
> >> +
> >>  /* Size of the cache for the storage state dump data. 1MB for now */
> >>  #define DUMP_BUFF_LEN HPAGE_SIZE
> >>
> >> diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
> >> index b34850907291..108bc7b7a71b 100644
> >> --- a/include/uapi/linux/kvm.h
> >> +++ b/include/uapi/linux/kvm.h
> >> @@ -1144,6 +1144,7 @@ struct kvm_ppc_resize_hpt {
> >>  #define KVM_CAP_S390_MEM_OP_EXTENSION 211
> >>  #define KVM_CAP_PMU_CAPABILITY 212
> >>  #define KVM_CAP_DISABLE_QUIRKS2 213
> >> +#define KVM_CAP_S390_PROTECTED_DUMP 214
> >>
> >>  #ifdef KVM_CAP_IRQ_ROUTING
> >>
> >> @@ -1649,6 +1650,7 @@ enum pv_cmd_dmp_id {
> >>  	KVM_PV_DUMP_INIT,
> >>  	KVM_PV_DUMP_CONFIG_STOR_STATE,
> >>  	KVM_PV_DUMP_COMPLETE,
> >> +	KVM_PV_DUMP_CPU,
> >>  };
> >>
> >>  struct kvm_s390_pv_dmp {
> >> @@ -2110,4 +2112,7 @@ struct kvm_stats_desc {
> >>  /* Available with KVM_CAP_XSAVE2 */
> >>  #define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave)
> >>
> >> +/* Available with KVM_CAP_S390_PROTECTED_DUMP */
> >> +#define KVM_S390_PV_CPU_COMMAND _IOWR(KVMIO, 0xd0, struct kvm_pv_cmd)
> >> +
> >>  #endif /* __LINUX_KVM_H */
> > 
> 
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 8984e8db33b4..d15ce38bef14 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -5149,6 +5149,52 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
 	return -ENOIOCTLCMD;
 }
 
+static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
+					struct kvm_pv_cmd *cmd)
+{
+	struct kvm_s390_pv_dmp dmp;
+	void *data;
+	int ret;
+
+	/* Dump initialization is a prerequisite */
+	if (!vcpu->kvm->arch.pv.dumping)
+		return -EINVAL;
+
+	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
+		return -EFAULT;
+
+	/* We only handle this subcmd right now */
+	if (dmp.subcmd != KVM_PV_DUMP_CPU)
+		return -EINVAL;
+
+	/* CPU dump length is the same as create cpu storage donation. */
+	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
+		return -EINVAL;
+
+	data = vzalloc(uv_info.guest_cpu_stor_len);
+	if (!data)
+		return -ENOMEM;
+
+	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);
+
+	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
+		   vcpu->vcpu_id, cmd->rc, cmd->rrc);
+
+	if (ret) {
+		vfree(data);
+		return -EINVAL;
+	}
+
+	/* On success copy over the dump data */
+	if (copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len)) {
+		vfree(data);
+		return -EFAULT;
+	}
+
+	vfree(data);
+	return 0;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg)
 {
@@ -5313,6 +5359,33 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 					   irq_state.len);
 		break;
 	}
+	case KVM_S390_PV_CPU_COMMAND: {
+		struct kvm_pv_cmd cmd;
+
+		r = -EINVAL;
+		if (!is_prot_virt_host())
+			break;
+
+		r = -EFAULT;
+		if (copy_from_user(&cmd, argp, sizeof(cmd)))
+			break;
+
+		r = -EINVAL;
+		if (cmd.flags)
+			break;
+
+		/* We only handle this cmd right now */
+		if (cmd.cmd != KVM_PV_DUMP)
+			break;
+
+		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);
+
+		/* Always copy over UV rc / rrc data */
+		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
+				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
+			r = -EFAULT;
+		break;
+	}
 	default:
 		r = -ENOTTY;
 	}
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 2868dd0bba25..a39815184350 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -250,6 +250,7 @@ int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
 int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
 		       unsigned long tweak, u16 *rc, u16 *rrc);
 int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state);
+int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc);
 int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
 				u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc);
 
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index d1635ed50078..9ab8192b9b23 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -299,6 +299,22 @@ int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
 	return 0;
 }
 
+int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc)
+{
+	struct uv_cb_dump_cpu uvcb = {
+		.header.cmd = UVC_CMD_DUMP_CPU,
+		.header.len = sizeof(uvcb),
+		.cpu_handle = vcpu->arch.pv.handle,
+		.dump_area_origin = (u64)buff,
+	};
+	int cc;
+
+	cc = uv_call_sched(0, (u64)&uvcb);
+	*rc = uvcb.header.rc;
+	*rrc = uvcb.header.rrc;
+	return cc;
+}
+
 /* Size of the cache for the storage state dump data. 1MB for now */
 #define DUMP_BUFF_LEN HPAGE_SIZE
 
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index b34850907291..108bc7b7a71b 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1144,6 +1144,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_MEM_OP_EXTENSION 211
 #define KVM_CAP_PMU_CAPABILITY 212
 #define KVM_CAP_DISABLE_QUIRKS2 213
+#define KVM_CAP_S390_PROTECTED_DUMP 214
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1649,6 +1650,7 @@ enum pv_cmd_dmp_id {
 	KVM_PV_DUMP_INIT,
 	KVM_PV_DUMP_CONFIG_STOR_STATE,
 	KVM_PV_DUMP_COMPLETE,
+	KVM_PV_DUMP_CPU,
 };
 
 struct kvm_s390_pv_dmp {
@@ -2110,4 +2112,7 @@ struct kvm_stats_desc {
 /* Available with KVM_CAP_XSAVE2 */
 #define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave)
 
+/* Available with KVM_CAP_S390_PROTECTED_DUMP */
+#define KVM_S390_PV_CPU_COMMAND _IOWR(KVMIO, 0xd0, struct kvm_pv_cmd)
+
 #endif /* __LINUX_KVM_H */
The previous patch introduced the per-VM dump functions; now let's focus on dumping the VCPU state via the newly introduced KVM_S390_PV_CPU_COMMAND ioctl, which mirrors the VM UV ioctl and can be extended with new commands later.

Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
---
 arch/s390/kvm/kvm-s390.c | 73 ++++++++++++++++++++++++++++++++++++++++
 arch/s390/kvm/kvm-s390.h |  1 +
 arch/s390/kvm/pv.c       | 16 +++++++++
 include/uapi/linux/kvm.h |  5 +++
 4 files changed, 95 insertions(+)
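To illustrate how userspace is expected to drive the new interface, here is a minimal sketch of a single VCPU dump as a QEMU-like tool might issue it. The struct layouts and constants come from the uapi additions in this series; the assumptions are that KVM_CAP_S390_PROTECTED_DUMP has been checked, that KVM_PV_DUMP_INIT has already been issued on the VM fd (the handler requires the dump to be initialized), and that buff_len was obtained from the dump info query added earlier in the series (it must equal the Ultravisor's CPU dump buffer length). dump_vcpu_state() itself is a hypothetical helper name.

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Dump the protected state of one VCPU into a caller-provided buffer. */
static int dump_vcpu_state(int vcpu_fd, void *buff, uint64_t buff_len)
{
	struct kvm_s390_pv_dmp dmp = {
		.subcmd = KVM_PV_DUMP_CPU,
		.buff_addr = (uint64_t)(uintptr_t)buff,
		.buff_len = buff_len,	/* must match the UV CPU dump length */
	};
	struct kvm_pv_cmd cmd = {
		.cmd = KVM_PV_DUMP,
		.data = (uint64_t)(uintptr_t)&dmp,
		.flags = 0,		/* must be 0, the kernel rejects anything else */
	};
	int rc;

	rc = ioctl(vcpu_fd, KVM_S390_PV_CPU_COMMAND, &cmd);
	if (rc)
		/* rc/rrc are copied back by the kernel even on failure */
		fprintf(stderr, "PV CPU dump failed: rc %x rrc %x\n",
			cmd.rc, cmd.rrc);
	return rc;
}

On success the buffer holds the still-protected CPU state data, which the dump tooling can combine with the per-VM dump data produced by the previous patch.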