@@ -34,6 +34,7 @@
#define UVC_CMD_INIT_UV 0x000f
#define UVC_CMD_CREATE_SEC_CONF 0x0100
#define UVC_CMD_DESTROY_SEC_CONF 0x0101
+#define UVC_CMD_DESTROY_SEC_CONF_FAST 0x0102
#define UVC_CMD_CREATE_SEC_CPU 0x0120
#define UVC_CMD_DESTROY_SEC_CPU 0x0121
#define UVC_CMD_CONV_TO_SEC_STOR 0x0200
@@ -81,6 +82,7 @@ enum uv_cmds_inst {
BIT_UVC_CMD_UNSHARE_ALL = 20,
BIT_UVC_CMD_PIN_PAGE_SHARED = 21,
BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
+ BIT_UVC_CMD_DESTROY_SEC_CONF_FAST = 23,
BIT_UVC_CMD_DUMP_INIT = 24,
BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE = 25,
BIT_UVC_CMD_DUMP_CPU = 26,
@@ -230,6 +232,15 @@ struct uv_cb_nodata {
u64 reserved20[4];
} __packed __aligned(8);

+/* Destroy Configuration Fast */
+struct uv_cb_destroy_fast {
+ struct uv_cb_header header;
+ u64 reserved08[2];
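+ /* handle of the protected configuration to be destroyed */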
+ u64 handle;
+ u64 reserved20[5];
+} __packed __aligned(8);
+
/* Set Shared Access */
struct uv_cb_share {
struct uv_cb_header header;
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -201,6 +201,9 @@ static int kvm_s390_pv_dispose_one_leftover(struct kvm *kvm,
{
int cc;

+ /* It used the destroy-fast UVC, nothing left to do here */
+ if (!leftover->handle)
+ goto done_fast;
cc = uv_cmd_nodata(leftover->handle, UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY LEFTOVER VM: rc %x rrc %x", *rc, *rrc);
WARN_ONCE(cc, "protvirt destroy leftover vm failed rc %x rrc %x", *rc, *rrc);
@@ -215,6 +218,8 @@ static int kvm_s390_pv_dispose_one_leftover(struct kvm *kvm,
free_pages(leftover->stor_base, get_order(uv_info.guest_base_stor_len));
free_pages(leftover->old_gmap_table, CRST_ALLOC_ORDER);
vfree(leftover->stor_var);
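+ /* In the fast path nothing was saved, so there is nothing to free */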
+done_fast:
atomic_dec(&kvm->mm->context.protected_count);
return 0;
}
@@ -248,6 +253,35 @@ static void kvm_s390_destroy_lower_2g(struct kvm *kvm)
srcu_read_unlock(&kvm->srcu, srcu_idx);
}
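
+/* Tear down the protected VM immediately with the Destroy Fast UVC */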
+static int kvm_s390_pv_deinit_vm_fast(struct kvm *kvm, u16 *rc, u16 *rrc)
+{
+ struct uv_cb_destroy_fast uvcb = {
+ .header.cmd = UVC_CMD_DESTROY_SEC_CONF_FAST,
+ .header.len = sizeof(uvcb),
+ .handle = kvm_s390_pv_get_handle(kvm),
+ };
+ int cc;
+
+ cc = uv_call_sched(0, (u64)&uvcb);
+ *rc = uvcb.header.rc;
+ *rrc = uvcb.header.rrc;
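+ /* Regardless of the outcome, the VM is considered gone from here on */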
+ WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
+ KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM FAST: rc %x rrc %x", *rc, *rrc);
+ WARN_ONCE(cc, "protvirt destroy vm fast failed rc %x rrc %x", *rc, *rrc);
+ /* Intended memory leak on "impossible" error */
+ if (!cc)
+ kvm_s390_pv_dealloc_vm(kvm);
+ return cc ? -EIO : 0;
+}
+
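+/* Check whether the Ultravisor advertises the Destroy Fast UVC */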
+static inline bool is_destroy_fast_available(void)
+{
+ return test_bit_inv(BIT_UVC_CMD_DESTROY_SEC_CONF_FAST, uv_info.inst_calls_list);
+}
+
/**
* kvm_s390_pv_set_aside - Set aside a protected VM for later teardown.
* @kvm: the VM
@@ -269,6 +303,7 @@ static void kvm_s390_destroy_lower_2g(struct kvm *kvm)
int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc)
{
struct pv_vm_to_be_destroyed *priv;
+ int res = 0;

lockdep_assert_held(&kvm->lock);
/*
@@ -281,14 +316,26 @@ int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc)
if (!priv)
return -ENOMEM;

- priv->stor_var = kvm->arch.pv.stor_var;
- priv->stor_base = kvm->arch.pv.stor_base;
- priv->handle = kvm_s390_pv_get_handle(kvm);
- priv->old_gmap_table = (unsigned long)kvm->arch.gmap->table;
- WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
- if (s390_replace_asce(kvm->arch.gmap)) {
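+ /*
+ * With the fast UVC the VM is destroyed right away; otherwise save
+ * everything needed for the deferred teardown and replace the old
+ * ASCE, which stays with the set-aside configuration.
+ */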
+ if (is_destroy_fast_available()) {
+ res = kvm_s390_pv_deinit_vm_fast(kvm, rc, rrc);
+ } else {
+ priv->stor_var = kvm->arch.pv.stor_var;
+ priv->stor_base = kvm->arch.pv.stor_base;
+ priv->handle = kvm_s390_pv_get_handle(kvm);
+ priv->old_gmap_table = (unsigned long)kvm->arch.gmap->table;
+ WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
+ if (s390_replace_asce(kvm->arch.gmap))
+ res = -ENOMEM;
+ }
+
+ if (res) {
kfree(priv);
- return -ENOMEM;
+ return res;
}

kvm_s390_destroy_lower_2g(kvm);
@@ -469,6 +516,7 @@ static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription,
{
struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier);
u16 dummy;
+ int r;

/*
* No locking is needed since this is the last thread of the last user of this
@@ -477,7 +525,13 @@ static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription,
* unregistered. This means that if this notifier runs, then the
* struct kvm is still valid.
*/
- kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
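+ /*
+ * If all CPUs could be un-protected and the fast UVC is available,
+ * the configuration itself can also be destroyed right here.
+ */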
+ r = kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
+ if (!r && is_destroy_fast_available())
+ kvm_s390_pv_deinit_vm_fast(kvm, &dummy, &dummy);
}

static const struct mmu_notifier_ops kvm_s390_pv_mmu_notifier_ops = {