@@ -948,6 +948,62 @@ Use cases
up its internal state for this virtual machine.
+H_SVM_INIT_ABORT
+----------------
+
+ Abort the process of securing an SVM.
+
+Syntax
+~~~~~~
+
+.. code-block:: c
+
+	uint64_t hypercall(const uint64_t H_SVM_INIT_ABORT,
+		uint64_t guest_pc,	/* guest NIP to return to */
+		uint64_t guest_msr)	/* guest MSR value */
+
+Return values
+~~~~~~~~~~~~~
+
+ One of the following values:
+
+    * H_PARAMETER on successfully cleaning up the state. The Hypervisor
+      returns this value to the **guest**, to indicate that the underlying
+      UV_ESM ultracall failed.
+
+    * H_UNSUPPORTED if called from the wrong context (e.g. from an SVM
+      or before an H_SVM_INIT_START hypercall). This value is returned
+      to the Ultravisor, which issued the hcall incorrectly.
+
+
+Description
+~~~~~~~~~~~
+
+    Abort the process of securing a virtual machine. This call must
+    be made after a prior ``H_SVM_INIT_START`` hypercall and before
+    a call to ``H_SVM_INIT_DONE``.
+
+    This hcall cleans up any partial state that was established for
+    the VM since the prior ``H_SVM_INIT_START`` hcall, including paging
+    out pages that were paged into secure memory, unregistering memory
+    slots, etc.
+
+    After the partial state is cleaned up, control returns to the address
+    specified in ``guest_pc`` with the MSR value set to ``guest_msr``.
+    These parameters are expected to match the state of the NIP and MSR
+    registers of the VM at the time it issued the ``UV_ESM`` ultracall
+    to transition to a secure VM.
+
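+    As an illustration, a guest-side sketch of what the VM observes
+    (``uv_esm()`` here is a hypothetical wrapper around the ``UV_ESM``
+    ucall; the actual error handling is up to the guest):
+
+.. code-block:: c
+
+	/*
+	 * The guest issues UV_ESM to enter secure mode. If the
+	 * Ultravisor aborts via H_SVM_INIT_ABORT, execution resumes
+	 * at the instruction after the ucall, in normal mode, with
+	 * H_PARAMETER as the return value.
+	 */
+	if (uv_esm(kbase, fdt) != U_SUCCESS)
+		panic("Failed to switch to secure mode\n");
+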
+Use cases
+~~~~~~~~~
+
+    If, after a successful call to ``H_SVM_INIT_START``, the Ultravisor
+    encounters an error while securing a virtual machine, either due
+    to lack of resources or because the VM's security information could
+    not be validated, the Ultravisor informs the Hypervisor about it.
+    The Hypervisor should use this call to clean up any internal state
+    set up for this virtual machine and return to the VM.
+
H_SVM_PAGE_IN
-------------
@@ -350,6 +350,7 @@
#define H_SVM_PAGE_OUT 0xEF04
#define H_SVM_INIT_START 0xEF08
#define H_SVM_INIT_DONE 0xEF0C
+#define H_SVM_INIT_ABORT 0xEF14
/* Values for 2nd argument to H_SET_MODE */
#define H_SET_MODE_RESOURCE_SET_CIABR 1
@@ -19,8 +19,9 @@ unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
unsigned long kvmppc_h_svm_init_start(struct kvm *kvm);
unsigned long kvmppc_h_svm_init_done(struct kvm *kvm);
int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn);
+unsigned long kvmppc_h_svm_init_abort(struct kvm_vcpu *vcpu);
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
- struct kvm *kvm);
+ struct kvm *kvm, bool skip_page_out);
#else
static inline int kvmppc_uvmem_init(void)
{
@@ -62,6 +63,11 @@ static inline unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
return H_UNSUPPORTED;
}
+static inline unsigned long kvmppc_h_svm_init_abort(struct kvm_vcpu *vcpu)
+{
+ return H_UNSUPPORTED;
+}
+
static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
{
return -EFAULT;
@@ -69,6 +75,6 @@ static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
static inline void
kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
- struct kvm *kvm) { }
+ struct kvm *kvm, bool skip_page_out) { }
#endif /* CONFIG_PPC_UV */
#endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */
@@ -278,6 +278,7 @@ struct kvm_resize_hpt;
/* Flag values for kvm_arch.secure_guest */
#define KVMPPC_SECURE_INIT_START 0x1 /* H_SVM_INIT_START has been called */
#define KVMPPC_SECURE_INIT_DONE 0x2 /* H_SVM_INIT_DONE completed */
+#define KVMPPC_SECURE_INIT_ABORT 0x4 /* H_SVM_INIT_ABORT issued */
struct kvm_arch {
unsigned int lpid;
@@ -1102,7 +1102,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
unsigned int shift;
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
- kvmppc_uvmem_drop_pages(memslot, kvm);
+ kvmppc_uvmem_drop_pages(memslot, kvm, true);
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
return;
@@ -1099,6 +1099,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
case H_SVM_INIT_DONE:
ret = kvmppc_h_svm_init_done(vcpu->kvm);
break;
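+	/*
+	 * Unlike the other SVM hcalls, the abort handler takes the
+	 * vcpu rather than just the kvm: it must rewrite the guest
+	 * NIP and MSR to resume the VM at its UV_ESM call site.
+	 */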
+ case H_SVM_INIT_ABORT:
+ ret = kvmppc_h_svm_init_abort(vcpu);
+ break;
default:
return RESUME_HOST;
@@ -5493,7 +5496,7 @@ static int kvmhv_svm_off(struct kvm *kvm)
continue;
kvm_for_each_memslot(memslot, slots) {
- kvmppc_uvmem_drop_pages(memslot, kvm);
+ kvmppc_uvmem_drop_pages(memslot, kvm, true);
uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
}
}
@@ -259,7 +259,7 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
* QEMU page table with normal PTEs from newly allocated pages.
*/
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
- struct kvm *kvm)
+ struct kvm *kvm, bool skip_page_out)
{
int i;
struct kvmppc_uvmem_page_pvt *pvt;
@@ -277,7 +277,8 @@ void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
uvmem_page = pfn_to_page(uvmem_pfn);
pvt = uvmem_page->zone_device_data;
- pvt->skip_page_out = true;
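+		/*
+		 * The H_SVM_INIT_ABORT path passes skip_page_out = false
+		 * so the latest contents of the secure pages are paged
+		 * out to normal memory; teardown paths (memslot flush,
+		 * VM off) pass true since the contents can be dropped.
+		 */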
+ pvt->skip_page_out = skip_page_out;
+
mutex_unlock(&kvm->arch.uvmem_lock);
pfn = gfn_to_pfn(kvm, gfn);
@@ -287,6 +288,49 @@ void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
}
}
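+/*
+ * kvmppc_h_svm_init_abort: Abort the process of securing an SVM.
+ *
+ * Unwinds the partial state set up since the H_SVM_INIT_START call:
+ * pages out pages that were paged into secure memory and unregisters
+ * the memslots from the Ultravisor, then resumes the VM at the NIP/MSR
+ * it had when it issued the UV_ESM ucall, with H_PARAMETER as the
+ * hcall return value.
+ */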
+unsigned long kvmppc_h_svm_init_abort(struct kvm_vcpu *vcpu)
+{
+ int i;
+ ulong pc, msr;
+ struct kvm *kvm = vcpu->kvm;
+
+ pc = kvmppc_get_gpr(vcpu, 4);
+ msr = kvmppc_get_gpr(vcpu, 5);
+
+ if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
+ return H_UNSUPPORTED;
+
+	/*
+	 * We expect a guest address and guest MSR, so MSR_HV should
+	 * not be set. Warn, clear the bit and proceed with the abort.
+	 */
+	if (msr & MSR_HV) {
+		pr_warn("MSR_HV set on H_SVM_INIT_ABORT call!\n");
+ msr &= ~MSR_HV;
+ }
+
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ struct kvm_memory_slot *memslot;
+ struct kvm_memslots *slots = __kvm_memslots(kvm, i);
+
+ if (!slots)
+ continue;
+
+ kvm_for_each_memslot(memslot, slots) {
+ kvmppc_uvmem_drop_pages(memslot, kvm, false);
+ uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
+ }
+ }
+
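+	/* The securing failed; the VM reverts to an ordinary VM */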
+ kvm->arch.secure_guest = 0;
+
+ /* Return to the point where VM issued UV_ESM ucall */
+ kvmppc_set_pc(vcpu, pc);
+ kvmppc_set_msr(vcpu, msr);
+
+ return H_PARAMETER;
+}
+
/*
* Get a free device PFN from the pool
*