[Part2,v5,38/45] KVM: SVM: Add support to handle Page State Change VMGEXIT

Message ID 20210820155918.7518-39-brijesh.singh@amd.com (mailing list archive)
State Not Applicable
Delegated to: Herbert Xu
Series Add AMD Secure Nested Paging (SEV-SNP) Hypervisor Support

Commit Message

Brijesh Singh Aug. 20, 2021, 3:59 p.m. UTC
SEV-SNP VMs can ask the hypervisor to change the page state in the RMP
table to be private or shared using the Page State Change NAE event
as defined in the GHCB specification version 2.

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 arch/x86/include/asm/sev-common.h |  7 +++
 arch/x86/kvm/svm/sev.c            | 82 +++++++++++++++++++++++++++++--
 2 files changed, 84 insertions(+), 5 deletions(-)
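
For context, the descriptor being processed here looks roughly like the sketch below. This is illustrative only: psc_hdr and VMGEXIT_PSC_MAX_ENTRY appear in the hunk in this patch, while the psc_entry bit layout shown is an assumption based on the GHCB specification and is defined elsewhere in the series, not here.

/*
 * Illustrative sketch, not part of this patch: the Page State Change
 * descriptor the guest places in the GHCB shared buffer. Field widths
 * for psc_entry are assumed from the GHCB spec version 2.
 */
struct psc_entry {
	u64 cur_page	: 12,	/* progress cursor for large-page entries */
	    gfn		: 40,	/* guest frame number to transition */
	    operation	: 4,	/* e.g. SNP_PAGE_STATE_PRIVATE/SHARED */
	    pagesize	: 1,	/* 0 = 4K, 1 = 2M */
	    reserved	: 7;
} __packed;

struct snp_psc_desc {
	struct psc_hdr	 hdr;	/* cur_entry/end_entry, see the hunk below */
	struct psc_entry entries[VMGEXIT_PSC_MAX_ENTRY];
} __packed;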

Comments

Dr. David Alan Gilbert Sept. 28, 2021, 10:17 a.m. UTC | #1
* Brijesh Singh (brijesh.singh@amd.com) wrote:
> SEV-SNP VMs can ask the hypervisor to change the page state in the RMP
> table to be private or shared using the Page State Change NAE event
> as defined in the GHCB specification version 2.
> 
> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
> ---
>  arch/x86/include/asm/sev-common.h |  7 +++
>  arch/x86/kvm/svm/sev.c            | 82 +++++++++++++++++++++++++++++--
>  2 files changed, 84 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
> index 4980f77aa1d5..5ee30bb2cdb8 100644
> --- a/arch/x86/include/asm/sev-common.h
> +++ b/arch/x86/include/asm/sev-common.h
> @@ -126,6 +126,13 @@ enum psc_op {
>  /* SNP Page State Change NAE event */
>  #define VMGEXIT_PSC_MAX_ENTRY		253
>  
> +/* The page state change hdr structure is not valid */
> +#define PSC_INVALID_HDR			1
> +/* The hdr.cur_entry or hdr.end_entry is not valid */
> +#define PSC_INVALID_ENTRY		2
> +/* Page state change encountered undefined error */
> +#define PSC_UNDEF_ERR			3
> +
>  struct psc_hdr {
>  	u16 cur_entry;
>  	u16 end_entry;
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 6d9483ec91ab..0de85ed63e9b 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -2731,6 +2731,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm, u64 *exit_code)
>  	case SVM_VMGEXIT_AP_JUMP_TABLE:
>  	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
>  	case SVM_VMGEXIT_HV_FEATURES:
> +	case SVM_VMGEXIT_PSC:
>  		break;
>  	default:
>  		goto vmgexit_err;
> @@ -3004,13 +3005,13 @@ static int __snp_handle_page_state_change(struct kvm_vcpu *vcpu, enum psc_op op,
>  		 */
>  		rc = snp_check_and_build_npt(vcpu, gpa, level);
>  		if (rc)
> -			return -EINVAL;
> +			return PSC_UNDEF_ERR;
>  
>  		if (op == SNP_PAGE_STATE_PRIVATE) {
>  			hva_t hva;
>  
>  			if (snp_gpa_to_hva(kvm, gpa, &hva))
> -				return -EINVAL;
> +				return PSC_UNDEF_ERR;
>  
>  			/*
>  			 * Verify that the hva range is registered. This enforcement is
> @@ -3022,7 +3023,7 @@ static int __snp_handle_page_state_change(struct kvm_vcpu *vcpu, enum psc_op op,
>  			rc = is_hva_registered(kvm, hva, page_level_size(level));
>  			mutex_unlock(&kvm->lock);
>  			if (!rc)
> -				return -EINVAL;
> +				return PSC_UNDEF_ERR;
>  
>  			/*
>  			 * Mark the userspace range unmergeable before adding the pages
> @@ -3032,7 +3033,7 @@ static int __snp_handle_page_state_change(struct kvm_vcpu *vcpu, enum psc_op op,
>  			rc = snp_mark_unmergable(kvm, hva, page_level_size(level));
>  			mmap_write_unlock(kvm->mm);
>  			if (rc)
> -				return -EINVAL;
> +				return PSC_UNDEF_ERR;
>  		}
>  
>  		write_lock(&kvm->mmu_lock);
> @@ -3062,8 +3063,11 @@ static int __snp_handle_page_state_change(struct kvm_vcpu *vcpu, enum psc_op op,
>  		case SNP_PAGE_STATE_PRIVATE:
>  			rc = rmp_make_private(pfn, gpa, level, sev->asid, false);
>  			break;
> +		case SNP_PAGE_STATE_PSMASH:
> +		case SNP_PAGE_STATE_UNSMASH:
> +			/* TODO: Add support to handle it */
>  		default:
> -			rc = -EINVAL;
> +			rc = PSC_INVALID_ENTRY;
>  			break;
>  		}
>  
> @@ -3081,6 +3085,65 @@ static int __snp_handle_page_state_change(struct kvm_vcpu *vcpu, enum psc_op op,
>  	return 0;
>  }
>  
> +static inline unsigned long map_to_psc_vmgexit_code(int rc)
> +{
> +	switch (rc) {
> +	case PSC_INVALID_HDR:
> +		return ((1ul << 32) | 1);
> +	case PSC_INVALID_ENTRY:
> +		return ((1ul << 32) | 2);
> +	case RMPUPDATE_FAIL_OVERLAP:
> +		return ((3ul << 32) | 2);
> +	default: return (4ul << 32);
> +	}

Are these the values defined in 56421 section 4.1.6?
If so, that says:
  SW_EXITINFO2[63:32] == 0x00000100
      The hypervisor encountered some other error situation and was not able to complete the
      request identified by page_state_change_header.cur_entry. It is left to the guest to decide how
      to proceed in this situation.

so it looks like the default should be 0x100 rather than 4?

(It's a shame they're all magical constants; it would be nice if the
standard gave them names)

Dave
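
Assuming Dave's reading of the spec is right (Brijesh confirms it below), the fix only touches the default branch; a sketch of the resulting function, not a posted revision:

/*
 * Sketch only: map_to_psc_vmgexit_code() with the default ("undefined
 * error") class changed from 4 to 0x100 per GHCB spec section 4.1.6.
 * Everything else is unchanged from the hunk quoted above.
 */
static inline unsigned long map_to_psc_vmgexit_code(int rc)
{
	switch (rc) {
	case PSC_INVALID_HDR:
		return ((1ul << 32) | 1);
	case PSC_INVALID_ENTRY:
		return ((1ul << 32) | 2);
	case RMPUPDATE_FAIL_OVERLAP:
		return ((3ul << 32) | 2);
	default:
		return (0x100ul << 32);
	}
}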


Brijesh Singh Sept. 28, 2021, 11:20 p.m. UTC | #2
On 9/28/21 5:17 AM, Dr. David Alan Gilbert wrote:
> * Brijesh Singh (brijesh.singh@amd.com) wrote:
>> [...]
>> +static inline unsigned long map_to_psc_vmgexit_code(int rc)
>> +{
>> +	switch (rc) {
>> +	case PSC_INVALID_HDR:
>> +		return ((1ul << 32) | 1);
>> +	case PSC_INVALID_ENTRY:
>> +		return ((1ul << 32) | 2);
>> +	case RMPUPDATE_FAIL_OVERLAP:
>> +		return ((3ul << 32) | 2);
>> +	default: return (4ul << 32);
>> +	}
> Are these the values defined in 56421 section 4.1.6?
> If so, that says:
>   SW_EXITINFO2[63:32] == 0x00000100
>       The hypervisor encountered some other error situation and was not able to complete the
>       request identified by page_state_change_header.cur_entry. It is left to the guest to decide how
>       to proceed in this situation.
>
> so it looks like the default should be 0x100 rather than 4?

Ah good catch, it should be 0x100. I will fix it.


> (It's a shame they're all magical constants; it would be nice if the
> standard gave them names)

In early RFCs I had macros to build the error code, but based on
feedback I went with this function with open-coded values.

thanks
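
For illustration, named constants along the lines Brijesh mentions could look like the sketch below. The PSC_EXITINFO2()/PSC_RC_* names are invented here, not taken from the early RFCs or the GHCB spec; the values mirror map_to_psc_vmgexit_code() with the 0x100 default agreed above.

/*
 * Hypothetical helpers, sketch only: build the SW_EXITINFO2 value
 * returned for the Page State Change NAE event. The high 32 bits
 * carry the error class, the low 32 bits the detail code used by
 * the patch.
 */
#define PSC_EXITINFO2(hi, lo)	((((unsigned long)(hi)) << 32) | (unsigned long)(lo))

#define PSC_RC_INVALID_HDR	PSC_EXITINFO2(0x1, 0x1)
#define PSC_RC_INVALID_ENTRY	PSC_EXITINFO2(0x1, 0x2)
#define PSC_RC_FW_OVERLAP	PSC_EXITINFO2(0x3, 0x2)
#define PSC_RC_UNDEF_ERR	PSC_EXITINFO2(0x100, 0x0)

With something like this, map_to_psc_vmgexit_code() would return PSC_RC_INVALID_ENTRY rather than the open-coded ((1ul << 32) | 2).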

Patch

diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index 4980f77aa1d5..5ee30bb2cdb8 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -126,6 +126,13 @@  enum psc_op {
 /* SNP Page State Change NAE event */
 #define VMGEXIT_PSC_MAX_ENTRY		253
 
+/* The page state change hdr structure is not valid */
+#define PSC_INVALID_HDR			1
+/* The hdr.cur_entry or hdr.end_entry is not valid */
+#define PSC_INVALID_ENTRY		2
+/* Page state change encountered undefined error */
+#define PSC_UNDEF_ERR			3
+
 struct psc_hdr {
 	u16 cur_entry;
 	u16 end_entry;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 6d9483ec91ab..0de85ed63e9b 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2731,6 +2731,7 @@  static int sev_es_validate_vmgexit(struct vcpu_svm *svm, u64 *exit_code)
 	case SVM_VMGEXIT_AP_JUMP_TABLE:
 	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
 	case SVM_VMGEXIT_HV_FEATURES:
+	case SVM_VMGEXIT_PSC:
 		break;
 	default:
 		goto vmgexit_err;
@@ -3004,13 +3005,13 @@  static int __snp_handle_page_state_change(struct kvm_vcpu *vcpu, enum psc_op op,
 		 */
 		rc = snp_check_and_build_npt(vcpu, gpa, level);
 		if (rc)
-			return -EINVAL;
+			return PSC_UNDEF_ERR;
 
 		if (op == SNP_PAGE_STATE_PRIVATE) {
 			hva_t hva;
 
 			if (snp_gpa_to_hva(kvm, gpa, &hva))
-				return -EINVAL;
+				return PSC_UNDEF_ERR;
 
 			/*
 			 * Verify that the hva range is registered. This enforcement is
@@ -3022,7 +3023,7 @@  static int __snp_handle_page_state_change(struct kvm_vcpu *vcpu, enum psc_op op,
 			rc = is_hva_registered(kvm, hva, page_level_size(level));
 			mutex_unlock(&kvm->lock);
 			if (!rc)
-				return -EINVAL;
+				return PSC_UNDEF_ERR;
 
 			/*
 			 * Mark the userspace range unmergeable before adding the pages
@@ -3032,7 +3033,7 @@  static int __snp_handle_page_state_change(struct kvm_vcpu *vcpu, enum psc_op op,
 			rc = snp_mark_unmergable(kvm, hva, page_level_size(level));
 			mmap_write_unlock(kvm->mm);
 			if (rc)
-				return -EINVAL;
+				return PSC_UNDEF_ERR;
 		}
 
 		write_lock(&kvm->mmu_lock);
@@ -3062,8 +3063,11 @@  static int __snp_handle_page_state_change(struct kvm_vcpu *vcpu, enum psc_op op,
 		case SNP_PAGE_STATE_PRIVATE:
 			rc = rmp_make_private(pfn, gpa, level, sev->asid, false);
 			break;
+		case SNP_PAGE_STATE_PSMASH:
+		case SNP_PAGE_STATE_UNSMASH:
+			/* TODO: Add support to handle it */
 		default:
-			rc = -EINVAL;
+			rc = PSC_INVALID_ENTRY;
 			break;
 		}
 
@@ -3081,6 +3085,65 @@  static int __snp_handle_page_state_change(struct kvm_vcpu *vcpu, enum psc_op op,
 	return 0;
 }
 
+static inline unsigned long map_to_psc_vmgexit_code(int rc)
+{
+	switch (rc) {
+	case PSC_INVALID_HDR:
+		return ((1ul << 32) | 1);
+	case PSC_INVALID_ENTRY:
+		return ((1ul << 32) | 2);
+	case RMPUPDATE_FAIL_OVERLAP:
+		return ((3ul << 32) | 2);
+	default: return (4ul << 32);
+	}
+}
+
+static unsigned long snp_handle_page_state_change(struct vcpu_svm *svm)
+{
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+	int level, op, rc = PSC_UNDEF_ERR;
+	struct snp_psc_desc *info;
+	struct psc_entry *entry;
+	u16 cur, end;
+	gpa_t gpa;
+
+	if (!sev_snp_guest(vcpu->kvm))
+		return PSC_INVALID_HDR;
+
+	if (!setup_vmgexit_scratch(svm, true, sizeof(*info))) {
+		pr_err("vmgexit: scratch area is not setup.\n");
+		return PSC_INVALID_HDR;
+	}
+
+	info = (struct snp_psc_desc *)svm->ghcb_sa;
+	cur = info->hdr.cur_entry;
+	end = info->hdr.end_entry;
+
+	if (cur >= VMGEXIT_PSC_MAX_ENTRY ||
+	    end >= VMGEXIT_PSC_MAX_ENTRY || cur > end)
+		return PSC_INVALID_ENTRY;
+
+	for (; cur <= end; cur++) {
+		entry = &info->entries[cur];
+		gpa = gfn_to_gpa(entry->gfn);
+		level = RMP_TO_X86_PG_LEVEL(entry->pagesize);
+		op = entry->operation;
+
+		if (!IS_ALIGNED(gpa, page_level_size(level))) {
+			rc = PSC_INVALID_ENTRY;
+			goto out;
+		}
+
+		rc = __snp_handle_page_state_change(vcpu, op, gpa, level);
+		if (rc)
+			goto out;
+	}
+
+out:
+	info->hdr.cur_entry = cur;
+	return rc ? map_to_psc_vmgexit_code(rc) : 0;
+}
+
 static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
@@ -3315,6 +3378,15 @@  int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 		ret = 1;
 		break;
 	}
+	case SVM_VMGEXIT_PSC: {
+		unsigned long rc;
+
+		ret = 1;
+
+		rc = snp_handle_page_state_change(svm);
+		svm_set_ghcb_sw_exit_info_2(vcpu, rc);
+		break;
+	}
 	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
 		vcpu_unimpl(vcpu,
 			    "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",