@@ -2217,12 +2217,43 @@ static void kvm_s390_set_crycb_format(struct kvm *kvm)
 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
 }
 
+static int kvm_arch_crypto_test_masks(struct kvm *kvm, unsigned long *apm,
+				      unsigned long *aqm, unsigned long *adm)
+{
+	int ret;
+	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
+
+	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
+	case CRYCB_FORMAT2: /* APCB1 masks use 256 bits */
+		ret = bitmap_equal(apm, (unsigned long *)crycb->apcb1.apm, 256);
+		ret &= bitmap_equal(aqm,
+				    (unsigned long *)crycb->apcb1.aqm, 256);
+		ret &= bitmap_equal(adm,
+				    (unsigned long *)crycb->apcb1.adm, 256);
+		break;
+	case CRYCB_FORMAT1:
+	case CRYCB_FORMAT0: /* Fall through: both formats use APCB0 */
+		ret = bitmap_equal(apm, (unsigned long *)crycb->apcb0.apm, 64);
+		ret &= bitmap_equal(aqm, (unsigned long *)crycb->apcb0.aqm, 16);
+		ret &= bitmap_equal(adm, (unsigned long *)crycb->apcb0.adm, 16);
+		break;
+	default: /* Cannot happen */
+		ret = 0;
+		break;
+	}
+	return ret;
+}
+
 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
 			       unsigned long *aqm, unsigned long *adm)
 {
 	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
 
 	mutex_lock(&kvm->lock);
+	if (kvm_arch_crypto_test_masks(kvm, apm, aqm, adm)) {
+		mutex_unlock(&kvm->lock);
+		return;
+	}
 	kvm_s390_vcpu_block_all(kvm);
 	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
There is no sense in blocking all vCPUs to set the masks in the guest's CRYCB if the mask values will not be changed, so let's verify that the mask values will change before blocking all vCPUs in order to set the crypto masks in the guest's CRYCB.

Signed-off-by: Tony Krowiak <akrowiak@linux.ibm.com>
---
 arch/s390/kvm/kvm-s390.c | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)
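
For anyone who wants to see the early-return behavior in isolation, below is a minimal standalone userspace sketch. The struct apcb0 layout, bits_equal(), masks_equal(), and set_masks() here are simplified stand-ins for the kernel's APCB0 format, bitmap_equal(), and kvm_arch_crypto_set_masks(); they are not the real KVM structures or APIs, and only model the compare-before-update idea.

/*
 * Standalone sketch of the compare-before-update logic. The names and
 * layout below are illustrative only; they mimic the kernel's APCB0
 * masks (64-bit apm, 16-bit aqm/adm) but are not the KVM code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct apcb0 {
	uint64_t apm;	/* adapter mask, all 64 bits used */
	uint64_t aqm;	/* usage-domain mask, low 16 bits used */
	uint64_t adm;	/* control-domain mask, low 16 bits used */
};

/* Compare only the low @nbits bits, like a one-word bitmap_equal(). */
static bool bits_equal(uint64_t a, uint64_t b, unsigned int nbits)
{
	uint64_t mask = nbits >= 64 ? ~0ULL : (1ULL << nbits) - 1;

	return (a & mask) == (b & mask);
}

static bool masks_equal(const struct apcb0 *cb, uint64_t apm,
			uint64_t aqm, uint64_t adm)
{
	return bits_equal(cb->apm, apm, 64) &&
	       bits_equal(cb->aqm, aqm, 16) &&
	       bits_equal(cb->adm, adm, 16);
}

/* Returns true only when the (expensive) update path had to run. */
static bool set_masks(struct apcb0 *cb, uint64_t apm,
		      uint64_t aqm, uint64_t adm)
{
	if (masks_equal(cb, apm, aqm, adm))
		return false;	/* nothing to change: skip the update */

	/* in the kernel, this is where all vCPUs would be blocked */
	cb->apm = apm;
	cb->aqm = aqm;
	cb->adm = adm;
	return true;
}

int main(void)
{
	struct apcb0 cb = { .apm = 0x3, .aqm = 0x1, .adm = 0x1 };

	printf("first call ran update:  %d\n", set_masks(&cb, 0x7, 0x1, 0x1));
	printf("second call ran update: %d\n", set_masks(&cb, 0x7, 0x1, 0x1));
	return 0;
}

The second call finds the requested masks already in place and never reaches the update path, which corresponds to the patched kernel function returning before kvm_s390_vcpu_block_all() when the CRYCB already holds the requested masks.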