Message ID | 1405596148-1507-5-git-send-email-mihai.caraman@freescale.com (mailing list archive) |
---|---|
State | New, archived |
On 17.07.14 13:22, Mihai Caraman wrote:
> On book3e, the guest's last instruction is read on the exit path using the
> dedicated load external pid (lwepx) instruction. This load operation may fail
> due to TLB eviction and execute-but-not-read entries.
>
> This patch lays down the path for an alternative solution to read the guest's
> last instruction, by allowing the kvmppc_get_last_inst() function to fail.
> Architecture-specific implementations of kvmppc_load_last_inst() may read the
> last guest instruction and instruct the emulation layer to re-execute the
> guest in case of failure.
>
> Make the kvmppc_get_last_inst() definition common between architectures.
>
> Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
> ---

...

> +static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
> +					enum instruction_type type, u32 *inst)
> +{
> +	int ret = EMULATE_DONE;
> +
> +	/* Load the instruction manually if it failed to do so in the
> +	 * exit path */
> +	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
> +		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);
> +
> +
> +	*inst = (ret == EMULATE_DONE && kvmppc_need_byteswap(vcpu)) ?
> +		swab32(vcpu->arch.last_inst) : vcpu->arch.last_inst;

This makes even less sense than the previous version. Either you treat
inst as "definitely overwritten" or as "preserves previous data on
failure".

So either you unconditionally swap like you did before or you do

  if (ret == EMULATE_DONE)
      *inst = kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
                                           vcpu->arch.last_inst;

...

> +int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
> +			  u32 *inst)
> +{
> +	ulong pc = kvmppc_get_pc(vcpu);
> +	int r;
> +
> +	if (type == INST_SC)
> +		pc -= 4;
> +
> +	r = kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);

inst is unused?

The rest looks pretty nice though :).

Alex
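To make the two contracts Alex names concrete, here is an editor's sketch, not code from the patch, of how each would shape the tail of kvmppc_get_last_inst(); the helper names get_last_inst_overwrite() and get_last_inst_preserve() are hypothetical, while the types and callees are the patch's own:

    /* (a) "definitely overwritten": *inst is always written, even when the
     * fetch failed -- the v5 patch above takes this route and merely skips
     * the byte swap on failure. */
    static inline int get_last_inst_overwrite(struct kvm_vcpu *vcpu,
                                              enum instruction_type type, u32 *inst)
    {
            int ret = EMULATE_DONE;

            if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
                    ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

            *inst = (ret == EMULATE_DONE && kvmppc_need_byteswap(vcpu)) ?
                    swab32(vcpu->arch.last_inst) : vcpu->arch.last_inst;
            return ret;
    }

    /* (b) "preserves previous data on failure": *inst is written only on
     * success -- the alternative Alex spells out above. */
    static inline int get_last_inst_preserve(struct kvm_vcpu *vcpu,
                                             enum instruction_type type, u32 *inst)
    {
            int ret = EMULATE_DONE;

            if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
                    ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

            if (ret == EMULATE_DONE)
                    *inst = kvmppc_need_byteswap(vcpu) ?
                            swab32(vcpu->arch.last_inst) : vcpu->arch.last_inst;
            return ret;
    }

The difference only matters to a caller that reads *inst without first checking the return code; v5 as posted is neither (a) nor (b), which is the inconsistency being flagged.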
> -----Original Message-----
> From: Alexander Graf [mailto:agraf@suse.de]
> Sent: Thursday, July 17, 2014 5:21 PM
> To: Caraman Mihai Claudiu-B02008; kvm-ppc@vger.kernel.org
> Cc: kvm@vger.kernel.org; linuxppc-dev@lists.ozlabs.org
> Subject: Re: [PATCH v5 4/5] KVM: PPC: Alow kvmppc_get_last_inst() to fail
>
> On 17.07.14 13:22, Mihai Caraman wrote:
> ...
> > +	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
> > +		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);
> > +
> > +
> > +	*inst = (ret == EMULATE_DONE && kvmppc_need_byteswap(vcpu)) ?
> > +		swab32(vcpu->arch.last_inst) : vcpu->arch.last_inst;
>
> This makes even less sense than the previous version. Either you treat
> inst as "definitely overwritten" or as "preserves previous data on
> failure".

Both the v4 and v5 versions treat inst as "definitely overwritten".

> So either you unconditionally swap like you did before

If we make abstraction of its symmetry, KVM_INST_FETCH_FAILED is operated
in host endianness, so it doesn't need a byte swap.

I agree with your reasoning if last_inst is initialized and compared with
data in guest endianness, which is not yet the case for
KVM_INST_FETCH_FAILED.

> or you do
>
>   if (ret == EMULATE_DONE)
>       *inst = kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
>                                            vcpu->arch.last_inst;

-Mike
> -----Original Message-----
> From: Linuxppc-dev [mailto:linuxppc-dev-
> bounces+mihai.caraman=freescale.com@lists.ozlabs.org] On Behalf Of
> mihai.caraman@freescale.com
> Sent: Friday, July 18, 2014 12:06 PM
> To: Alexander Graf; kvm-ppc@vger.kernel.org
> Cc: linuxppc-dev@lists.ozlabs.org; kvm@vger.kernel.org
> Subject: RE: [PATCH v5 4/5] KVM: PPC: Alow kvmppc_get_last_inst() to fail
>
> ...
>
> I agree with your reasoning if last_inst is initialized and compared with
> data in guest endianness, which is not yet the case for
> KVM_INST_FETCH_FAILED.

Alex, are you relying on the fact that the KVM_INST_FETCH_FAILED value is
symmetrical? With a non-symmetrical value like 0xDEADBEEF, and considering
a little-endian guest on a big-endian host, we would need to fix the kvm
logic to initialize and compare last_inst with the swapped value 0xEFBEADDE.

Your suggestion to unconditionally swap makes sense only with the above fix;
otherwise inst may end up with the swapped value 0xEFBEADDE, which is wrong.

-Mike
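A standalone illustration of the hazard Mihai describes (an editor's sketch in userspace C, not part of the thread's patch; the sentinel value is the hypothetical one from his example):

    #include <stdint.h>
    #include <stdio.h>

    /* Minimal stand-in for the kernel's swab32(). */
    static uint32_t swab32(uint32_t x)
    {
            return (x >> 24) | ((x >> 8) & 0x0000ff00) |
                   ((x << 8) & 0x00ff0000) | (x << 24);
    }

    int main(void)
    {
            uint32_t fetch_failed = 0xDEADBEEF; /* hypothetical, non-symmetrical sentinel */

            /* An unconditional swap (LE guest on a BE host) turns the
             * host-endian sentinel into 0xEFBEADDE, so a later comparison
             * against 0xDEADBEEF would silently stop matching. A byte-symmetric
             * sentinel would hide this bug. */
            printf("0x%08X -> 0x%08X\n", fetch_failed, swab32(fetch_failed));
            return 0;
    }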
On 21.07.14 11:59, mihai.caraman@freescale.com wrote:
>> ...
>>> So either you unconditionally swap like you did before
>>
>> If we make abstraction of its symmetry, KVM_INST_FETCH_FAILED is operated
>> in host endianness, so it doesn't need a byte swap.
>>
>> I agree with your reasoning if last_inst is initialized and compared with
>> data in guest endianness, which is not yet the case for
>> KVM_INST_FETCH_FAILED.
>
> Alex, are you relying on the fact that the KVM_INST_FETCH_FAILED value is
> symmetrical? With a non-symmetrical value like 0xDEADBEEF, and considering
> a little-endian guest on a big-endian host, we would need to fix the kvm
> logic to initialize and compare last_inst with the swapped value 0xEFBEADDE.
>
> Your suggestion to unconditionally swap makes sense only with the above fix;
> otherwise inst may end up with the swapped value 0xEFBEADDE, which is wrong.

Only for *inst, which we would treat as "undefined" after the function
returned EMULATE_AGAIN. last_inst stays unmodified.

Alex
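Read as a caller contract, Alex's point might look like the following sketch (an editor's illustration; handle_emulation_exit() and emulate_op() are hypothetical stand-ins, not functions from the patch):

    static int handle_emulation_exit(struct kvm_vcpu *vcpu)
    {
            u32 inst;

            if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
                    return RESUME_GUEST;    /* re-execute the guest; inst is undefined here */

            /* Only now does inst hold a valid, host-endian instruction word,
             * and vcpu->arch.last_inst was never clobbered with swapped data. */
            return emulate_op(vcpu, inst);
    }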
> -----Original Message-----
> From: kvm-ppc-owner@vger.kernel.org [mailto:kvm-ppc-owner@vger.kernel.org]
> On Behalf Of Alexander Graf
> Sent: Wednesday, July 23, 2014 12:21 AM
> To: Caraman Mihai Claudiu-B02008
> Cc: kvm-ppc@vger.kernel.org; linuxppc-dev@lists.ozlabs.org;
> kvm@vger.kernel.org
> Subject: Re: [PATCH v5 4/5] KVM: PPC: Alow kvmppc_get_last_inst() to fail
>
> ...
>
> Only for *inst, which we would treat as "undefined" after the function
> returned EMULATE_AGAIN.

Right. With this do you acknowledge that v5 (the "definitely overwritten"
approach) is ok?

-Mike
On 23.07.2014 at 10:24, "mihai.caraman@freescale.com" <mihai.caraman@freescale.com> wrote:
>> ...
>>>>> This makes even less sense than the previous version. Either you treat
>>>>> inst as "definitely overwritten" or as "preserves previous data on
>>>>> failure".
>>>>
>>>> Both the v4 and v5 versions treat inst as "definitely overwritten".
>> ...
>> Only for *inst, which we would treat as "undefined" after the function
>> returned EMULATE_AGAIN.
>
> Right. With this do you acknowledge that v5 (the "definitely overwritten"
> approach) is ok?

I think I'm starting to understand your logic of v5. You write
fetch_failed into *inst unswapped if the fetch failed.

I think that's ok, but I definitely do not like the code flow - it's too
hard to understand at a glimpse. Just rewrite it to swab at the local
variable level, preferably with if()s and comments about what this is
doing, and have a single unconditional *inst = fetched_inst; at the end
of the function.

Alex
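One way the requested restructuring might look (an editor's sketch of Alex's suggestion, not the actual v6 patch):

    static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
                                           enum instruction_type type, u32 *inst)
    {
            int ret = EMULATE_DONE;
            u32 fetched_inst;

            /* Load the instruction manually if it failed to do so in the
             * exit path */
            if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
                    ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

            /* Write fetch_failed unswapped if the fetch failed */
            if (ret == EMULATE_DONE && kvmppc_need_byteswap(vcpu))
                    fetched_inst = swab32(vcpu->arch.last_inst);
            else
                    fetched_inst = vcpu->arch.last_inst;

            *inst = fetched_inst;
            return ret;
    }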
>> Right. With this do you acknowledge that v5 (the "definitely overwritten"
>> approach) is ok?
>
> I think I'm starting to understand your logic of v5. You write
> fetch_failed into *inst unswapped if the fetch failed.

"v5 - don't swap when load fails" :)

> I think that's ok, but I definitely do not like the code flow - it's too
> hard to understand at a glimpse. Just rewrite it to swab at the local
> variable level, preferably with if()s and comments about what this is
> doing, and have a single unconditional *inst = fetched_inst; at the end
> of the function.

I will incorporate these change requests into v6.

Thanks,
-Mike
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 20fb6f2..a86ca65 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -276,32 +276,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
 	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
 }
 
-static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
-{
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
-
-	return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
-		vcpu->arch.last_inst;
-}
-
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-	return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
-}
-
-/*
- * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
- * Because the sc instruction sets SRR0 to point to the following
- * instruction, we have to fetch from pc - 4.
- */
-static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
-{
-	return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
-}
-
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault_dar;
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index c7aed61..cbb1990 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -69,11 +69,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
 	return false;
 }
 
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.last_inst;
-}
-
 static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
 {
 	vcpu->arch.ctr = val;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index e2fd5a1..7f9c634 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -47,6 +47,11 @@ enum emulation_result {
 	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
 };
 
+enum instruction_type {
+	INST_GENERIC,
+	INST_SC,		/* system call */
+};
+
 extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern void kvmppc_handler_highmem(void);
@@ -62,6 +67,9 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			       u64 val, unsigned int bytes,
 			       int is_default_endian);
 
+extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
+				 enum instruction_type type, u32 *inst);
+
 extern int kvmppc_emulate_instruction(struct kvm_run *run,
 				      struct kvm_vcpu *vcpu);
 extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
@@ -234,6 +242,23 @@ struct kvmppc_ops {
 extern struct kvmppc_ops *kvmppc_hv_ops;
 extern struct kvmppc_ops *kvmppc_pr_ops;
 
+static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
+					enum instruction_type type, u32 *inst)
+{
+	int ret = EMULATE_DONE;
+
+	/* Load the instruction manually if it failed to do so in the
+	 * exit path */
+	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);
+
+
+	*inst = (ret == EMULATE_DONE && kvmppc_need_byteswap(vcpu)) ?
+		swab32(vcpu->arch.last_inst) : vcpu->arch.last_inst;
+
+	return ret;
+}
+
 static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
 {
 	return kvm->arch.kvm_ops == kvmppc_hv_ops;
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 31facfc..522be6b 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -488,6 +488,23 @@ mmio:
 }
 EXPORT_SYMBOL_GPL(kvmppc_ld);
 
+int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
+			  u32 *inst)
+{
+	ulong pc = kvmppc_get_pc(vcpu);
+	int r;
+
+	if (type == INST_SC)
+		pc -= 4;
+
+	r = kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
+	if (r == EMULATE_DONE)
+		return r;
+	else
+		return EMULATE_AGAIN;
+}
+EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
+
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	return 0;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 2d154d9..fa944a3 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -530,21 +530,14 @@ static int instruction_is_store(unsigned int instr)
 static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				  unsigned long gpa, gva_t ea, int is_store)
 {
-	int ret;
 	u32 last_inst;
-	unsigned long srr0 = kvmppc_get_pc(vcpu);
 
-	/* We try to load the last instruction.  We don't let
-	 * emulate_instruction do it as it doesn't check what
-	 * kvmppc_ld returns.
+	/*
 	 * If we fail, we just return to the guest and try executing it again.
 	 */
-	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
-		ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
-		if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
-			return RESUME_GUEST;
-		vcpu->arch.last_inst = last_inst;
-	}
+	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
+		EMULATE_DONE)
+		return RESUME_GUEST;
 
 	/*
 	 * WARNING: We do not know for sure whether the instruction we just
@@ -558,7 +551,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 * we just return and retry the instruction.
 	 */
 
-	if (instruction_is_store(kvmppc_get_last_inst(vcpu)) != !!is_store)
+	if (instruction_is_store(last_inst) != !!is_store)
 		return RESUME_GUEST;
 
 	/*
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index 6c8011f..bfb8035 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -639,26 +639,36 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
 
 int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
-	u32 inst = kvmppc_get_last_inst(vcpu);
+	u32 inst;
 	enum emulation_result emulated = EMULATE_DONE;
+	int ax_rd, ax_ra, ax_rb, ax_rc;
+	short full_d;
+	u64 *fpr_d, *fpr_a, *fpr_b, *fpr_c;
 
-	int ax_rd = inst_get_field(inst, 6, 10);
-	int ax_ra = inst_get_field(inst, 11, 15);
-	int ax_rb = inst_get_field(inst, 16, 20);
-	int ax_rc = inst_get_field(inst, 21, 25);
-	short full_d = inst_get_field(inst, 16, 31);
-
-	u64 *fpr_d = &VCPU_FPR(vcpu, ax_rd);
-	u64 *fpr_a = &VCPU_FPR(vcpu, ax_ra);
-	u64 *fpr_b = &VCPU_FPR(vcpu, ax_rb);
-	u64 *fpr_c = &VCPU_FPR(vcpu, ax_rc);
-
-	bool rcomp = (inst & 1) ? true : false;
-	u32 cr = kvmppc_get_cr(vcpu);
+	bool rcomp;
+	u32 cr;
 #ifdef DEBUG
 	int i;
 #endif
 
+	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
+	if (emulated != EMULATE_DONE)
+		return emulated;
+
+	ax_rd = inst_get_field(inst, 6, 10);
+	ax_ra = inst_get_field(inst, 11, 15);
+	ax_rb = inst_get_field(inst, 16, 20);
+	ax_rc = inst_get_field(inst, 21, 25);
+	full_d = inst_get_field(inst, 16, 31);
+
+	fpr_d = &VCPU_FPR(vcpu, ax_rd);
+	fpr_a = &VCPU_FPR(vcpu, ax_ra);
+	fpr_b = &VCPU_FPR(vcpu, ax_rb);
+	fpr_c = &VCPU_FPR(vcpu, ax_rc);
+
+	rcomp = (inst & 1) ? true : false;
+	cr = kvmppc_get_cr(vcpu);
+
 	if (!kvmppc_inst_is_paired_single(vcpu, inst))
 		return EMULATE_FAIL;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 02a983e..af67e93 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1018,15 +1018,24 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	{
 		enum emulation_result er;
 		ulong flags;
+		u32 last_inst;
+		int emul;
 
 program_interrupt:
 		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
 
+		emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
+		if (emul != EMULATE_DONE) {
+			r = RESUME_GUEST;
+			break;
+		}
+
 		if (kvmppc_get_msr(vcpu) & MSR_PR) {
 #ifdef EXIT_DEBUG
-			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
+			pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
+				kvmppc_get_pc(vcpu), last_inst);
#endif
-			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
+			if ((last_inst & 0xff0007ff) !=
 			    (INS_DCBZ & 0xfffffff7)) {
 				kvmppc_core_queue_program(vcpu, flags);
 				r = RESUME_GUEST;
@@ -1045,7 +1054,7 @@ program_interrupt:
 			break;
 		case EMULATE_FAIL:
 			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
-			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
+			       __func__, kvmppc_get_pc(vcpu), last_inst);
 			kvmppc_core_queue_program(vcpu, flags);
 			r = RESUME_GUEST;
 			break;
@@ -1062,8 +1071,23 @@ program_interrupt:
 		break;
 	}
 	case BOOK3S_INTERRUPT_SYSCALL:
+	{
+		u32 last_sc;
+		int emul;
+
+		/* Get last sc for papr */
+		if (vcpu->arch.papr_enabled) {
+			/* The sc instuction points SRR0 to the next inst */
+			emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
+			if (emul != EMULATE_DONE) {
+				kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
+				r = RESUME_GUEST;
+				break;
+			}
+		}
+
 		if (vcpu->arch.papr_enabled &&
-		    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
+		    (last_sc == 0x44000022) &&
 		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
 			/* SC 1 papr hypercalls */
 			ulong cmd = kvmppc_get_gpr(vcpu, 3);
@@ -1108,27 +1132,16 @@ program_interrupt:
 			r = RESUME_GUEST;
 		}
 		break;
+	}
 	case BOOK3S_INTERRUPT_FP_UNAVAIL:
 	case BOOK3S_INTERRUPT_ALTIVEC:
 	case BOOK3S_INTERRUPT_VSX:
 	{
 		int ext_msr = 0;
 		int emul;
-		ulong pc;
 		u32 last_inst;
 
-		if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
-			/* Emulate the instruction */
-
-			pc = kvmppc_get_pc(vcpu);
-			last_inst = kvmppc_get_last_inst(vcpu);
-			emul = kvmppc_ld(vcpu, &pc, sizeof(u32), &last_inst,
-					 false);
-			if (emul == EMULATE_DONE)
-				goto program_interrupt;
-			else
-				r = RESUME_GUEST;
-		} else {
+		if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)) {
 			/* Do paired single emulation */
 			switch (exit_nr) {
@@ -1146,14 +1159,24 @@ program_interrupt:
 			}
 
 			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
+			break;
+		}
+
+		emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
+		if (emul == EMULATE_DONE) {
+			/* we need to emulate this instruction */
+			goto program_interrupt;
+			break;
+		} else {
+			r = RESUME_GUEST;
 		}
+
 		break;
 	}
 	case BOOK3S_INTERRUPT_ALIGNMENT:
 	{
-		ulong pc = kvmppc_get_pc(vcpu);
-		u32 last_inst = kvmppc_get_last_inst(vcpu);
-		int emul = kvmppc_ld(vcpu, &pc, sizeof(u32), &last_inst, false);
+		u32 last_inst;
+		int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
 
 		if (emul == EMULATE_DONE) {
 			u32 dsisr;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index ab62109..34a42b9 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -752,6 +752,9 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		 * they were actually modified by emulation. */
 		return RESUME_GUEST_NV;
 
+	case EMULATE_AGAIN:
+		return RESUME_GUEST;
+
 	case EMULATE_DO_DCR:
 		run->exit_reason = KVM_EXIT_DCR;
 		return RESUME_HOST;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 79677d7..4385c14 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -610,6 +610,12 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 	}
 }
 
+int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
+			  u32 *instr)
+{
+	return EMULATE_AGAIN;
+}
+
 /************* MMU Notifiers *************/
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index da86d9b..c5c64b6 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -224,19 +224,25 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
  * from opcode tables in the future. */
 int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
-	u32 inst = kvmppc_get_last_inst(vcpu);
-	int ra = get_ra(inst);
-	int rs = get_rs(inst);
-	int rt = get_rt(inst);
-	int sprn = get_sprn(inst);
-	enum emulation_result emulated = EMULATE_DONE;
+	u32 inst;
+	int ra, rs, rt, sprn;
+	enum emulation_result emulated;
 	int advance = 1;
 
 	/* this default type might be overwritten by subcategories */
 	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
 
+	emulated = kvmppc_get_last_inst(vcpu, false, &inst);
+	if (emulated != EMULATE_DONE)
+		return emulated;
+
 	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
 
+	ra = get_ra(inst);
+	rs = get_rs(inst);
+	rt = get_rt(inst);
+	sprn = get_sprn(inst);
+
 	switch (get_op(inst)) {
 	case OP_TRAP:
 #ifdef CONFIG_PPC_BOOK3S
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index fe0257a..cfa6cfa 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -280,6 +280,9 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		 * actually modified. */
 		r = RESUME_GUEST_NV;
 		break;
+	case EMULATE_AGAIN:
+		r = RESUME_GUEST;
+		break;
 	case EMULATE_DO_MMIO:
 		run->exit_reason = KVM_EXIT_MMIO;
 		/* We must reload nonvolatiles because "update" load/store
@@ -289,11 +292,15 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		r = RESUME_HOST_NV;
 		break;
 	case EMULATE_FAIL:
+	{
+		u32 last_inst;
+
+		kvmppc_get_last_inst(vcpu, false, &last_inst);
 		/* XXX Deliver Program interrupt to guest. */
-		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
-		       kvmppc_get_last_inst(vcpu));
+		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
 		r = RESUME_HOST;
 		break;
+	}
 	default:
 		WARN_ON(1);
 		r = RESUME_GUEST;
On book3e, the guest's last instruction is read on the exit path using the
dedicated load external pid (lwepx) instruction. This load operation may fail
due to TLB eviction and execute-but-not-read entries.

This patch lays down the path for an alternative solution to read the guest's
last instruction, by allowing the kvmppc_get_last_inst() function to fail.
Architecture-specific implementations of kvmppc_load_last_inst() may read the
last guest instruction and instruct the emulation layer to re-execute the
guest in case of failure.

Make the kvmppc_get_last_inst() definition common between architectures.

Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
---
v5:
 - don't swap when load fails
 - convert the return value space of kvmppc_ld()

v4:
 - these changes compile on book3s, please validate the functionality and
   do the necessary adaptations!
 - common declaration and enum for kvmppc_load_last_inst()
 - remove kvmppc_read_inst() in a preceding patch

v3:
 - rework patch description
 - add common definition for kvmppc_get_last_inst()
 - check return values in book3s code

v2:
 - integrated kvmppc_get_last_inst() in book3s code and checked build
 - addressed cosmetic feedback

 arch/powerpc/include/asm/kvm_book3s.h    | 26 -------------
 arch/powerpc/include/asm/kvm_booke.h     |  5 ---
 arch/powerpc/include/asm/kvm_ppc.h       | 25 +++++++++++++
 arch/powerpc/kvm/book3s.c                | 17 +++++++++
 arch/powerpc/kvm/book3s_64_mmu_hv.c      | 17 +++------
 arch/powerpc/kvm/book3s_paired_singles.c | 38 ++++++++++++-------
 arch/powerpc/kvm/book3s_pr.c             | 63 ++++++++++++++++++++++----------
 arch/powerpc/kvm/booke.c                 |  3 ++
 arch/powerpc/kvm/e500_mmu_host.c         |  6 +++
 arch/powerpc/kvm/emulate.c               | 18 ++++++---
 arch/powerpc/kvm/powerpc.c               | 11 +++++-
 11 files changed, 144 insertions(+), 85 deletions(-)
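Editor's note on the INST_SC case in kvmppc_load_last_inst() above: the sc instruction sets SRR0 to the *following* instruction (as the comment removed from kvm_book3s.h explained), so the fetch must step back one word. A hypothetical helper, not in the patch, making that explicit:

    /* Compute the fetch address for the guest's last instruction. */
    static ulong last_inst_addr(struct kvm_vcpu *vcpu, enum instruction_type type)
    {
            ulong pc = kvmppc_get_pc(vcpu);

            if (type == INST_SC)
                    pc -= 4;    /* SRR0 points past the sc; step back one word */

            return pc;
    }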