@@ -758,7 +758,7 @@ struct kvm_vcpu_arch {
u8 prodded;
u8 doorbell_request;
u8 irq_pending; /* Used by XIVE to signal pending guest irqs */
- u32 last_inst;
+ unsigned long last_inst;
struct rcuwait wait;
struct rcuwait *waitp;
@@ -818,7 +818,7 @@ struct kvm_vcpu_arch {
u64 busy_stolen;
u64 busy_preempt;
- u32 emul_inst;
+ u64 emul_inst;
u32 online;
@@ -85,7 +85,8 @@ extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
int is_default_endian);
extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
- enum instruction_fetch_type type, u32 *inst);
+ enum instruction_fetch_type type,
+ unsigned long *inst);
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
bool data);
@@ -328,15 +329,30 @@ static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);
/* Write fetch_failed unswapped if the fetch failed */
- if (ret == EMULATE_DONE)
- fetched_inst = kvmppc_need_byteswap(vcpu) ?
- swab32(vcpu->arch.last_inst) :
- vcpu->arch.last_inst;
- else
- fetched_inst = vcpu->arch.last_inst;
+ if (ret != EMULATE_DONE) {
+ *inst = ppc_inst(KVM_INST_FETCH_FAILED);
+ return ret;
+ }
+
+#ifdef CONFIG_PPC64
+ /* Is this a prefixed instruction? */
+ if ((vcpu->arch.last_inst >> 32) != 0) {
+ u32 prefix = vcpu->arch.last_inst >> 32;
+ u32 suffix = vcpu->arch.last_inst;
+
+ if (kvmppc_need_byteswap(vcpu)) {
+ prefix = swab32(prefix);
+ suffix = swab32(suffix);
+ }
+ *inst = ppc_inst_prefix(prefix, suffix);
+ return EMULATE_DONE;
+ }
+#endif
+ fetched_inst = kvmppc_need_byteswap(vcpu) ?
+ swab32(vcpu->arch.last_inst) :
+ vcpu->arch.last_inst;
*inst = ppc_inst(fetched_inst);
- return ret;
+ return EMULATE_DONE;
}
static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
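As a concrete illustration of the packing convention used above, here is a minimal standalone sketch (plain C, not kernel code: swab32() is modeled with __builtin_bswap32(), and the paddi-style prefix with addi-form suffix is a made-up value) that splits a last_inst doubleword into its two halves and swaps each half independently when guest and host endianness differ:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's swab32() */
static uint32_t swab32(uint32_t v)
{
	return __builtin_bswap32(v);
}

/*
 * Prefix in the high 32 bits (zero for a non-prefixed instruction),
 * suffix in the low 32 bits; each half is swapped separately for a
 * cross-endian guest, as in kvmppc_get_last_inst() above.
 */
static void split_last_inst(uint64_t last_inst, int need_byteswap,
			    uint32_t *prefix, uint32_t *suffix)
{
	*prefix = last_inst >> 32;
	*suffix = (uint32_t)last_inst;
	if (need_byteswap) {
		*prefix = swab32(*prefix);
		*suffix = swab32(*suffix);
	}
}

int main(void)
{
	uint32_t prefix, suffix;

	split_last_inst(0x0600000038600001ull, 0, &prefix, &suffix);
	printf("prefix %08x suffix %08x\n", prefix, suffix);
	return 0;
}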
@@ -481,20 +481,42 @@ int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
return r;
}
+/*
+ * Returns prefixed instructions with the prefix in the high 32 bits
+ * of *inst and suffix in the low 32 bits. This is the same convention
+ * as used in HEIR, vcpu->arch.last_inst and vcpu->arch.emul_inst.
+ * Like vcpu->arch.last_inst but unlike vcpu->arch.emul_inst, each
+ * half of the value needs byte-swapping if the guest endianness is
+ * different from the host endianness.
+ */
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
- enum instruction_fetch_type type, u32 *inst)
+ enum instruction_fetch_type type, unsigned long *inst)
{
ulong pc = kvmppc_get_pc(vcpu);
int r;
+ u32 iw;
if (type == INST_SC)
pc -= 4;
- r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
- if (r == EMULATE_DONE)
- return r;
- else
+ r = kvmppc_ld(vcpu, &pc, sizeof(u32), &iw, false);
+ if (r != EMULATE_DONE)
return EMULATE_AGAIN;
+ /*
+ * If [H]SRR1 indicates that the instruction that caused the
+ * current interrupt is a prefixed instruction, get the suffix.
+ */
+ if (kvmppc_get_msr(vcpu) & SRR1_PREFIXED) {
+ u32 suffix;
+
+ pc += 4;
+ r = kvmppc_ld(vcpu, &pc, sizeof(u32), &suffix, false);
+ if (r != EMULATE_DONE)
+ return EMULATE_AGAIN;
+ *inst = ((u64)iw << 32) | suffix;
+ } else {
+ *inst = iw;
+ }
+ return r;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
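One detail from the comment above is worth spelling out: the instruction stream is a sequence of 32-bit words, so a cross-endian guest reverses bytes within each word, never across the prefix/suffix pair, and a single swab64() of the combined value would be wrong. A small demonstration (standalone C with an illustrative value, using the GCC byte-swap builtins):

#include <stdint.h>
#include <stdio.h>

/* Swap each 32-bit half in place, as the fetch path above does */
static uint64_t swap_each_half(uint64_t v)
{
	return ((uint64_t)__builtin_bswap32((uint32_t)(v >> 32)) << 32) |
	       __builtin_bswap32((uint32_t)v);
}

int main(void)
{
	uint64_t inst = 0x0600000038600001ull; /* illustrative prefix/suffix */

	/* A whole-doubleword swap would also exchange the two words,
	 * putting the suffix where the prefix belongs. */
	printf("per-half swap: %016llx\n",
	       (unsigned long long)swap_each_half(inst));
	printf("swab64:        %016llx\n",
	       (unsigned long long)__builtin_bswap64(inst));
	return 0;
}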
@@ -433,6 +433,7 @@ int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
unsigned long gpa, gva_t ea, int is_store)
{
ppc_inst_t last_inst;
+ bool is_prefixed = !!(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
/*
* Fast path - check if the guest physical address corresponds to a
@@ -447,7 +448,7 @@ int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
NULL);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (!ret) {
- kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
+ kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + (is_prefixed ? 8 : 4));
return RESUME_GUEST;
}
}
@@ -462,7 +463,16 @@ int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
/*
* WARNING: We do not know for sure whether the instruction we just
* read from memory is the same that caused the fault in the first
- * place. If the instruction we read is neither an load or a store,
+ * place.
+ *
+ * If the fault was caused by a prefixed instruction but the
+ * instruction we read back is not prefixed, or vice versa, try
+ * again so that we don't advance pc by the wrong amount.
+ */
+ if (ppc_inst_prefixed(last_inst) != is_prefixed)
+ return RESUME_GUEST;
+
+ /*
+ * If the instruction we read is neither a load nor a store,
* then it can't access memory, so we don't need to worry about
* enforcing access permissions. So, assuming it is a load or
* store, we just check that its direction (load or store) is
@@ -474,7 +474,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
for (r = 0; r < vcpu->arch.slb_max; ++r)
pr_err(" ESID = %.16llx VSID = %.16llx\n",
vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
- pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
+ pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.16lx\n",
vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
vcpu->arch.last_inst);
}
@@ -1071,11 +1071,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* Save HEIR (HV emulation assist reg) in emul_inst
if this is an HEI (HV emulation interrupt, e40) */
li r3,KVM_INST_FETCH_FAILED
- stw r3,VCPU_LAST_INST(r9)
+ std r3,VCPU_LAST_INST(r9)
cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
bne 11f
mfspr r3,SPRN_HEIR
-11: stw r3,VCPU_HEIR(r9)
+11: std r3,VCPU_HEIR(r9)
/* these are volatile across C function calls */
mfctr r3
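The stw-to-std changes are the crux of this hunk: for a prefixed instruction, HEIR now holds a 64-bit image with the prefix in the high word, and a 32-bit store would silently drop it. A short illustration of the truncation (standalone C, made-up value):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t heir = 0x0600000038600001ull; /* prefix in the high word */
	uint32_t stw_kept = (uint32_t)heir;    /* what a 32-bit store keeps */

	printf("std keeps %016llx, stw would keep only %08x\n",
	       (unsigned long long)heir, stw_kept);
	return 0;
}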
@@ -1676,7 +1676,7 @@ fast_interrupt_c_return:
mtmsrd r3
/* Store the result */
- stw r8, VCPU_LAST_INST(r9)
+ std r8, VCPU_LAST_INST(r9)
/* Unset guest mode. */
li r0, KVM_GUEST_MODE_HOST_HV
@@ -841,7 +841,7 @@ static int emulation_exit(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
case EMULATE_FAIL:
- printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
+ printk(KERN_CRIT "%s: emulation at %lx failed (%08lx)\n",
__func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
/* For debugging, encode the failing instruction and
* report it to userspace. */
@@ -139,7 +139,7 @@ END_BTB_FLUSH_SECTION
* kvmppc_get_last_inst().
*/
li r9, KVM_INST_FETCH_FAILED
- stw r9, VCPU_LAST_INST(r4)
+ PPC_STL r9, VCPU_LAST_INST(r4)
.endif
.if \flags & NEED_ESR
@@ -623,7 +623,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
#ifdef CONFIG_KVM_BOOKE_HV
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
- enum instruction_fetch_type type, u32 *instr)
+ enum instruction_fetch_type type, unsigned long *instr)
{
gva_t geaddr;
hpa_t addr;
@@ -713,7 +713,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
}
#else
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
- enum instruction_fetch_type type, u32 *instr)
+ enum instruction_fetch_type type, unsigned long *instr)
{
return EMULATE_AGAIN;
}
@@ -301,6 +301,10 @@ int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu)
trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
/* Advance past emulated instruction. */
+ /*
+ * If this ever handles prefixed instructions, the 4
+ * will need to become ppc_inst_len(pinst) instead.
+ */
if (advance)
kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
@@ -360,7 +360,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
/* Advance past emulated instruction. */
if (emulated != EMULATE_FAIL)
- kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
+ kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + ppc_inst_len(inst));
return emulated;
}
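The effect of replacing the fixed "+ 4" with ppc_inst_len(inst) can be summed up in one helper. The sketch below (standalone C; the real ppc_inst_len() takes a ppc_inst_t rather than a raw word) captures the behaviour this hunk relies on:

#include <stdint.h>
#include <stdio.h>

/* Byte length of the instruction whose first word is given: 8 for a
 * prefixed instruction (primary opcode 1), otherwise 4. */
static unsigned int inst_len(uint32_t first_word)
{
	return (first_word >> 26) == 1 ? 8 : 4;
}

int main(void)
{
	printf("addi:  %u bytes\n", inst_len(0x38600001)); /* 4 */
	printf("paddi: %u bytes\n", inst_len(0x06000000)); /* 8 */
	return 0;
}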