@@ -34,16 +34,112 @@ struct kvm_vcpu_arch_shared {
__u32 dsisr;
};
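+
+/* Magic tokens: PV interface detection via mfpvr and hypercall marking */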
+#define KVM_PVR_PARA 0x4b564d3f /* "KVM?" */
+#define KVM_SC_MAGIC_R0 0x4b564d52 /* "KVMR" */
+#define KVM_SC_MAGIC_R3 0x554c455a /* "ULEZ" */
+
#ifdef __KERNEL__
static inline int kvm_para_available(void)
{
- return 0;
+ unsigned long pvr = KVM_PVR_PARA;
+
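+ /*
+ * Preload the magic token and read the PVR: real hardware
+ * overwrites it with the actual processor version, while a KVM
+ * host traps the mfpvr and leaves the token in place (see the
+ * SPRN_PVR emulation below).
+ */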
+ asm volatile("mfpvr %0" : "=r"(pvr) : "0"(pvr));
+ return pvr == KVM_PVR_PARA;
+}
+
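+/*
+ * Hypercall ABI: r0 and r3 hold the magic tokens that mark the sc
+ * as a KVM hypercall, r4 holds the hypercall number and r5..r8 up
+ * to four arguments; the host returns its result in r3.
+ */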
+static inline long kvm_hypercall0(unsigned int nr)
+{
+ register unsigned long r0 asm("r0") = KVM_SC_MAGIC_R0;
+ register unsigned long r3 asm("r3") = KVM_SC_MAGIC_R3;
+ register unsigned long _nr asm("r4") = nr;
+
+ asm volatile("sc"
+ : "=r"(r3)
+ : "r"(r0), "r"(r3), "r"(_nr)
+ : "memory");
+
+ return r3;
}
+
+static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
+{
+ register unsigned long r0 asm("r0") = KVM_SC_MAGIC_R0;
+ register unsigned long r3 asm("r3") = KVM_SC_MAGIC_R3;
+ register unsigned long _nr asm("r4") = nr;
+ register unsigned long _p1 asm("r5") = p1;
+
+ asm volatile("sc"
+ : "=r"(r3)
+ : "r"(r0), "r"(r3), "r"(_nr), "r"(_p1)
+ : "memory");
+
+ return r3;
+}
+
+static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
+ unsigned long p2)
+{
+ register unsigned long r0 asm("r0") = KVM_SC_MAGIC_R0;
+ register unsigned long r3 asm("r3") = KVM_SC_MAGIC_R3;
+ register unsigned long _nr asm("r4") = nr;
+ register unsigned long _p1 asm("r5") = p1;
+ register unsigned long _p2 asm("r6") = p2;
+
+ asm volatile("sc"
+ : "=r"(r3)
+ : "r"(r0), "r"(r3), "r"(_nr), "r"(_p1), "r"(_p2)
+ : "memory");
+
+ return r3;
+}
+
+static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
+ unsigned long p2, unsigned long p3)
+{
+ register unsigned long r0 asm("r0") = KVM_SC_MAGIC_R0;
+ register unsigned long r3 asm("r3") = KVM_SC_MAGIC_R3;
+ register unsigned long _nr asm("r4") = nr;
+ register unsigned long _p1 asm("r5") = p1;
+ register unsigned long _p2 asm("r6") = p2;
+ register unsigned long _p3 asm("r7") = p3;
+
+ asm volatile("sc"
+ : "=r"(r3)
+ : "r"(r0), "r"(r3), "r"(_nr), "r"(_p1), "r"(_p2), "r"(_p3)
+ : "memory");
+
+ return r3;
+}
+
+static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4)
+{
+ register unsigned long r0 asm("r0") = KVM_SC_MAGIC_R0;
+ register unsigned long r3 asm("r3") = KVM_SC_MAGIC_R3;
+ register unsigned long _nr asm("r4") = nr;
+ register unsigned long _p1 asm("r5") = p1;
+ register unsigned long _p2 asm("r6") = p2;
+ register unsigned long _p3 asm("r7") = p3;
+ register unsigned long _p4 asm("r8") = p4;
+
+ asm volatile("sc"
+ : "=r"(r3)
+ : "r"(r0), "r"(r3), "r"(_nr), "r"(_p1), "r"(_p2), "r"(_p3),
+ "r"(_p4)
+ : "memory");
+
+ return r3;
+}
+
static inline unsigned int kvm_arch_para_features(void)
{
- return 0;
+ if (!kvm_para_available())
+ return 0;
+
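+ /* Ask the host which PV features it supports (bitmap in r3) */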
+ return kvm_hypercall0(KVM_HC_FEATURES);
}
#endif /* __KERNEL__ */
@@ -107,6 +107,7 @@ extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);
extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
+extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
/*
* Cuts out inst bits with ordering according to spec.
@@ -947,10 +947,10 @@ program_interrupt:
break;
}
case BOOK3S_INTERRUPT_SYSCALL:
- // XXX make user settable
if (vcpu->arch.osi_enabled &&
(((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
(((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
+ /* MOL hypercalls */
u64 *gprs = run->osi.gprs;
int i;
@@ -959,8 +959,14 @@ program_interrupt:
gprs[i] = kvmppc_get_gpr(vcpu, i);
vcpu->arch.osi_needed = 1;
r = RESUME_HOST_NV;
-
+ } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
+ (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0) &&
+ (((u32)kvmppc_get_gpr(vcpu, 3)) == KVM_SC_MAGIC_R3)) {
+ /* KVM PV hypercalls (only accepted from guest kernel mode) */
+ kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
+ r = RESUME_GUEST;
} else {
+ /* Guest syscalls */
vcpu->stat.syscall_exits++;
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
r = RESUME_GUEST;
@@ -338,7 +338,16 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case BOOKE_INTERRUPT_SYSCALL:
- kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
+ if (!(vcpu->arch.shared->msr & MSR_PR) &&
+ (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0) &&
+ (((u32)kvmppc_get_gpr(vcpu, 3)) == KVM_SC_MAGIC_R3)) {
+ /* KVM PV hypercalls (only accepted from guest kernel mode) */
+ kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
+ } else {
+ /* Guest syscalls */
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
+ }
kvmppc_account_exit(vcpu, SYSCALL_EXITS);
r = RESUME_GUEST;
break;
@@ -248,7 +248,16 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1);
break;
case SPRN_PVR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
+ /*
+ * Expose the PV interface: if the guest preloaded rt with
+ * KVM_PVR_PARA, leave the token in place so that
+ * kvm_para_available() sees it survive the mfpvr.
+ */
+ if (kvmppc_get_gpr(vcpu, rt) == KVM_PVR_PARA) {
+ kvmppc_set_gpr(vcpu, rt, KVM_PVR_PARA);
+ break;
+ }
+
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr);
+ break;
case SPRN_PIR:
kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
case SPRN_MSSSR0:
@@ -42,6 +42,34 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
!!(v->arch.pending_exceptions);
}
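+
+/*
+ * Top-level PV hypercall handler, shared by the book3s and booke
+ * exit paths: the hypercall number arrives in r4 and the arguments
+ * in r5..r8, mirroring the guest-side kvm_hypercallN() helpers.
+ */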
+int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
+{
+ int nr = kvmppc_get_gpr(vcpu, 4);
+ int r;
+ unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 5);
+ unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 6);
+ unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 7);
+ unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 8);
+
+ if (!(vcpu->arch.shared->msr & MSR_SF)) {
+ /* 32 bit mode: only the low 32 bits of the arguments are defined */
+ param1 &= 0xffffffff;
+ param2 &= 0xffffffff;
+ param3 &= 0xffffffff;
+ param4 &= 0xffffffff;
+ }
+
+ switch (nr) {
+ case KVM_HC_FEATURES:
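+ /* No optional features advertised yet: return an empty bitmap */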
+ r = 0;
+ break;
+ default:
+ r = -KVM_ENOSYS;
+ break;
+ }
+
+ return r;
+}
+
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
@@ -17,6 +17,7 @@
#define KVM_HC_VAPIC_POLL_IRQ 1
#define KVM_HC_MMU_OP 2
+#define KVM_HC_FEATURES 3
/*
* hypercalls use architecture specific