@@ -241,6 +241,32 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
vcpu->arch.shared->int_pending = 0;
}
+/* Check whether a DTLB miss was on the magic page. Returns non-zero if so. */
+int kvmppc_dtlb_magic_page(struct kvm_vcpu *vcpu, ulong eaddr)
+{
+ ulong mp_ea = vcpu->arch.magic_page_ea;
+ ulong gpaddr = vcpu->arch.magic_page_pa;
+ int gtlb_index = 11 | (1 << 16); /* Arbitrary entry 11; bit 16 selects TLB1 */
+
+ /* Check for existence of magic page */
+ if (likely(!mp_ea))
+ return 0;
+
+ /* Check whether the faulting address falls on the magic page */
+ if (likely((eaddr >> 12) != (mp_ea >> 12)))
+ return 0;
+
+ /* The magic page is privileged; don't map it for user mode (MSR_PR) */
+ if (vcpu->arch.shared->msr & MSR_PR)
+ return 0;
+
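+ /* Back the guest mapping and account this as a virtualized DTLB miss */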
+ kvmppc_mmu_map(vcpu, mp_ea, gpaddr, gtlb_index);
+ kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
+
+ return 1;
+}
+
/**
* kvmppc_handle_exit
*
@@ -308,6 +334,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_HOST;
break;
case EMULATE_FAIL:
+ case EMULATE_DO_MMIO:
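+ /* An unexpected MMIO result here is treated like a failed emulation */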
/* XXX Deliver Program interrupt to guest. */
printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
__func__, vcpu->arch.pc, vcpu->arch.last_inst);
@@ -377,6 +405,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
gpa_t gpaddr;
gfn_t gfn;
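+ /* A miss on the magic page is resolved entirely within the host */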
+ if (kvmppc_dtlb_magic_page(vcpu, eaddr))
+ break;
+
/* Check the guest TLB. */
gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
if (gtlb_index < 0) {
@@ -295,9 +295,26 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
struct page *new_page;
struct tlbe *stlbe;
hpa_t hpaddr;
+ u32 mas2 = gtlbe->mas2;
+ u32 mas3 = gtlbe->mas3;
stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
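+ /* A privileged access to the magic page bypasses gfn_to_page(): it is
+ * backed directly by the host page holding vcpu->arch.shared. */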
+ if ((vcpu_e500->vcpu.arch.magic_page_ea) &&
+ ((vcpu_e500->vcpu.arch.magic_page_pa >> PAGE_SHIFT) == gfn) &&
+ !(vcpu_e500->vcpu.arch.shared->msr & MSR_PR)) {
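+ /* Clear WIMGE attributes and grant full supervisor permissions */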
+ mas2 = 0;
+ mas3 = E500_TLB_SUPER_PERM_MASK;
+ hpaddr = virt_to_phys(vcpu_e500->vcpu.arch.shared);
+ new_page = pfn_to_page(hpaddr >> PAGE_SHIFT);
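+ /* Take a page reference to mirror the gfn_to_page() path below */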
+ get_page(new_page);
+ goto mapped;
+ }
+
/* Get reference to new page. */
new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn);
if (is_error_page(new_page)) {
@@ -305,6 +322,8 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
kvm_release_page_clean(new_page);
return;
}
+
+mapped:
hpaddr = page_to_phys(new_page);
/* Drop reference to old page. */
@@ -316,10 +335,10 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
stlbe->mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K)
| MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
stlbe->mas2 = (gvaddr & MAS2_EPN)
- | e500_shadow_mas2_attrib(gtlbe->mas2,
+ | e500_shadow_mas2_attrib(mas2,
vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
stlbe->mas3 = (hpaddr & MAS3_RPN)
- | e500_shadow_mas3_attrib(gtlbe->mas3,
+ | e500_shadow_mas3_attrib(mas3,
vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN;