Message ID | 1375788674-13140-7-git-send-email-Bharat.Bhushan@freescale.com (mailing list archive) |
---|---|
State | New, archived |
On Tue, 2013-08-06 at 17:01 +0530, Bharat Bhushan wrote:
> @@ -449,7 +446,16 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
>  		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
>  	}
> 
> -	kvmppc_e500_ref_setup(ref, gtlbe, pfn);
> +	pgdir = vcpu_e500->vcpu.arch.pgdir;
> +	ptep = lookup_linux_pte(pgdir, hva, &tsize_pages);
> +	if (pte_present(*ptep)) {
> +		wimg = (pte_val(*ptep) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
> +	} else {
> +		printk(KERN_ERR "pte not present: gfn %lx, pfn %lx\n",
> +				(long)gfn, pfn);
> +		return -EINVAL;

Don't let the guest spam the host kernel console by repeatedly accessing bad mappings (even if it requires host userspace to assist by pointing a memslot at a bad hva). This should at most be printk_ratelimited(), and probably just pr_debug(). It should also have __func__ context.

Also, I don't see the return value getting checked (the immediate callers check it and propagate the error, but kvmppc_mmu_map() doesn't). We want to send a machine check to the guest if this happens (or possibly exit to userspace since it indicates a bad memslot, not just a guest bug). We don't want to just silently retry over and over.

Otherwise, this series looks good to me.

-Scott
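[Editor's note] For reference, a minimal sketch of the error path with Scott's two suggestions folded in (rate limiting plus __func__ context); the log level and message wording here are placeholders, not what the series finally adopts:

	ptep = lookup_linux_pte(pgdir, hva, &tsize_pages);
	if (!pte_present(*ptep)) {
		/* Rate-limited so a guest hammering a bad mapping cannot flood
		 * the host console; pr_debug() is the quieter option Scott
		 * mentions for once a machine check is delivered instead. */
		pr_err_ratelimited("%s: pte not present: gfn %lx, pfn %lx\n",
				   __func__, (long)gfn, pfn);
		return -EINVAL;
	}
	wimg = (pte_val(*ptep) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;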
> -----Original Message-----
> From: Wood Scott-B07421
> Sent: Saturday, August 10, 2013 6:35 AM
> To: Bhushan Bharat-R65777
> Cc: benh@kernel.crashing.org; agraf@suse.de; paulus@samba.org; kvm@vger.kernel.org; kvm-ppc@vger.kernel.org; linuxppc-dev@lists.ozlabs.org; Bhushan Bharat-R65777
> Subject: Re: [PATCH 6/6 v3] kvm: powerpc: use caching attributes as per linux pte
>
> Don't let the guest spam the host kernel console by repeatedly accessing bad
> mappings (even if it requires host userspace to assist by pointing a memslot at
> a bad hva).  This should at most be printk_ratelimited(), and probably just
> pr_debug().  It should also have __func__ context.

Very good point, I will make this printk_ratelimited() in this patch, and convert this and other error prints to pr_debug() when we send a machine check on error in this flow.

> Also, I don't see the return value getting checked (the immediate callers check
> it and propagate the error, but kvmppc_mmu_map() doesn't).
> We want to send a machine check to the guest if this happens (or possibly exit
> to userspace since it indicates a bad memslot, not just a guest bug).  We don't
> want to just silently retry over and over.

I completely agree with you, but this is something that was already missing (the error return from this function is nothing new added in this patch), so I would like to take that up separately.

> Otherwise, this series looks good to me.

Thank you. :)
-Bharat
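[Editor's note] Purely as an illustration of the follow-up Bharat defers here, and assuming kvmppc_mmu_map() were changed to propagate the error it currently ignores, the caller in booke's exit handling could then stop the silent-retry behaviour Scott describes, for example by bailing out to userspace:

	/* Hypothetical follow-up, not part of this series: act on a mapping
	 * failure instead of re-entering the guest to fault again. */
	if (kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index)) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		r = RESUME_HOST;
		break;
	}

Injecting a machine check into the guest, as Scott prefers, would be the other option; the exact mechanism is left to that follow-up patch.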
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 3328353..583d405 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -535,6 +535,7 @@ struct kvm_vcpu_arch {
 #endif
 	gpa_t paddr_accessed;
 	gva_t vaddr_accessed;
+	pgd_t *pgdir;
 
 	u8 io_gpr; /* GPR used as IO source/target */
 	u8 mmio_is_bigendian;
@@ -592,7 +593,6 @@ struct kvm_vcpu_arch {
 	struct list_head run_list;
 	struct task_struct *run_task;
 	struct kvm_run *kvm_run;
-	pgd_t *pgdir;
 
 	spinlock_t vpa_update_lock;
 	struct kvmppc_vpa vpa;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 17722d8..0d96d50 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -696,8 +696,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	kvmppc_load_guest_fp(vcpu);
 #endif
 
+	vcpu->arch.pgdir = current->mm->pgd;
 	kvmppc_fix_ee_before_entry();
-
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
 	/* No need for kvm_guest_exit. It's done in handle_exit.
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index 4fd9650..fc4b2f6 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -31,11 +31,13 @@ enum vcpu_ftr {
 #define E500_TLB_NUM   2
 
 /* entry is mapped somewhere in host TLB */
-#define E500_TLB_VALID		(1 << 0)
+#define E500_TLB_VALID		(1 << 31)
 /* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
-#define E500_TLB_BITMAP		(1 << 1)
+#define E500_TLB_BITMAP		(1 << 30)
 /* TLB1 entry is mapped by host TLB0 */
-#define E500_TLB_TLB0		(1 << 2)
+#define E500_TLB_TLB0		(1 << 29)
+/* Lower 5 bits have WIMGE value */
+#define E500_TLB_WIMGE_MASK	(0x1f)
 
 struct tlbe_ref {
 	pfn_t pfn;		/* valid only for TLB0, except briefly */
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 1c6a9d7..001a2b0 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -64,15 +64,6 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
 	return mas3;
 }
 
-static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
-{
-#ifdef CONFIG_SMP
-	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
-#else
-	return mas2 & MAS2_ATTRIB_MASK;
-#endif
-}
-
 /*
  * writing shadow tlb entry to host TLB
  */
@@ -248,10 +239,12 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
 
 static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 					 struct kvm_book3e_206_tlb_entry *gtlbe,
-					 pfn_t pfn)
+					 pfn_t pfn, int wimg)
 {
 	ref->pfn = pfn;
 	ref->flags |= E500_TLB_VALID;
+	/* Use guest supplied MAS2_G and MAS2_E */
+	ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
 
 	if (tlbe_is_writable(gtlbe))
 		kvm_set_pfn_dirty(pfn);
@@ -312,8 +305,7 @@ static void kvmppc_e500_setup_stlbe(
 
 	/* Force IPROT=0 for all guest mappings. */
 	stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
-	stlbe->mas2 = (gvaddr & MAS2_EPN) |
-		      e500_shadow_mas2_attrib(gtlbe->mas2, pr);
+	stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_WIMGE_MASK);
 	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
 			e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
 }
@@ -332,6 +324,10 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	unsigned long hva;
 	int pfnmap = 0;
 	int tsize = BOOK3E_PAGESZ_4K;
+	unsigned long tsize_pages = 0;
+	pte_t *ptep;
+	int wimg = 0;
+	pgd_t *pgdir;
 
 	/*
 	 * Translate guest physical to true physical, acquiring
@@ -394,7 +390,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		 */
 
 		for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
-			unsigned long gfn_start, gfn_end, tsize_pages;
+			unsigned long gfn_start, gfn_end;
 			tsize_pages = 1 << (tsize - 2);
 
 			gfn_start = gfn & ~(tsize_pages - 1);
@@ -436,7 +432,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	}
 
 	if (likely(!pfnmap)) {
-		unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
+		tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
+
 		pfn = gfn_to_pfn_memslot(slot, gfn);
 		if (is_error_noslot_pfn(pfn)) {
 			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
@@ -449,7 +446,16 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
 	}
 
-	kvmppc_e500_ref_setup(ref, gtlbe, pfn);
+	pgdir = vcpu_e500->vcpu.arch.pgdir;
+	ptep = lookup_linux_pte(pgdir, hva, &tsize_pages);
+	if (pte_present(*ptep)) {
+		wimg = (pte_val(*ptep) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
+	} else {
+		printk(KERN_ERR "pte not present: gfn %lx, pfn %lx\n",
+				(long)gfn, pfn);
+		return -EINVAL;
+	}
+	kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
 
 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
 				ref, gvaddr, stlbe);
KVM uses the same WIM TLB attributes as the corresponding qemu pte. For this we now search the Linux pte for the requested page and get the caching/coherency attributes from that pte.

Signed-off-by: Bharat Bhushan <bharat.bhushan@freescale.com>
---
v2->v3
 - setting pgdir before kvmppc_fix_ee_before_entry() on vcpu_run
 - Aligned as per changes in patch 5/6
 - setting WIMG for pfnmap pages also

v1->v2
 - Use Linux pte for wimge rather than RAM/no-RAM mechanism

 arch/powerpc/include/asm/kvm_host.h |    2 +-
 arch/powerpc/kvm/booke.c            |    2 +-
 arch/powerpc/kvm/e500.h             |    8 ++++--
 arch/powerpc/kvm/e500_mmu_host.c    |   36 ++++++++++++++++++++--------------
 4 files changed, 28 insertions(+), 20 deletions(-)
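[Editor's note] To summarize the mechanism (annotated lines taken from the diff above, not new code): the E500_TLB_* flags were moved to the top bits precisely so the low five bits of ref->flags can carry the WIMGE value from the host Linux pte into the shadow TLB entry.

	wimg = (pte_val(*ptep) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;           /* read WIMG from the Linux pte */
	ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;                  /* stash it (plus guest-supplied G/E) in tlbe_ref */
	stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_WIMGE_MASK); /* emit it into the shadow MAS2 */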