diff mbox

[5/6,v2] kvm: powerpc: booke: Add linux pte lookup like booke3s

Message ID 1375355558-19187-6-git-send-email-Bharat.Bhushan@freescale.com (mailing list archive)
State New, archived
Headers show

Commit Message

Bharat Bhushan Aug. 1, 2013, 11:12 a.m. UTC
KVM needs to look up the Linux PTE to get the TLB attributes (WIMGE).
This is similar to how book3s does it.
This will be used in follow-up patches.

Signed-off-by: Bharat Bhushan <bharat.bhushan@freescale.com>
---
v1->v2
 - This is a new change in this version

 arch/powerpc/include/asm/kvm_booke.h |   73 ++++++++++++++++++++++++++++++++++
 1 files changed, 73 insertions(+), 0 deletions(-)

Comments

Tiejun Chen Aug. 2, 2013, 6:37 a.m. UTC | #1
On 08/01/2013 07:12 PM, Bharat Bhushan wrote:
> KVM need to lookup linux pte for getting TLB attributes (WIMGE).
> This is similar to how book3s does.
> This will be used in follow-up patches.
>
> Signed-off-by: Bharat Bhushan <bharat.bhushan@freescale.com>
> ---
> v1->v2
>   - This is a new change in this version
>
>   arch/powerpc/include/asm/kvm_booke.h |   73 ++++++++++++++++++++++++++++++++++
>   1 files changed, 73 insertions(+), 0 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
> index d3c1eb3..903624d 100644
> --- a/arch/powerpc/include/asm/kvm_booke.h
> +++ b/arch/powerpc/include/asm/kvm_booke.h
> @@ -102,4 +102,77 @@ static inline ulong kvmppc_get_msr(struct kvm_vcpu *vcpu)
>   {
>   	return vcpu->arch.shared->msr;
>   }
> +
> +/*
> + * Lock and read a linux PTE.  If it's present and writable, atomically
> + * set dirty and referenced bits and return the PTE, otherwise return 0.
> + */
> +static inline pte_t kvmppc_read_update_linux_pte(pte_t *p, int writing)
> +{
> +	pte_t pte;
> +
> +#ifdef PTE_ATOMIC_UPDATES
> +	pte_t tmp;
> +        /* wait until _PAGE_BUSY is clear then set it atomically */
> +#ifdef CONFIG_PPC64
> +	__asm__ __volatile__ (
> +		"1:	ldarx	%0,0,%3\n"
> +		"	andi.	%1,%0,%4\n"
> +		"	bne-	1b\n"
> +		"	ori	%1,%0,%4\n"
> +		"	stdcx.	%1,0,%3\n"
> +		"	bne-	1b"
> +		: "=&r" (pte), "=&r" (tmp), "=m" (*p)
> +		: "r" (p), "i" (_PAGE_BUSY)
> +		: "cc");
> +#else
> +        __asm__ __volatile__ (
> +                "1:     lwarx   %0,0,%3\n"
> +                "       andi.   %1,%0,%4\n"
> +                "       bne-    1b\n"
> +                "       ori     %1,%0,%4\n"
> +                "       stwcx.  %1,0,%3\n"
> +                "       bne-    1b"
> +                : "=&r" (pte), "=&r" (tmp), "=m" (*p)
> +                : "r" (p), "i" (_PAGE_BUSY)
> +                : "cc");
> +#endif
> +#else
> +	pte = pte_val(*p);
> +#endif
> +
> +	if (pte_present(pte)) {
> +		pte = pte_mkyoung(pte);
> +		if (writing && pte_write(pte))
> +			pte = pte_mkdirty(pte);
> +	}
> +
> +	*p = pte;	/* clears _PAGE_BUSY */
> +
> +	return pte;
> +}
> +
> +static inline pte_t lookup_linux_pte(pgd_t *pgdir, unsigned long hva,
> +			      int writing, unsigned long *pte_sizep)

It looks like this function is the same as the book3s one, so why not factor it out as common code :)

Tiejun

> +{
> +	pte_t *ptep;
> +	unsigned long ps = *pte_sizep;
> +	unsigned int shift;
> +
> +	ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
> +	if (!ptep)
> +		return __pte(0);
> +	if (shift)
> +		*pte_sizep = 1ul << shift;
> +	else
> +		*pte_sizep = PAGE_SIZE;
> +
> +	if (ps > *pte_sizep)
> +		return __pte(0);
> +	if (!pte_present(*ptep))
> +		return __pte(0);
> +
> +	return kvmppc_read_update_linux_pte(ptep, writing);
> +}
> +
>   #endif /* __ASM_KVM_BOOKE_H__ */
>

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Scott Wood Aug. 2, 2013, 10:58 p.m. UTC | #2
On Thu, 2013-08-01 at 16:42 +0530, Bharat Bhushan wrote:
> KVM need to lookup linux pte for getting TLB attributes (WIMGE).
> This is similar to how book3s does.
> This will be used in follow-up patches.
> 
> Signed-off-by: Bharat Bhushan <bharat.bhushan@freescale.com>
> ---
> v1->v2
>  - This is a new change in this version
> 
>  arch/powerpc/include/asm/kvm_booke.h |   73 ++++++++++++++++++++++++++++++++++
>  1 files changed, 73 insertions(+), 0 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
> index d3c1eb3..903624d 100644
> --- a/arch/powerpc/include/asm/kvm_booke.h
> +++ b/arch/powerpc/include/asm/kvm_booke.h
> @@ -102,4 +102,77 @@ static inline ulong kvmppc_get_msr(struct kvm_vcpu *vcpu)
>  {
>  	return vcpu->arch.shared->msr;
>  }
> +
> +/*
> + * Lock and read a linux PTE.  If it's present and writable, atomically
> + * set dirty and referenced bits and return the PTE, otherwise return 0.
> + */
> +static inline pte_t kvmppc_read_update_linux_pte(pte_t *p, int writing)
> +{
> +	pte_t pte;
> +
> +#ifdef PTE_ATOMIC_UPDATES
> +	pte_t tmp;
> +        /* wait until _PAGE_BUSY is clear then set it atomically */

_PAGE_BUSY is 0 on book3e.

> +#ifdef CONFIG_PPC64
> +	__asm__ __volatile__ (
> +		"1:	ldarx	%0,0,%3\n"
> +		"	andi.	%1,%0,%4\n"
> +		"	bne-	1b\n"
> +		"	ori	%1,%0,%4\n"
> +		"	stdcx.	%1,0,%3\n"
> +		"	bne-	1b"
> +		: "=&r" (pte), "=&r" (tmp), "=m" (*p)
> +		: "r" (p), "i" (_PAGE_BUSY)
> +		: "cc");
> +#else
> +        __asm__ __volatile__ (
> +                "1:     lwarx   %0,0,%3\n"
> +                "       andi.   %1,%0,%4\n"
> +                "       bne-    1b\n"
> +                "       ori     %1,%0,%4\n"
> +                "       stwcx.  %1,0,%3\n"
> +                "       bne-    1b"
> +                : "=&r" (pte), "=&r" (tmp), "=m" (*p)
> +                : "r" (p), "i" (_PAGE_BUSY)
> +                : "cc");
> +#endif

What about 64-bit PTEs on 32-bit kernels?

In any case, this code does not belong in KVM.  It should be in the main
PPC mm code, even if KVM is the only user.

-Scott



--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Benjamin Herrenschmidt Aug. 2, 2013, 11:16 p.m. UTC | #3
On Fri, 2013-08-02 at 17:58 -0500, Scott Wood wrote:
> 
> What about 64-bit PTEs on 32-bit kernels?
> 
> In any case, this code does not belong in KVM.  It should be in the
> main
> PPC mm code, even if KVM is the only user.

Also don't we do similar things in BookS KVM ? At the very least that
stuff should become common. And yes, I agree, it should probably also
move to pgtable*

Cheers,
Ben.


--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Bharat Bhushan Aug. 3, 2013, 2:58 a.m. UTC | #4
DQoNCj4gLS0tLS1PcmlnaW5hbCBNZXNzYWdlLS0tLS0NCj4gRnJvbTogQmVuamFtaW4gSGVycmVu
c2NobWlkdCBbbWFpbHRvOmJlbmhAa2VybmVsLmNyYXNoaW5nLm9yZ10NCj4gU2VudDogU2F0dXJk
YXksIEF1Z3VzdCAwMywgMjAxMyA0OjQ3IEFNDQo+IFRvOiBXb29kIFNjb3R0LUIwNzQyMQ0KPiBD
YzogQmh1c2hhbiBCaGFyYXQtUjY1Nzc3OyBhZ3JhZkBzdXNlLmRlOyBrdm0tcHBjQHZnZXIua2Vy
bmVsLm9yZzsNCj4ga3ZtQHZnZXIua2VybmVsLm9yZzsgbGludXhwcGMtZGV2QGxpc3RzLm96bGFi
cy5vcmc7IEJodXNoYW4gQmhhcmF0LVI2NTc3Nw0KPiBTdWJqZWN0OiBSZTogW1BBVENIIDUvNiB2
Ml0ga3ZtOiBwb3dlcnBjOiBib29rZTogQWRkIGxpbnV4IHB0ZSBsb29rdXAgbGlrZQ0KPiBib29r
ZTNzDQo+IA0KPiBPbiBGcmksIDIwMTMtMDgtMDIgYXQgMTc6NTggLTA1MDAsIFNjb3R0IFdvb2Qg
d3JvdGU6DQo+ID4NCj4gPiBXaGF0IGFib3V0IDY0LWJpdCBQVEVzIG9uIDMyLWJpdCBrZXJuZWxz
Pw0KPiA+DQo+ID4gSW4gYW55IGNhc2UsIHRoaXMgY29kZSBkb2VzIG5vdCBiZWxvbmcgaW4gS1ZN
LiAgSXQgc2hvdWxkIGJlIGluIHRoZQ0KPiA+IG1haW4gUFBDIG1tIGNvZGUsIGV2ZW4gaWYgS1ZN
IGlzIHRoZSBvbmx5IHVzZXIuDQo+IA0KPiBBbHNvIGRvbid0IHdlIGRvIHNpbWlsYXIgdGhpbmdz
IGluIEJvb2tTIEtWTSA/IEF0IHRoZSB2ZXJ5IGxlYXN0IHRoYXQgc3V0ZmYNCj4gc2hvdWxkIGJl
Y29tZSBjb21tb24uIEFuZCB5ZXMsIEkgYWdyZWUsIGl0IHNob3VsZCBwcm9iYWJseSBhbHNvIG1v
dmUgdG8gcGd0YWJsZSoNCg0KT25lIG9mIHRoZSBwcm9ibGVtIEkgc2F3IHdhcyB0aGF0IGlmIEkg
cHV0IHRoaXMgY29kZSBpbiBhc20vcGd0YWJsZS0zMi5oIGFuZCBhc20vcGd0YWJsZS02NC5oIHRo
ZW4gcHRlX3BlcnNlbnQoKSBhbmQgb3RoZXIgZnJpZW5kIGZ1bmN0aW9uIChvbiB3aGljaCB0aGlz
IGNvZGUgZGVwZW5kcykgYXJlIGRlZmluZWQgaW4gcGd0YWJsZS5oLiBBbmQgcGd0YWJsZS5oIGlu
Y2x1ZGVzIGFzbS9wZ3RhYmxlLTMyLmggYW5kIGFzbS9wZ3RhYmxlLTY0LmggYmVmb3JlIGl0IGRl
ZmluZXMgcHRlX3ByZXNlbnQoKSBhbmQgZnJpZW5kcyBmdW5jdGlvbnMuDQoNCk9rIEkgbW92ZSB3
b3ZlIHRoaXMgaW4gYXNtL3BndGFibGUqLmgsIGluaXRpYWxseSBJIGZvdWdodCB3aXRoIG15c2Vs
ZiB0byB0YWtlIHRoaXMgY29kZSBpbiBwZ3RhYmxlKiBidXQgZmluYWxseSBlbmQgdXAgZG9pbmcg
aGVyZSAoZ290IGJpYXNlZCBieSBib29rM3MgOikpLg0KDQpUaGFua3MNCi1CaGFyYXQNCg0KPiAN
Cj4gQ2hlZXJzLA0KPiBCZW4uDQo+IA0KPiANCg0K

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Benjamin Herrenschmidt Aug. 3, 2013, 4:24 a.m. UTC | #5
On Sat, 2013-08-03 at 02:58 +0000, Bhushan Bharat-R65777 wrote:
> One of the problems I saw was that if I put this code in
> asm/pgtable-32.h and asm/pgtable-64.h then pte_present() and other
> friend functions (on which this code depends) are defined in pgtable.h.
> And pgtable.h includes asm/pgtable-32.h and asm/pgtable-64.h before it
> defines pte_present() and friend functions.
> 
> Ok, I will move this into asm/pgtable*.h; initially I debated with
> myself about putting this code in pgtable* but finally ended up doing
> it here (got biased by book3s :)).

Is there a reason why these routines can not be completely generic
in pgtable.h ?

Ben.


--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index d3c1eb3..903624d 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -102,4 +102,77 @@  static inline ulong kvmppc_get_msr(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.shared->msr;
 }
+
+/*
+ * Lock and read a linux PTE.  If it's present and writable, atomically
+ * set dirty and referenced bits and return the PTE, otherwise return 0.
+ */
+static inline pte_t kvmppc_read_update_linux_pte(pte_t *p, int writing)
+{
+	pte_t pte;
+
+#ifdef PTE_ATOMIC_UPDATES
+	pte_t tmp;
+        /* wait until _PAGE_BUSY is clear then set it atomically */
+#ifdef CONFIG_PPC64
+	__asm__ __volatile__ (
+		"1:	ldarx	%0,0,%3\n"
+		"	andi.	%1,%0,%4\n"
+		"	bne-	1b\n"
+		"	ori	%1,%0,%4\n"
+		"	stdcx.	%1,0,%3\n"
+		"	bne-	1b"
+		: "=&r" (pte), "=&r" (tmp), "=m" (*p)
+		: "r" (p), "i" (_PAGE_BUSY)
+		: "cc");
+#else
+        __asm__ __volatile__ (
+                "1:     lwarx   %0,0,%3\n"
+                "       andi.   %1,%0,%4\n"
+                "       bne-    1b\n"
+                "       ori     %1,%0,%4\n"
+                "       stwcx.  %1,0,%3\n"
+                "       bne-    1b"
+                : "=&r" (pte), "=&r" (tmp), "=m" (*p)
+                : "r" (p), "i" (_PAGE_BUSY)
+                : "cc");
+#endif
+#else
+	pte = pte_val(*p);
+#endif
+
+	if (pte_present(pte)) {
+		pte = pte_mkyoung(pte);
+		if (writing && pte_write(pte))
+			pte = pte_mkdirty(pte);
+	}
+
+	*p = pte;	/* clears _PAGE_BUSY */
+
+	return pte;
+}
+
+static inline pte_t lookup_linux_pte(pgd_t *pgdir, unsigned long hva,
+			      int writing, unsigned long *pte_sizep)
+{
+	pte_t *ptep;
+	unsigned long ps = *pte_sizep;
+	unsigned int shift;
+
+	ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
+	if (!ptep)
+		return __pte(0);
+	if (shift)
+		*pte_sizep = 1ul << shift;
+	else
+		*pte_sizep = PAGE_SIZE;
+
+	if (ps > *pte_sizep)
+		return __pte(0);
+	if (!pte_present(*ptep))
+		return __pte(0);
+
+	return kvmppc_read_update_linux_pte(ptep, writing);
+}
+
 #endif /* __ASM_KVM_BOOKE_H__ */