From patchwork Fri Jun 25 23:16:56 2010
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Alexander Graf <agraf@suse.de>
X-Patchwork-Id: 108170
From: Alexander Graf <agraf@suse.de>
To: kvm-ppc@vger.kernel.org
Cc: KVM list <kvm@vger.kernel.org>, linuxppc-dev
Subject: [PATCH] KVM: PPC: Add generic hpte management functions
Date: Sat, 26 Jun 2010 01:16:56 +0200
Message-Id: <1277507817-626-2-git-send-email-agraf@suse.de>
X-Mailer: git-send-email 1.6.0.2
In-Reply-To: <1277507817-626-1-git-send-email-agraf@suse.de>
References: <1277507817-626-1-git-send-email-agraf@suse.de>

diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
new file mode 100644
index 0000000..5826e61
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
+ *
+ * Authors:
+ *     Alexander Graf <agraf@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/hash.h>
+#include <linux/slab.h>
+
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/machdep.h>
+#include <asm/mmu_context.h>
+#include <asm/hw_irq.h>
+
+#define PTE_SIZE	12
+
+/* #define DEBUG_MMU */
+
+#ifdef DEBUG_MMU
+#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
+#else
+#define dprintk_mmu(a, ...) do { } while(0)
+#endif
+
+static inline u64 kvmppc_mmu_hash_pte(u64 eaddr) {
+	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
+}
+
+static inline u64 kvmppc_mmu_hash_vpte(u64 vpage) {
+	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
+}
+
+static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage) {
+	return hash_64((vpage & 0xffffff000ULL) >> 12,
+		       HPTEG_HASH_BITS_VPTE_LONG);
+}
+
+void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
+{
+	u64 index;
+
+	/* Add to ePTE list */
+	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
+	list_add(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);
+
+	/* Add to vPTE list */
+	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
+	list_add(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);
+
+	/* Add to vPTE_long list */
+	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
+	list_add(&pte->list_vpte_long, &vcpu->arch.hpte_hash_vpte_long[index]);
+}
+
+static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
+{
+	dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
+		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);
+
+	/* Different for 32 and 64 bit */
+	kvmppc_mmu_invalidate_pte(vcpu, pte);
+
+	if (pte->pte.may_write)
+		kvm_release_pfn_dirty(pte->pfn);
+	else
+		kvm_release_pfn_clean(pte->pfn);
+
+	list_del(&pte->list_pte);
+	list_del(&pte->list_vpte);
+	list_del(&pte->list_vpte_long);
+
+	kmem_cache_free(vcpu->arch.hpte_cache, pte);
+}
+
+static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
+{
+	struct hpte_cache *pte, *tmp;
+	int i;
+
+	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
+		struct list_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
+
+		list_for_each_entry_safe(pte, tmp, list, list_vpte_long) {
+			/* Jump over the helper entry */
+			if (&pte->list_vpte_long == list)
+				continue;
+
+			invalidate_pte(vcpu, pte);
+		}
+	}
+}
+
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
+{
+	u64 i;
+
+	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
+		    vcpu->arch.hpte_cache_count, guest_ea, ea_mask);
+
+	guest_ea &= ea_mask;
+
+	switch (ea_mask) {
+	case ~0xfffUL:
+	{
+		struct list_head *list;
+		struct hpte_cache *pte, *tmp;
+
+		/* Find the list of entries in the map */
+		list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];
+
+		/* Check the list for matching entries */
+		list_for_each_entry_safe(pte, tmp, list, list_pte) {
+			/* Jump over the helper entry */
+			if (&pte->list_pte == list)
+				continue;
+
+			/* Invalidate matching PTE */
+			if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
+				invalidate_pte(vcpu, pte);
+		}
+		break;
+	}
+	case 0x0ffff000:
+		/* 32-bit flush w/o segment, go through all possible segments */
+		for (i = 0; i < 0x100000000ULL; i += 0x10000000ULL)
+			kvmppc_mmu_pte_flush(vcpu, guest_ea | i, ~0xfffUL);
+		break;
+	case 0:
+		/* Doing a complete flush -> start from scratch */
+		kvmppc_mmu_pte_flush_all(vcpu);
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+/* Flush with mask 0xfffffffff */
+static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
+{
+	struct list_head *list;
+	struct hpte_cache *pte, *tmp;
+	u64 vp_mask = 0xfffffffffULL;
+
+	list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];
+
+	/* Check the list for matching entries */
+	list_for_each_entry_safe(pte, tmp, list, list_vpte) {
+		/* Jump over the helper entry */
+		if (&pte->list_vpte == list)
+			continue;
+
+		/* Invalidate matching PTEs */
+		if ((pte->pte.vpage & vp_mask) == guest_vp)
+			invalidate_pte(vcpu, pte);
+	}
+}
+
+/* Flush with mask 0xffffff000 */
+static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
+{
+	struct list_head *list;
+	struct hpte_cache *pte, *tmp;
+	u64 vp_mask = 0xffffff000ULL;
+
+	list = &vcpu->arch.hpte_hash_vpte_long[
+		kvmppc_mmu_hash_vpte_long(guest_vp)];
+
+	/* Check the list for matching entries */
+	list_for_each_entry_safe(pte, tmp, list, list_vpte_long) {
+		/* Jump over the helper entry */
+		if (&pte->list_vpte_long == list)
+			continue;
+
+		/* Invalidate matching PTEs */
+		if ((pte->pte.vpage & vp_mask) == guest_vp)
+			invalidate_pte(vcpu, pte);
+	}
+}
+
+void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
+{
+	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
+		    vcpu->arch.hpte_cache_count, guest_vp, vp_mask);
+	guest_vp &= vp_mask;
+
+	switch(vp_mask) {
+	case 0xfffffffffULL:
+		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
+		break;
+	case 0xffffff000ULL:
+		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+}
+
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
+{
+	struct hpte_cache *pte, *tmp;
+	int i;
+
+	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n",
+		    vcpu->arch.hpte_cache_count, pa_start, pa_end);
+
+	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
+		struct list_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
+
+		list_for_each_entry_safe(pte, tmp, list, list_vpte_long) {
+			/* Jump over the helper entry */
+			if (&pte->list_vpte_long == list)
+				continue;
+
+			if ((pte->pte.raddr >= pa_start) &&
+			    (pte->pte.raddr < pa_end)) {
+				invalidate_pte(vcpu, pte);
+			}
+		}
+	}
+}
+
+struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
+{
+	struct hpte_cache *pte;
+
+	pte = kmem_cache_zalloc(vcpu->arch.hpte_cache, GFP_KERNEL);
+	vcpu->arch.hpte_cache_count++;
+
+	if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
+		kvmppc_mmu_pte_flush_all(vcpu);
+
+	return pte;
+}
+
+void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
+{
+	kvmppc_mmu_pte_flush(vcpu, 0, 0);
+	kmem_cache_destroy(vcpu->arch.hpte_cache);
+}
+
+static void kvmppc_mmu_hpte_init_hash(struct list_head *hash_list, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		INIT_LIST_HEAD(&hash_list[i]);
+	}
+}
+
+int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
+{
+	char kmem_name[128];
+
+	/* init hpte slab cache */
+	snprintf(kmem_name, 128, "kvm-spt-%p", vcpu);
+	vcpu->arch.hpte_cache = kmem_cache_create(kmem_name,
+		sizeof(struct hpte_cache), sizeof(struct hpte_cache), 0, NULL);
+
+	/* init hpte lookup hashes */
+	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
+				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
+	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
+				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
+	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
+				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));
+
+	return 0;
+}
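
Editor's note, for readers looking at the new interface in isolation: the sketch below shows how a per-vcpu MMU backend (such as book3s_32/64_mmu_host.c) might drive these functions over the lifetime of one shadow mapping. It is a minimal illustration only, not part of the patch: the wrapper name example_map_shadow_page() is made up, the assumption that pte->pte is a struct kvmppc_pte comes from the other hunks of this series (not shown here), and error handling is reduced to the bare minimum.

/*
 * Illustrative caller of the generic hpte cache added above.
 * Builds against the same headers as book3s_mmu_hpte.c
 * (linux/kvm_host.h plus the book3s headers from this series).
 * example_map_shadow_page() is hypothetical and not in this patch.
 */
static int example_map_shadow_page(struct kvm_vcpu *vcpu,
				   struct kvmppc_pte *gpte,	/* assumed type of pte->pte */
				   unsigned long pfn, bool writable)
{
	struct hpte_cache *pte;

	/* Allocate a tracking entry; the cache flushes everything when full */
	pte = kvmppc_mmu_hpte_cache_next(vcpu);
	if (!pte)
		return -ENOMEM;

	/* Record what was mapped so invalidate_pte() can tear it down later */
	pte->pte = *gpte;
	pte->pfn = pfn;
	pte->pte.may_write = writable;

	/* ... the backend would insert the real host HPTE here ... */

	/* Hook the entry into all three lookup hashes (ea, vpage, vpage-long) */
	kvmppc_mmu_hpte_cache_map(vcpu, pte);

	return 0;
}

/*
 * Guest invalidations then land on one of the flush entry points, e.g.
 * a page-sized flush by effective address:
 *
 *	kvmppc_mmu_pte_flush(vcpu, ea, ~0xfffUL);
 *
 * and vcpu setup/teardown bracket everything with
 * kvmppc_mmu_hpte_init(vcpu) and kvmppc_mmu_hpte_destroy(vcpu).
 */

The three hash lists exist so that the same entry can be found cheaply by whichever key an invalidation arrives with: effective address (list_pte), full virtual page number (list_vpte), or the coarser vpage bucket used for range and full flushes (list_vpte_long).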