Message ID | 1411500894-30542-1-git-send-email-andreslc@google.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Il 23/09/2014 21:34, Andres Lagar-Cavilla ha scritto: > Callbacks don't have to do extra computation to learn what the caller > (lvm_handle_hva_range()) knows very well. Useful for > debugging/tracing/printk/future. > > Signed-off-by: Andres Lagar-Cavilla <andreslc@google.com> > --- > arch/x86/kvm/mmu.c | 38 ++++++++++++++++++++++---------------- > include/trace/events/kvm.h | 10 ++++++---- > 2 files changed, 28 insertions(+), 20 deletions(-) > > diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c > index f33d5e4..cc14eba 100644 > --- a/arch/x86/kvm/mmu.c > +++ b/arch/x86/kvm/mmu.c > @@ -1262,7 +1262,8 @@ static bool rmap_write_protect(struct kvm *kvm, u64 gfn) > } > > static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, > - struct kvm_memory_slot *slot, unsigned long data) > + struct kvm_memory_slot *slot, gfn_t gfn, int level, > + unsigned long data) > { > u64 *sptep; > struct rmap_iterator iter; > @@ -1270,7 +1271,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, > > while ((sptep = rmap_get_first(*rmapp, &iter))) { > BUG_ON(!(*sptep & PT_PRESENT_MASK)); > - rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", sptep, *sptep); > + rmap_printk("kvm_rmap_unmap_hva: spte %p %llx gfn %llx (%d)\n", > + sptep, *sptep, gfn, level); > > drop_spte(kvm, sptep); > need_tlb_flush = 1; > @@ -1280,7 +1282,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, > } > > static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, > - struct kvm_memory_slot *slot, unsigned long data) > + struct kvm_memory_slot *slot, gfn_t gfn, int level, > + unsigned long data) > { > u64 *sptep; > struct rmap_iterator iter; > @@ -1294,7 +1297,8 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, > > for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { > BUG_ON(!is_shadow_present_pte(*sptep)); > - rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", sptep, *sptep); > + rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx 
(%d)\n", > + sptep, *sptep, gfn, level); > > need_flush = 1; > > @@ -1328,6 +1332,8 @@ static int kvm_handle_hva_range(struct kvm *kvm, > int (*handler)(struct kvm *kvm, > unsigned long *rmapp, > struct kvm_memory_slot *slot, > + gfn_t gfn, > + int level, > unsigned long data)) > { > int j; > @@ -1357,6 +1363,7 @@ static int kvm_handle_hva_range(struct kvm *kvm, > j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) { > unsigned long idx, idx_end; > unsigned long *rmapp; > + gfn_t gfn = gfn_start; > > /* > * {idx(page_j) | page_j intersects with > @@ -1367,8 +1374,10 @@ static int kvm_handle_hva_range(struct kvm *kvm, > > rmapp = __gfn_to_rmap(gfn_start, j, memslot); > > - for (; idx <= idx_end; ++idx) > - ret |= handler(kvm, rmapp++, memslot, data); > + for (; idx <= idx_end; > + ++idx, gfn += (1UL << KVM_HPAGE_GFN_SHIFT(j))) > + ret |= handler(kvm, rmapp++, memslot, > + gfn, j, data); > } > } > > @@ -1379,6 +1388,7 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, > unsigned long data, > int (*handler)(struct kvm *kvm, unsigned long *rmapp, > struct kvm_memory_slot *slot, > + gfn_t gfn, int level, > unsigned long data)) > { > return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); > @@ -1400,7 +1410,8 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) > } > > static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, > - struct kvm_memory_slot *slot, unsigned long data) > + struct kvm_memory_slot *slot, gfn_t gfn, int level, > + unsigned long data) > { > u64 *sptep; > struct rmap_iterator uninitialized_var(iter); > @@ -1410,25 +1421,20 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, > > for (sptep = rmap_get_first(*rmapp, &iter); sptep; > sptep = rmap_get_next(&iter)) { > - struct kvm_mmu_page *sp; > - gfn_t gfn; > BUG_ON(!is_shadow_present_pte(*sptep)); > - /* From spte to gfn. 
*/ > - sp = page_header(__pa(sptep)); > - gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); > - > if (*sptep & shadow_accessed_mask) { > young = 1; > clear_bit((ffs(shadow_accessed_mask) - 1), > (unsigned long *)sptep); > } > - trace_kvm_age_page(gfn, slot, young); > + trace_kvm_age_page(gfn, level, slot, young); > } > return young; > } > > static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, > - struct kvm_memory_slot *slot, unsigned long data) > + struct kvm_memory_slot *slot, gfn_t gfn, > + int level, unsigned long data) > { > u64 *sptep; > struct rmap_iterator iter; > @@ -1466,7 +1472,7 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) > > rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); > > - kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, 0); > + kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0); > kvm_flush_remote_tlbs(vcpu->kvm); > } > > diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h > index 0d2de78..6edf1f2 100644 > --- a/include/trace/events/kvm.h > +++ b/include/trace/events/kvm.h > @@ -225,24 +225,26 @@ TRACE_EVENT(kvm_fpu, > ); > > TRACE_EVENT(kvm_age_page, > - TP_PROTO(ulong gfn, struct kvm_memory_slot *slot, int ref), > - TP_ARGS(gfn, slot, ref), > + TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref), > + TP_ARGS(gfn, level, slot, ref), > > TP_STRUCT__entry( > __field( u64, hva ) > __field( u64, gfn ) > + __field( u8, level ) > __field( u8, referenced ) > ), > > TP_fast_assign( > __entry->gfn = gfn; > + __entry->level = level; > __entry->hva = ((gfn - slot->base_gfn) << > PAGE_SHIFT) + slot->userspace_addr; > __entry->referenced = ref; > ), > > - TP_printk("hva %llx gfn %llx %s", > - __entry->hva, __entry->gfn, > + TP_printk("hva %llx gfn %llx level %u %s", > + __entry->hva, __entry->gfn, __entry->level, > __entry->referenced ? "YOUNG" : "OLD") > ); > > Looks good, thanks. 
Paolo -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index f33d5e4..cc14eba 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1262,7 +1262,8 @@ static bool rmap_write_protect(struct kvm *kvm, u64 gfn) } static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, - struct kvm_memory_slot *slot, unsigned long data) + struct kvm_memory_slot *slot, gfn_t gfn, int level, + unsigned long data) { u64 *sptep; struct rmap_iterator iter; @@ -1270,7 +1271,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, while ((sptep = rmap_get_first(*rmapp, &iter))) { BUG_ON(!(*sptep & PT_PRESENT_MASK)); - rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", sptep, *sptep); + rmap_printk("kvm_rmap_unmap_hva: spte %p %llx gfn %llx (%d)\n", + sptep, *sptep, gfn, level); drop_spte(kvm, sptep); need_tlb_flush = 1; @@ -1280,7 +1282,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, } static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, - struct kvm_memory_slot *slot, unsigned long data) + struct kvm_memory_slot *slot, gfn_t gfn, int level, + unsigned long data) { u64 *sptep; struct rmap_iterator iter; @@ -1294,7 +1297,8 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { BUG_ON(!is_shadow_present_pte(*sptep)); - rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", sptep, *sptep); + rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n", + sptep, *sptep, gfn, level); need_flush = 1; @@ -1328,6 +1332,8 @@ static int kvm_handle_hva_range(struct kvm *kvm, int (*handler)(struct kvm *kvm, unsigned long *rmapp, struct kvm_memory_slot *slot, + gfn_t gfn, + int level, unsigned long data)) { int j; @@ -1357,6 +1363,7 @@ static int kvm_handle_hva_range(struct kvm *kvm, j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) { unsigned long idx, idx_end; unsigned long *rmapp; + gfn_t gfn = gfn_start; /* * {idx(page_j) | page_j intersects with @@ -1367,8 +1374,10 @@ 
static int kvm_handle_hva_range(struct kvm *kvm, rmapp = __gfn_to_rmap(gfn_start, j, memslot); - for (; idx <= idx_end; ++idx) - ret |= handler(kvm, rmapp++, memslot, data); + for (; idx <= idx_end; + ++idx, gfn += (1UL << KVM_HPAGE_GFN_SHIFT(j))) + ret |= handler(kvm, rmapp++, memslot, + gfn, j, data); } } @@ -1379,6 +1388,7 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, unsigned long data, int (*handler)(struct kvm *kvm, unsigned long *rmapp, struct kvm_memory_slot *slot, + gfn_t gfn, int level, unsigned long data)) { return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); @@ -1400,7 +1410,8 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) } static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, - struct kvm_memory_slot *slot, unsigned long data) + struct kvm_memory_slot *slot, gfn_t gfn, int level, + unsigned long data) { u64 *sptep; struct rmap_iterator uninitialized_var(iter); @@ -1410,25 +1421,20 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, for (sptep = rmap_get_first(*rmapp, &iter); sptep; sptep = rmap_get_next(&iter)) { - struct kvm_mmu_page *sp; - gfn_t gfn; BUG_ON(!is_shadow_present_pte(*sptep)); - /* From spte to gfn. 
*/ - sp = page_header(__pa(sptep)); - gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); - if (*sptep & shadow_accessed_mask) { young = 1; clear_bit((ffs(shadow_accessed_mask) - 1), (unsigned long *)sptep); } - trace_kvm_age_page(gfn, slot, young); + trace_kvm_age_page(gfn, level, slot, young); } return young; } static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, - struct kvm_memory_slot *slot, unsigned long data) + struct kvm_memory_slot *slot, gfn_t gfn, + int level, unsigned long data) { u64 *sptep; struct rmap_iterator iter; @@ -1466,7 +1472,7 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); - kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, 0); + kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0); kvm_flush_remote_tlbs(vcpu->kvm); } diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 0d2de78..6edf1f2 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -225,24 +225,26 @@ TRACE_EVENT(kvm_fpu, ); TRACE_EVENT(kvm_age_page, - TP_PROTO(ulong gfn, struct kvm_memory_slot *slot, int ref), - TP_ARGS(gfn, slot, ref), + TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref), + TP_ARGS(gfn, level, slot, ref), TP_STRUCT__entry( __field( u64, hva ) __field( u64, gfn ) + __field( u8, level ) __field( u8, referenced ) ), TP_fast_assign( __entry->gfn = gfn; + __entry->level = level; __entry->hva = ((gfn - slot->base_gfn) << PAGE_SHIFT) + slot->userspace_addr; __entry->referenced = ref; ), - TP_printk("hva %llx gfn %llx %s", - __entry->hva, __entry->gfn, + TP_printk("hva %llx gfn %llx level %u %s", + __entry->hva, __entry->gfn, __entry->level, __entry->referenced ? "YOUNG" : "OLD") );
Callbacks don't have to do extra computation to learn what the caller (kvm_handle_hva_range()) knows very well. Useful for debugging/tracing/printk/future. Signed-off-by: Andres Lagar-Cavilla <andreslc@google.com> --- arch/x86/kvm/mmu.c | 38 ++++++++++++++++++++++---------------- include/trace/events/kvm.h | 10 ++++++---- 2 files changed, 28 insertions(+), 20 deletions(-)