This patch enables tracking of the pages freed by the guest and of the
pages isolated by the page hinting code through kernel tracepoints.

Signed-off-by: Nitesh Narayan Lal <nitesh@redhat.com>
---
 include/trace/events/kmem.h | 62 +++++++++++++++++++++++++++++++++++++
 virt/kvm/page_hinting.c     | 12 +++++++
 2 files changed, 74 insertions(+)

diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -315,6 +315,68 @@ TRACE_EVENT(mm_page_alloc_extfrag,
__entry->change_ownership)
);
+TRACE_EVENT(guest_free_page,
+ TP_PROTO(unsigned long pfn, unsigned int order),
+
+ TP_ARGS(pfn, order),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(unsigned int, order)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->order = order;
+ ),
+
+ TP_printk("pfn=%lu order=%d",
+ __entry->pfn,
+ __entry->order)
+);
+
+TRACE_EVENT(guest_isolated_page,
+ TP_PROTO(unsigned long pfn, unsigned int order),
+
+ TP_ARGS(pfn, order),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(unsigned int, order)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->order = order;
+ ),
+
+ TP_printk("pfn=%lu order=%u",
+ __entry->pfn,
+ __entry->order)
+);
+
+TRACE_EVENT(guest_captured_page,
+ TP_PROTO(unsigned long pfn, unsigned int order, int idx),
+
+ TP_ARGS(pfn, order, idx),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(unsigned int, order)
+ __field(int, idx)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->order = order;
+ __entry->idx = idx;
+ ),
+
+ TP_printk("pfn=%lu order=%u array_index=%d",
+ __entry->pfn,
+ __entry->order,
+ __entry->idx)
+);
#endif /* _TRACE_KMEM_H */
/* This part must be outside protection */
diff --git a/virt/kvm/page_hinting.c b/virt/kvm/page_hinting.c
--- a/virt/kvm/page_hinting.c
+++ b/virt/kvm/page_hinting.c
@@ -4,6 +4,7 @@
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/sort.h>
+#include <trace/events/kmem.h>
/*
* struct guest_free_pages- holds array of guest freed PFN's along with an
@@ -178,6 +179,8 @@ static void guest_free_page_hinting(void)
ret = __isolate_free_page(page, buddy_order);
if (ret) {
+ trace_guest_isolated_page(pfn,
+ buddy_order);
isolated_pages_obj[hyp_idx].pfn = pfn;
isolated_pages_obj[hyp_idx].order =
buddy_order;
@@ -198,6 +201,8 @@ static void guest_free_page_hinting(void)
unsigned long buddy_pfn =
page_to_pfn(buddy_page);
+ trace_guest_isolated_page(buddy_pfn,
+ buddy_order);
isolated_pages_obj[hyp_idx].pfn =
buddy_pfn;
isolated_pages_obj[hyp_idx].order =
@@ -255,9 +260,12 @@ void guest_free_page_enqueue(struct page *page, int order)
local_irq_save(flags);
hinting_obj = this_cpu_ptr(&free_pages_obj);
l_idx = hinting_obj->free_pages_idx;
+ trace_guest_free_page(page_to_pfn(page), order);
if (l_idx != MAX_FGPT_ENTRIES) {
if (PageBuddy(page) && page_private(page) >=
FREE_PAGE_HINTING_MIN_ORDER) {
+ trace_guest_captured_page(page_to_pfn(page), order,
+ l_idx);
hinting_obj->free_page_arr[l_idx] = page_to_pfn(page);
hinting_obj->free_pages_idx += 1;
} else {
@@ -268,7 +276,11 @@ void guest_free_page_enqueue(struct page *page, int order)
!if_exist(buddy_page)) {
unsigned long buddy_pfn =
page_to_pfn(buddy_page);
+ unsigned int buddy_order =
+ page_private(buddy_page);
+ trace_guest_captured_page(buddy_pfn,
+ buddy_order, l_idx);
hinting_obj->free_page_arr[l_idx] =
buddy_pfn;
hinting_obj->free_pages_idx += 1;
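Not part of the patch itself, but for reviewers who want to see the new
events firing: a minimal sketch of enabling them through tracefs. This
assumes a kernel built with tracing support and tracefs reachable at
/sys/kernel/debug/tracing (on some setups it is /sys/kernel/tracing
instead):

  # Enable the three new events; since they are declared in
  # include/trace/events/kmem.h, they show up under the "kmem" group.
  echo 1 > /sys/kernel/debug/tracing/events/kmem/guest_free_page/enable
  echo 1 > /sys/kernel/debug/tracing/events/kmem/guest_isolated_page/enable
  echo 1 > /sys/kernel/debug/tracing/events/kmem/guest_captured_page/enable

  # Stream events as the guest frees pages and the hinting code
  # captures/isolates them.
  cat /sys/kernel/debug/tracing/trace_pipe

Comparing the pfn/order pairs reported by guest_free_page against those
reported by guest_captured_page and guest_isolated_page gives a quick
view of how many freed pages actually make it through the hinting path.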