diff mbox series

[v2] filemap: add trace events for get_pages, map_pages, and fault

Message ID 20240620161903.3176859-1-takayas@chromium.org (mailing list archive)
State New
Headers show
Series [v2] filemap: add trace events for get_pages, map_pages, and fault | expand

Commit Message

Takaya Saeki June 20, 2024, 4:19 p.m. UTC
To allow precise tracking of which page-cache pages are accessed, add
new tracepoints that trigger when a process actually accesses them.

The ureadahead program used by ChromeOS traces the disk accesses of
programs as they start up during boot. It uses mincore(2) or the
'mm_filemap_add_to_page_cache' trace event to accomplish this. It stores
this information in a "pack" file, and on subsequent boots it reads the
pack file and calls readahead(2) on the recorded ranges so that the data
is loaded from disk into RAM before the applications actually need it.
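
For illustration, a minimal userspace sketch of the replay step is shown
below. The file path and the (offset, length) pair are made-up
placeholders, not the real ureadahead code or pack-file format:

/*
 * Illustrative only: replay one recorded range with readahead(2).
 * "path", "offset" and "length" stand in for entries read from the
 * pack file.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int replay_range(const char *path, off_t offset, size_t length)
{
        int fd = open(path, O_RDONLY);

        if (fd < 0) {
                perror("open");
                return -1;
        }
        /* Ask the kernel to populate the page cache for this range. */
        if (readahead(fd, offset, length) < 0)
                perror("readahead");
        close(fd);
        return 0;
}

int main(void)
{
        /* Hypothetical pack-file entry: 128 KiB starting at offset 0. */
        return replay_range("/opt/google/chrome/chrome", 0, 128 * 1024);
}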

A problem we see is that the kernel's readahead algorithm can
aggressively pull in more data than needed (to try to accomplish the
same goal), and this extra data is also recorded. The end result is
that the pack file contains a lot of pages on disk that are never
actually used. Calling readahead(2) on these unused pages can slow down
system boot times.

To solve this, add three new trace events: get_pages, map_pages, and
fault. These will be used to trace the pages that are not only pulled
in from disk, but are actually used by the application. Only those
pages will be stored in the pack file, which helps boot performance.
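
As a usage sketch, the events can be enabled through tracefs before
collecting a boot trace. The snippet below assumes tracefs is mounted at
/sys/kernel/tracing and that the new events live in the existing
"filemap" group next to mm_filemap_add_to_page_cache:

/* Sketch: enable the filemap events used to build the pack file. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static const char * const events[] = {
        "mm_filemap_add_to_page_cache",
        "mm_filemap_get_pages",
        "mm_filemap_map_pages",
        "mm_filemap_fault",
};

int main(void)
{
        char path[128];
        unsigned int i;

        for (i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
                int fd;

                snprintf(path, sizeof(path),
                         "/sys/kernel/tracing/events/filemap/%s/enable",
                         events[i]);
                fd = open(path, O_WRONLY);
                if (fd < 0) {
                        perror(path);
                        continue;
                }
                if (write(fd, "1", 1) != 1)
                        perror("write");
                close(fd);
        }
        return 0;
}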

With the combination of these three new trace events and
mm_filemap_add_to_page_cache, we observed a 7.3% - 20% reduction in the
pack file size on ChromeOS, varying by device.

Signed-off-by: Takaya Saeki <takayas@chromium.org>
---
Changelog between v2 and v1
- Fix a file offset type usage by casting pgoff_t to loff_t (see the
  note below)
- Fix the format string of dev and inode
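
A short note on the cast fix above (illustrative reasoning, not part of
the patch): pgoff_t is an unsigned long, so on a 32-bit kernel shifting
a page index left by PAGE_SHIFT overflows for byte offsets of 4 GiB and
beyond; widening to the 64-bit loff_t before the shift keeps the offset
exact. A standalone user-space demonstration of the same overflow:

#include <stdio.h>

int main(void)
{
        unsigned long index = 0x100000UL;   /* page index for byte offset 4 GiB */
        unsigned int page_shift = 12;       /* 4 KiB pages */

        /* Truncates to 0 where unsigned long is 32 bits wide. */
        long long bad = index << page_shift;
        /* Widen first, as the patch now does with (loff_t): always 0x100000000. */
        long long good = (long long)index << page_shift;

        printf("bad=%#llx good=%#llx\n", bad, good);
        return 0;
}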

 include/trace/events/filemap.h | 84 ++++++++++++++++++++++++++++++++++
 mm/filemap.c                   |  4 ++
 2 files changed, 88 insertions(+)

V1: https://lore.kernel.org/all/20240618093656.1944210-1-takayas@chromium.org/

Comments

Masami Hiramatsu (Google) June 26, 2024, 12:31 p.m. UTC | #1
On Thu, 20 Jun 2024 16:19:03 +0000
Takaya Saeki <takayas@chromium.org> wrote:

> To allow precise tracking of which page-cache pages are accessed, add
> new tracepoints that trigger when a process actually accesses them.
> 
> The ureadahead program used by ChromeOS traces the disk accesses of
> programs as they start up during boot. It uses mincore(2) or the
> 'mm_filemap_add_to_page_cache' trace event to accomplish this. It stores
> this information in a "pack" file, and on subsequent boots it reads the
> pack file and calls readahead(2) on the recorded ranges so that the data
> is loaded from disk into RAM before the applications actually need it.
> 
> A problem we see is that the kernel's readahead algorithm can
> aggressively pull in more data than needed (to try to accomplish the
> same goal), and this extra data is also recorded. The end result is
> that the pack file contains a lot of pages on disk that are never
> actually used. Calling readahead(2) on these unused pages can slow down
> system boot times.
> 
> To solve this, add three new trace events: get_pages, map_pages, and
> fault. These will be used to trace the pages that are not only pulled
> in from disk, but are actually used by the application. Only those
> pages will be stored in the pack file, which helps boot performance.
> 
> With the combination of these three new trace events and
> mm_filemap_add_to_page_cache, we observed a 7.3% - 20% reduction in the
> pack file size on ChromeOS, varying by device.
> 

This looks good to me from the trace-event point of view.

Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>

Thanks!

> Signed-off-by: Takaya Saeki <takayas@chromium.org>
> ---
> Changelog between v2 and v1
> - Fix a file offset type usage by casting pgoff_t to loff_t
> - Fixed format string of dev and inode
> 
>  include/trace/events/filemap.h | 84 ++++++++++++++++++++++++++++++++++
>  mm/filemap.c                   |  4 ++
>  2 files changed, 88 insertions(+)
> 
> V1: https://lore.kernel.org/all/20240618093656.1944210-1-takayas@chromium.org/
> 
> diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
> index 46c89c1e460c..3a94bd633bf0 100644
> --- a/include/trace/events/filemap.h
> +++ b/include/trace/events/filemap.h
> @@ -56,6 +56,90 @@ DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache,
>  	TP_ARGS(folio)
>  	);
>  
> +DECLARE_EVENT_CLASS(mm_filemap_op_page_cache_range,
> +
> +	TP_PROTO(
> +		struct address_space *mapping,
> +		pgoff_t index,
> +		pgoff_t last_index
> +	),
> +
> +	TP_ARGS(mapping, index, last_index),
> +
> +	TP_STRUCT__entry(
> +		__field(unsigned long, i_ino)
> +		__field(dev_t, s_dev)
> +		__field(unsigned long, index)
> +		__field(unsigned long, last_index)
> +	),
> +
> +	TP_fast_assign(
> +		__entry->i_ino = mapping->host->i_ino;
> +		if (mapping->host->i_sb)
> +			__entry->s_dev =
> +				mapping->host->i_sb->s_dev;
> +		else
> +			__entry->s_dev = mapping->host->i_rdev;
> +		__entry->index = index;
> +		__entry->last_index = last_index;
> +	),
> +
> +	TP_printk(
> +		"dev=%d:%d ino=%lx ofs=%lld max_ofs=%lld",
> +		MAJOR(__entry->s_dev),
> +		MINOR(__entry->s_dev), __entry->i_ino,
> +		((loff_t)__entry->index) << PAGE_SHIFT,
> +		((loff_t)__entry->last_index) << PAGE_SHIFT
> +	)
> +);
> +
> +DEFINE_EVENT(mm_filemap_op_page_cache_range, mm_filemap_get_pages,
> +	TP_PROTO(
> +		struct address_space *mapping,
> +		pgoff_t index,
> +		pgoff_t last_index
> +	),
> +	TP_ARGS(mapping, index, last_index)
> +);
> +
> +DEFINE_EVENT(mm_filemap_op_page_cache_range, mm_filemap_map_pages,
> +	TP_PROTO(
> +		struct address_space *mapping,
> +		pgoff_t index,
> +		pgoff_t last_index
> +	),
> +	TP_ARGS(mapping, index, last_index)
> +);
> +
> +TRACE_EVENT(mm_filemap_fault,
> +	TP_PROTO(struct address_space *mapping, pgoff_t index),
> +
> +	TP_ARGS(mapping, index),
> +
> +	TP_STRUCT__entry(
> +		__field(unsigned long, i_ino)
> +		__field(dev_t, s_dev)
> +		__field(unsigned long, index)
> +	),
> +
> +	TP_fast_assign(
> +		__entry->i_ino = mapping->host->i_ino;
> +		if (mapping->host->i_sb)
> +			__entry->s_dev =
> +				mapping->host->i_sb->s_dev;
> +		else
> +			__entry->s_dev = mapping->host->i_rdev;
> +		__entry->index = index;
> +	),
> +
> +	TP_printk(
> +		"dev=%d:%d ino=%lx ofs=%lld",
> +		MAJOR(__entry->s_dev),
> +		MINOR(__entry->s_dev), __entry->i_ino,
> +		((loff_t)__entry->index) << PAGE_SHIFT
> +	)
> +);
> +
>  TRACE_EVENT(filemap_set_wb_err,
>  		TP_PROTO(struct address_space *mapping, errseq_t eseq),
>  
> diff --git a/mm/filemap.c b/mm/filemap.c
> index 876cc64aadd7..39f9d7fb3d2c 100644
> --- a/mm/filemap.c
> +++ b/mm/filemap.c
> @@ -2556,6 +2556,7 @@ static int filemap_get_pages(struct kiocb *iocb, size_t count,
>  			goto err;
>  	}
>  
> +	trace_mm_filemap_get_pages(mapping, index, last_index);
>  	return 0;
>  err:
>  	if (err < 0)
> @@ -3286,6 +3287,8 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
>  	if (unlikely(index >= max_idx))
>  		return VM_FAULT_SIGBUS;
>  
> +	trace_mm_filemap_fault(mapping, index);
> +
>  	/*
>  	 * Do we have something in the page cache already?
>  	 */
> @@ -3652,6 +3655,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
>  	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
>  	add_mm_counter(vma->vm_mm, folio_type, rss);
>  	pte_unmap_unlock(vmf->pte, vmf->ptl);
> +	trace_mm_filemap_map_pages(mapping, start_pgoff, end_pgoff);
>  out:
>  	rcu_read_unlock();
>  
> -- 
> 2.45.2.627.g7a2c4fd464-goog
>
Steven Rostedt June 26, 2024, 1:58 p.m. UTC | #2
On Wed, 26 Jun 2024 21:31:57 +0900
Masami Hiramatsu (Google) <mhiramat@kernel.org> wrote:

> On Thu, 20 Jun 2024 16:19:03 +0000
> Takaya Saeki <takayas@chromium.org> wrote:
> 
> > To allow precise tracking of which page-cache pages are accessed, add
> > new tracepoints that trigger when a process actually accesses them.
> > 
> > The ureadahead program used by ChromeOS traces the disk accesses of
> > programs as they start up during boot. It uses mincore(2) or the
> > 'mm_filemap_add_to_page_cache' trace event to accomplish this. It stores
> > this information in a "pack" file, and on subsequent boots it reads the
> > pack file and calls readahead(2) on the recorded ranges so that the data
> > is loaded from disk into RAM before the applications actually need it.
> > 
> > A problem we see is that the kernel's readahead algorithm can
> > aggressively pull in more data than needed (to try to accomplish the
> > same goal), and this extra data is also recorded. The end result is
> > that the pack file contains a lot of pages on disk that are never
> > actually used. Calling readahead(2) on these unused pages can slow down
> > system boot times.
> > 
> > To solve this, add three new trace events: get_pages, map_pages, and
> > fault. These will be used to trace the pages that are not only pulled
> > in from disk, but are actually used by the application. Only those
> > pages will be stored in the pack file, which helps boot performance.
> > 
> > With the combination of these three new trace events and
> > mm_filemap_add_to_page_cache, we observed a 7.3% - 20% reduction in the
> > pack file size on ChromeOS, varying by device.
> >   
> 
> This looks good to me from the trace-event point of view.
> 
> Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>

I added my Reviewed-by on the last patch; you could have added it to
this one as it didn't change much. But anyway, here it is again:

Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>

-- Steve
Takaya Saeki July 2, 2024, 10:27 a.m. UTC | #3
Hello all, and thank you so much for the review, Steven and Masami.

I'm currently considering replacing the `max_ofs` output with
`length`. Please let me know your thoughts.
With the current design, a memory range of an event is an inclusive
range of [ofs, max_ofs + 4096]. I found the `+4096` part confusing
during ureadahead's upstreaming work. Replacing `max_ofs` with
`length` makes the range specified by an event much more concise.
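
A rough sketch of what that could look like (hypothetical, for
discussion only; the "len" field and format below are not part of the
posted patch):

	TP_printk(
		"dev=%d:%d ino=%lx ofs=%lld len=%lld",
		MAJOR(__entry->s_dev),
		MINOR(__entry->s_dev), __entry->i_ino,
		((loff_t)__entry->index) << PAGE_SHIFT,
		((loff_t)(__entry->last_index - __entry->index + 1)) << PAGE_SHIFT
	)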
Steven Rostedt July 2, 2024, 4:37 p.m. UTC | #4
On Tue, 2 Jul 2024 19:27:16 +0900
Takaya Saeki <takayas@chromium.org> wrote:

> Hello all, and thank you so much for the review, Steven and Masami.
> 
> I'm currently considering replacing the `max_ofs` output with
> `length`. Please let me know your thoughts.
> With the current design, a memory range of an event is an inclusive
> range of [ofs, max_ofs + 4096]. I found the `+4096` part confusing
> during the ureadahead's upstreaming work. Replacing `max_ofs` with
> `length` makes the range specified by an event much more concise.

This makes sense to me.

Matthew, have any comments on this?

Thanks,

-- Steve
Takaya Saeki July 10, 2024, 8:20 a.m. UTC | #5
Hello Matthew, I'd appreciate it if you could comment on this.

Thank you.
Steven Rostedt Aug. 8, 2024, 1:17 a.m. UTC | #6
On Thu, 20 Jun 2024 16:19:03 +0000
Takaya Saeki <takayas@chromium.org> wrote:

> +DECLARE_EVENT_CLASS(mm_filemap_op_page_cache_range,
> +
> +	TP_PROTO(
> +		struct address_space *mapping,
> +		pgoff_t index,
> +		pgoff_t last_index
> +	),
> +
> +	TP_ARGS(mapping, index, last_index),
> +
> +	TP_STRUCT__entry(
> +		__field(unsigned long, i_ino)
> +		__field(dev_t, s_dev)
> +		__field(unsigned long, index)
> +		__field(unsigned long, last_index)
> +	),
> +
> +	TP_fast_assign(
> +		__entry->i_ino = mapping->host->i_ino;
> +		if (mapping->host->i_sb)
> +			__entry->s_dev =
> +				mapping->host->i_sb->s_dev;
> +		else
> +			__entry->s_dev = mapping->host->i_rdev;
> +		__entry->index = index;
> +		__entry->last_index = last_index;
> +	),
> +
> +	TP_printk(
> +		"dev=%d:%d ino=%lx ofs=%lld max_ofs=%lld",
> +		MAJOR(__entry->s_dev),
> +		MINOR(__entry->s_dev), __entry->i_ino,
> +		((loff_t)__entry->index) << PAGE_SHIFT,
> +		((loff_t)__entry->last_index) << PAGE_SHIFT
> +	)

Hmm, since the "ofs" is in decimal, perhaps we should just make it a range:

		"dev=%d:%d ino=%lx ofs=%lld-%lld",
		MAJOR(__entry->s_dev),
		MINOR(__entry->s_dev), __entry->i_ino,
		((loff_t)__entry->index) << PAGE_SHIFT,
		((((loff_t)__entry->last_index + 1) << PAGE_SHIFT) - 1)

?

-- Steve

> +);
> +
> +DEFINE_EVENT(mm_filemap_op_page_cache_range, mm_filemap_get_pages,
> +	TP_PROTO(
> +		struct address_space *mapping,
> +		pgoff_t index,
> +		pgoff_t last_index
> +	),
> +	TP_ARGS(mapping, index, last_index)
> +);
> +
> +DEFINE_EVENT(mm_filemap_op_page_cache_range, mm_filemap_map_pages,
> +	TP_PROTO(
> +		struct address_space *mapping,
> +		pgoff_t index,
> +		pgoff_t last_index
> +	),
> +	TP_ARGS(mapping, index, last_index)
> +);
> +
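
For concreteness, a worked example of the range format suggested above,
assuming 4 KiB pages (PAGE_SHIFT == 12):

	/*
	 * index = 3, last_index = 5 (pages 3..5 of the file)
	 * ofs = 3 << 12             = 12288
	 * end = ((5 + 1) << 12) - 1 = 24575  (inclusive last byte)
	 * so the event would print "ofs=12288-24575".
	 */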
Takaya Saeki Aug. 8, 2024, 4:19 p.m. UTC | #7
Oh yes. That sounds like a good idea. Let me update the patch with it.

Thanks,
Takaya Saeki
diff mbox series

Patch

diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index 46c89c1e460c..3a94bd633bf0 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -56,6 +56,90 @@  DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache,
 	TP_ARGS(folio)
 	);
 
+DECLARE_EVENT_CLASS(mm_filemap_op_page_cache_range,
+
+	TP_PROTO(
+		struct address_space *mapping,
+		pgoff_t index,
+		pgoff_t last_index
+	),
+
+	TP_ARGS(mapping, index, last_index),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, i_ino)
+		__field(dev_t, s_dev)
+		__field(unsigned long, index)
+		__field(unsigned long, last_index)
+	),
+
+	TP_fast_assign(
+		__entry->i_ino = mapping->host->i_ino;
+		if (mapping->host->i_sb)
+			__entry->s_dev =
+				mapping->host->i_sb->s_dev;
+		else
+			__entry->s_dev = mapping->host->i_rdev;
+		__entry->index = index;
+		__entry->last_index = last_index;
+	),
+
+	TP_printk(
+		"dev=%d:%d ino=%lx ofs=%lld max_ofs=%lld",
+		MAJOR(__entry->s_dev),
+		MINOR(__entry->s_dev), __entry->i_ino,
+		((loff_t)__entry->index) << PAGE_SHIFT,
+		((loff_t)__entry->last_index) << PAGE_SHIFT
+	)
+);
+
+DEFINE_EVENT(mm_filemap_op_page_cache_range, mm_filemap_get_pages,
+	TP_PROTO(
+		struct address_space *mapping,
+		pgoff_t index,
+		pgoff_t last_index
+	),
+	TP_ARGS(mapping, index, last_index)
+);
+
+DEFINE_EVENT(mm_filemap_op_page_cache_range, mm_filemap_map_pages,
+	TP_PROTO(
+		struct address_space *mapping,
+		pgoff_t index,
+		pgoff_t last_index
+	),
+	TP_ARGS(mapping, index, last_index)
+);
+
+TRACE_EVENT(mm_filemap_fault,
+	TP_PROTO(struct address_space *mapping, pgoff_t index),
+
+	TP_ARGS(mapping, index),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, i_ino)
+		__field(dev_t, s_dev)
+		__field(unsigned long, index)
+	),
+
+	TP_fast_assign(
+		__entry->i_ino = mapping->host->i_ino;
+		if (mapping->host->i_sb)
+			__entry->s_dev =
+				mapping->host->i_sb->s_dev;
+		else
+			__entry->s_dev = mapping->host->i_rdev;
+		__entry->index = index;
+	),
+
+	TP_printk(
+		"dev=%d:%d ino=%lx ofs=%lld",
+		MAJOR(__entry->s_dev),
+		MINOR(__entry->s_dev), __entry->i_ino,
+		((loff_t)__entry->index) << PAGE_SHIFT
+	)
+);
+
 TRACE_EVENT(filemap_set_wb_err,
 		TP_PROTO(struct address_space *mapping, errseq_t eseq),
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 876cc64aadd7..39f9d7fb3d2c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2556,6 +2556,7 @@  static int filemap_get_pages(struct kiocb *iocb, size_t count,
 			goto err;
 	}
 
+	trace_mm_filemap_get_pages(mapping, index, last_index);
 	return 0;
 err:
 	if (err < 0)
@@ -3286,6 +3287,8 @@  vm_fault_t filemap_fault(struct vm_fault *vmf)
 	if (unlikely(index >= max_idx))
 		return VM_FAULT_SIGBUS;
 
+	trace_mm_filemap_fault(mapping, index);
+
 	/*
 	 * Do we have something in the page cache already?
 	 */
@@ -3652,6 +3655,7 @@  vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
 	add_mm_counter(vma->vm_mm, folio_type, rss);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
+	trace_mm_filemap_map_pages(mapping, start_pgoff, end_pgoff);
 out:
 	rcu_read_unlock();