Message ID: 1598928612-68996-1-git-send-email-alex.shi@linux.alibaba.com
State:      New, archived
Series:     [v3,1/3] mm/pageblock: mitigation cmpxchg false sharing in pageblock flags
On 9/1/20 4:50 AM, Alex Shi wrote:
> [...]
>                  pageblock  pageblock  pageblock     rc2      rc2      rc2
>                     16        16-2       16-3         a        b        c
> Duration User       14.81     15.24      14.55      14.76    14.97    14.38
> Duration System     84.44     88.38      90.64     100.43    89.15    88.89
> Duration Elapsed    98.83     99.06      99.81     100.30    99.24    99.14

The large variance in these numbers suggests that 3 iterations are not
enough to conclude a statistically significant difference. You'd need more
iterations and to calculate at least mean + variance.
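As a rough illustration with the Duration System row above (mean and sample
standard deviation over the three runs):

  5.9-rc2:          (100.43 + 89.15 + 88.89) / 3 = 92.82, stddev ~6.6
  rc2 + patchset:   ( 84.44 + 88.38 + 90.64) / 3 = 87.82, stddev ~3.1

The ~5s gap between the means is smaller than one standard deviation of the
noisier series, so three runs cannot distinguish the two kernels; more
iterations and at least mean +/- stddev, as suggested above, would be needed.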
On 2020/9/2 1:06 AM, Vlastimil Babka wrote:
>>
>>                  pageblock  pageblock  pageblock     rc2      rc2      rc2
>>                     16        16-2       16-3         a        b        c
>> Duration User       14.81     15.24      14.55      14.76    14.97    14.38
>> Duration System     84.44     88.38      90.64     100.43    89.15    88.89
>> Duration Elapsed    98.83     99.06      99.81     100.30    99.24    99.14
> The large variance in these numbers suggests that 3 iterations are not
> enough to conclude a statistically significant difference. You'd need more
> iterations and to calculate at least mean + variance.

On my machine I also saw a lot of variation, mostly in the Amean numbers, but
the trace event below is more direct: it shows the hit_cmpxchg count dropping
from thousands of times to hundreds or fewer with this patch.

Thanks
Alex

diff --git a/include/trace/events/pageblock.h b/include/trace/events/pageblock.h
new file mode 100644
index 000000000000..003c2d716f82
--- /dev/null
+++ b/include/trace/events/pageblock.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pageblock
+
+#if !defined(_TRACE_PAGEBLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PAGEBLOCK_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(hit_cmpxchg,
+
+	TP_PROTO(char byte),
+
+	TP_ARGS(byte),
+
+	TP_STRUCT__entry(
+		__field(char, byte)
+	),
+
+	TP_fast_assign(
+		__entry->byte = byte;
+	),
+
+	TP_printk("%d", __entry->byte)
+);
+
+#endif /* _TRACE_PAGEBLOCK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 60342e764090..2422dec00484 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -509,6 +509,9 @@ static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned
  * @pfn: The target page frame number
  * @mask: mask of bits that the caller is interested in
  */
+#define CREATE_TRACE_POINTS
+#include <trace/events/pageblock.h>
+
 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
 					unsigned long pfn,
 					unsigned long mask)
@@ -536,6 +539,7 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
 		if (byte == old_byte)
 			break;
 		byte = old_byte;
+		trace_hit_cmpxchg(byte);
 	}
 }
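(For reference: since the event is registered under TRACE_SYSTEM pageblock,
it should be usable through the standard tracefs interface once a patched
kernel is booted, e.g. by writing 1 to events/pageblock/hit_cmpxchg/enable
under /sys/kernel/tracing (or /sys/kernel/debug/tracing) and counting
hit_cmpxchg lines in the trace buffer; nothing beyond the usual tracepoint
workflow is assumed here.)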
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 8379432f4f2f..be676e659fb7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -437,7 +437,7 @@ struct zone {
 	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
 	 * In SPARSEMEM, this map is stored in struct mem_section
 	 */
-	unsigned long		*pageblock_flags;
+	unsigned char		*pageblock_flags;
 #endif /* CONFIG_SPARSEMEM */
 
 	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
@@ -1159,7 +1159,7 @@ struct mem_section_usage {
 	DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
 #endif
 	/* See declaration of similar field in struct zone */
-	unsigned long pageblock_flags[0];
+	unsigned char pageblock_flags[0];
 };
 
 void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
@@ -1212,7 +1212,7 @@ struct mem_section {
 extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
 #endif
 
-static inline unsigned long *section_to_usemap(struct mem_section *ms)
+static inline unsigned char *section_to_usemap(struct mem_section *ms)
 {
 	return ms->usage->pageblock_flags;
 }
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index fff52ad370c1..d189441568eb 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -54,7 +54,7 @@ enum pageblock_bits {
 /* Forward declaration */
 struct page;
 
-unsigned long get_pfnblock_flags_mask(struct page *page,
+unsigned char get_pfnblock_flags_mask(struct page *page,
 				unsigned long pfn,
 				unsigned long mask);
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fab5e97dc9ca..81e96d4d9c42 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -445,7 +445,7 @@ static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 #endif
 
 /* Return a pointer to the bitmap storing bits affecting a block of pages */
-static inline unsigned long *get_pageblock_bitmap(struct page *page,
+static inline unsigned char *get_pageblock_bitmap(struct page *page,
 							unsigned long pfn)
 {
 #ifdef CONFIG_SPARSEMEM
@@ -474,24 +474,24 @@ static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
  * Return: pageblock_bits flags
  */
 static __always_inline
-unsigned long __get_pfnblock_flags_mask(struct page *page,
+unsigned char __get_pfnblock_flags_mask(struct page *page,
 					unsigned long pfn,
 					unsigned long mask)
 {
-	unsigned long *bitmap;
-	unsigned long bitidx, word_bitidx;
-	unsigned long word;
+	unsigned char *bitmap;
+	unsigned long bitidx, byte_bitidx;
+	unsigned char byte;
 
 	bitmap = get_pageblock_bitmap(page, pfn);
 	bitidx = pfn_to_bitidx(page, pfn);
-	word_bitidx = bitidx / BITS_PER_LONG;
-	bitidx &= (BITS_PER_LONG-1);
+	byte_bitidx = bitidx / BITS_PER_BYTE;
+	bitidx &= (BITS_PER_BYTE-1);
 
-	word = bitmap[word_bitidx];
-	return (word >> bitidx) & mask;
+	byte = bitmap[byte_bitidx];
+	return (byte >> bitidx) & mask;
 }
 
-unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
+unsigned char get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
 					unsigned long mask)
 {
 	return __get_pfnblock_flags_mask(page, pfn, mask);
@@ -513,29 +513,29 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
 					unsigned long pfn,
 					unsigned long mask)
 {
-	unsigned long *bitmap;
-	unsigned long bitidx, word_bitidx;
-	unsigned long old_word, word;
+	unsigned char *bitmap;
+	unsigned long bitidx, byte_bitidx;
+	unsigned char old_byte, byte;
 
 	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
 	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
 
 	bitmap = get_pageblock_bitmap(page, pfn);
 	bitidx = pfn_to_bitidx(page, pfn);
-	word_bitidx = bitidx / BITS_PER_LONG;
-	bitidx &= (BITS_PER_LONG-1);
+	byte_bitidx = bitidx / BITS_PER_BYTE;
+	bitidx &= (BITS_PER_BYTE-1);
 
 	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
 
 	mask <<= bitidx;
 	flags <<= bitidx;
 
-	word = READ_ONCE(bitmap[word_bitidx]);
+	byte = READ_ONCE(bitmap[byte_bitidx]);
 	for (;;) {
-		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
-		if (word == old_word)
+		old_byte = cmpxchg(&bitmap[byte_bitidx], byte, (byte & ~mask) | flags);
+		if (byte == old_byte)
 			break;
-		word = old_word;
+		byte = old_byte;
 	}
 }
pageblock_flags is currently accessed as unsigned long. Since each
pageblock needs only 4 flag bits, one long packs the flags of 8 (on a
32-bit machine) or 16 pageblocks, so setting one pageblock's flags has to
cmpxchg against 7 or 15 other pageblocks' flags in the same word, which
can mean long waits for the cmpxchg to succeed.

If we change pageblock_flags to unsigned char, we can use a char-sized
cmpxchg that covers only 2 pageblocks' flags, which relieves much of the
false sharing on the cmpxchg.

With this and the next patch, mmtests/thpscale gets slightly faster on my
4-core box, and the number of cmpxchg retries is reduced.

                 pageblock  pageblock  pageblock     rc2      rc2      rc2
                    16        16-2       16-3         a        b        c
Duration User       14.81     15.24      14.55      14.76    14.97    14.38
Duration System     84.44     88.38      90.64     100.43    89.15    88.89
Duration Elapsed    98.83     99.06      99.81     100.30    99.24    99.14

rc2 is the 5.9-rc2 kernel; pageblock is 5.9-rc2 plus this patchset.

Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org
---
 include/linux/mmzone.h          |  6 +++---
 include/linux/pageblock-flags.h |  2 +-
 mm/page_alloc.c                 | 38 +++++++++++++++++++-------------------
 3 files changed, 23 insertions(+), 23 deletions(-)
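To make the packing argument above concrete, here is a minimal userspace
sketch of the index arithmetic the patch switches from BITS_PER_LONG to
BITS_PER_BYTE. It is not part of the patch: pageblock_idx and the printout
are purely illustrative, and NR_PAGEBLOCK_BITS simply mirrors the kernel
constant.

/*
 * Illustrative only: shows which storage unit a pageblock's 4 flag bits
 * land in, and how many pageblocks share one cmpxchg under each scheme.
 */
#include <limits.h>
#include <stdio.h>

#define NR_PAGEBLOCK_BITS 4	/* mirrors the kernel constant */

int main(void)
{
	unsigned long pageblock_idx = 37;	/* arbitrary example block */
	unsigned long bitidx = pageblock_idx * NR_PAGEBLOCK_BITS;
	unsigned long bits_per_long = sizeof(long) * CHAR_BIT;

	/* old scheme: flags live in an unsigned long */
	printf("long: index %lu, bit %lu, %lu pageblocks share the cmpxchg\n",
	       bitidx / bits_per_long, bitidx % bits_per_long,
	       bits_per_long / NR_PAGEBLOCK_BITS);

	/* new scheme: flags live in an unsigned char */
	printf("byte: index %lu, bit %lu, %lu pageblocks share the cmpxchg\n",
	       bitidx / CHAR_BIT, bitidx % CHAR_BIT,
	       (unsigned long)(CHAR_BIT / NR_PAGEBLOCK_BITS));

	return 0;
}

On a 64-bit build this prints long index 2, bit 20, 16 sharers versus byte
index 18, bit 4, 2 sharers for the same pageblock, which is exactly the
reduction in false sharing the patch is after.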