Message ID | 20231214-vv-dax_abi-v5-3-3f7b006960b4@intel.com (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | Add DAX ABI for memmap_on_memory | expand |
On 14.12.23 08:37, Vishal Verma wrote: > In preparation for adding sysfs ABI to toggle memmap_on_memory semantics > for drivers adding memory, export the mhp_supports_memmap_on_memory() > helper. This allows drivers to check if memmap_on_memory support is > available before trying to request it, and display an appropriate > message if it isn't available. As part of this, remove the size argument > to this - with recent updates to allow memmap_on_memory for larger > ranges, and the internal splitting of altmaps into respective memory > blocks, the size argument is meaningless. > > Cc: Andrew Morton <akpm@linux-foundation.org> > Cc: David Hildenbrand <david@redhat.com> > Cc: Michal Hocko <mhocko@suse.com> > Cc: Oscar Salvador <osalvador@suse.de> > Cc: Dan Williams <dan.j.williams@intel.com> > Cc: Dave Jiang <dave.jiang@intel.com> > Cc: Dave Hansen <dave.hansen@linux.intel.com> > Cc: Huang Ying <ying.huang@intel.com> > Suggested-by: David Hildenbrand <david@redhat.com> > Signed-off-by: Vishal Verma <vishal.l.verma@intel.com> > --- > include/linux/memory_hotplug.h | 6 ++++++ > mm/memory_hotplug.c | 17 ++++++----------- > 2 files changed, 12 insertions(+), 11 deletions(-) > > diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h > index 7d2076583494..ebc9d528f00c 100644 > --- a/include/linux/memory_hotplug.h > +++ b/include/linux/memory_hotplug.h > @@ -121,6 +121,7 @@ struct mhp_params { > > bool mhp_range_allowed(u64 start, u64 size, bool need_mapping); > struct range mhp_get_pluggable_range(bool need_mapping); > +bool mhp_supports_memmap_on_memory(void); > > /* > * Zone resizing functions > @@ -262,6 +263,11 @@ static inline bool movable_node_is_enabled(void) > return false; > } > > +static bool mhp_supports_memmap_on_memory(void) > +{ > + return false; > +} > + > static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {} > static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {} > static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {} 
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c > index 926e1cfb10e9..751664c519f7 100644 > --- a/mm/memory_hotplug.c > +++ b/mm/memory_hotplug.c > @@ -1325,7 +1325,7 @@ static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size) > } > #endif > > -static bool mhp_supports_memmap_on_memory(unsigned long size) > +bool mhp_supports_memmap_on_memory(void) > { > unsigned long vmemmap_size = memory_block_memmap_size(); > unsigned long memmap_pages = memory_block_memmap_on_memory_pages(); > @@ -1334,17 +1334,11 @@ static bool mhp_supports_memmap_on_memory(unsigned long size) > * Besides having arch support and the feature enabled at runtime, we > * need a few more assumptions to hold true: > * > - * a) We span a single memory block: memory onlining/offlinin;g happens > - * in memory block granularity. We don't want the vmemmap of online > - * memory blocks to reside on offline memory blocks. In the future, > - * we might want to support variable-sized memory blocks to make the > - * feature more versatile. > - * > - * b) The vmemmap pages span complete PMDs: We don't want vmemmap code > + * a) The vmemmap pages span complete PMDs: We don't want vmemmap code > * to populate memory from the altmap for unrelated parts (i.e., > * other memory blocks) > * > - * c) The vmemmap pages (and thereby the pages that will be exposed to > + * b) The vmemmap pages (and thereby the pages that will be exposed to > * the buddy) have to cover full pageblocks: memory onlining/offlining > * code requires applicable ranges to be page-aligned, for example, to > * set the migratetypes properly. > @@ -1356,7 +1350,7 @@ static bool mhp_supports_memmap_on_memory(unsigned long size) > * altmap as an alternative source of memory, and we do not exactly > * populate a single PMD. 
> */ > - if (!mhp_memmap_on_memory() || size != memory_block_size_bytes()) > + if (!mhp_memmap_on_memory()) > return false; > > /* > @@ -1379,6 +1373,7 @@ static bool mhp_supports_memmap_on_memory(unsigned long size) > > return arch_supports_memmap_on_memory(vmemmap_size); > } > +EXPORT_SYMBOL_GPL(mhp_supports_memmap_on_memory); > > static void __ref remove_memory_blocks_and_altmaps(u64 start, u64 size) > { > @@ -1512,7 +1507,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags) > * Self hosted memmap array > */ > if ((mhp_flags & MHP_MEMMAP_ON_MEMORY) && > - mhp_supports_memmap_on_memory(memory_block_size_bytes())) { > + mhp_supports_memmap_on_memory()) { > ret = create_altmaps_and_memory_blocks(nid, group, start, size); > if (ret) > goto error; > Acked-by: David Hildenbrand <david@redhat.com>
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 7d2076583494..ebc9d528f00c 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -121,6 +121,7 @@ struct mhp_params { bool mhp_range_allowed(u64 start, u64 size, bool need_mapping); struct range mhp_get_pluggable_range(bool need_mapping); +bool mhp_supports_memmap_on_memory(void); /* * Zone resizing functions @@ -262,6 +263,11 @@ static inline bool movable_node_is_enabled(void) return false; } +static inline bool mhp_supports_memmap_on_memory(void) +{ + return false; +} + static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {} static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {} static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {} diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 926e1cfb10e9..751664c519f7 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1325,7 +1325,7 @@ static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size) } #endif -static bool mhp_supports_memmap_on_memory(unsigned long size) +bool mhp_supports_memmap_on_memory(void) { unsigned long vmemmap_size = memory_block_memmap_size(); unsigned long memmap_pages = memory_block_memmap_on_memory_pages(); @@ -1334,17 +1334,11 @@ static bool mhp_supports_memmap_on_memory(unsigned long size) * Besides having arch support and the feature enabled at runtime, we * need a few more assumptions to hold true: * - * a) We span a single memory block: memory onlining/offlinin;g happens - * in memory block granularity. We don't want the vmemmap of online - * memory blocks to reside on offline memory blocks. In the future, - * we might want to support variable-sized memory blocks to make the - * feature more versatile. 
- * - * b) The vmemmap pages span complete PMDs: We don't want vmemmap code + * a) The vmemmap pages span complete PMDs: We don't want vmemmap code * to populate memory from the altmap for unrelated parts (i.e., * other memory blocks) * - * c) The vmemmap pages (and thereby the pages that will be exposed to + * b) The vmemmap pages (and thereby the pages that will be exposed to * the buddy) have to cover full pageblocks: memory onlining/offlining * code requires applicable ranges to be page-aligned, for example, to * set the migratetypes properly. @@ -1356,7 +1350,7 @@ static bool mhp_supports_memmap_on_memory(unsigned long size) * altmap as an alternative source of memory, and we do not exactly * populate a single PMD. */ - if (!mhp_memmap_on_memory() || size != memory_block_size_bytes()) + if (!mhp_memmap_on_memory()) return false; /* @@ -1379,6 +1373,7 @@ static bool mhp_supports_memmap_on_memory(unsigned long size) return arch_supports_memmap_on_memory(vmemmap_size); } +EXPORT_SYMBOL_GPL(mhp_supports_memmap_on_memory); static void __ref remove_memory_blocks_and_altmaps(u64 start, u64 size) { @@ -1512,7 +1507,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags) * Self hosted memmap array */ if ((mhp_flags & MHP_MEMMAP_ON_MEMORY) && - mhp_supports_memmap_on_memory(memory_block_size_bytes())) { + mhp_supports_memmap_on_memory()) { ret = create_altmaps_and_memory_blocks(nid, group, start, size); if (ret) goto error;
In preparation for adding sysfs ABI to toggle memmap_on_memory semantics for drivers adding memory, export the mhp_supports_memmap_on_memory() helper. This allows drivers to check if memmap_on_memory support is available before trying to request it, and display an appropriate message if it isn't available. As part of this, remove the size argument to this - with recent updates to allow memmap_on_memory for larger ranges, and the internal splitting of altmaps into respective memory blocks, the size argument is meaningless. Cc: Andrew Morton <akpm@linux-foundation.org> Cc: David Hildenbrand <david@redhat.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Oscar Salvador <osalvador@suse.de> Cc: Dan Williams <dan.j.williams@intel.com> Cc: Dave Jiang <dave.jiang@intel.com> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Huang Ying <ying.huang@intel.com> Suggested-by: David Hildenbrand <david@redhat.com> Signed-off-by: Vishal Verma <vishal.l.verma@intel.com> --- include/linux/memory_hotplug.h | 6 ++++++ mm/memory_hotplug.c | 17 ++++++----------- 2 files changed, 12 insertions(+), 11 deletions(-)