
[v13,4/5] mm: support reporting free page blocks

Message ID 1501742299-4369-5-git-send-email-wei.w.wang@intel.com (mailing list archive)
State New, archived

Commit Message

Wang, Wei W Aug. 3, 2017, 6:38 a.m. UTC
This patch adds support to walk through the free page blocks in the
system and report them via a callback function. Some page blocks may
leave the free list after the report function returns, so it is the
caller's responsibility to either detect or prevent the use of such
pages.

Signed-off-by: Wei Wang <wei.w.wang@intel.com>
Signed-off-by: Liang Li <liang.z.li@intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
---
 include/linux/mm.h     |   7 ++++
 include/linux/mmzone.h |   5 +++
 mm/page_alloc.c        | 109 +++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 121 insertions(+)
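
For context, a minimal sketch of how a caller such as the virtio-balloon
driver might consume this interface; the hint buffer and helper names below
are hypothetical illustrations, not part of this series:

/* Hypothetical caller-side state; names are illustrative only. */
struct hint { unsigned long pfn, nr_pages; };
struct hint_buf { struct hint *hints; unsigned long used, capacity; };

/*
 * The visit callback must not touch the reported pages: they may be
 * reallocated as soon as the walk moves on.  Only the (pfn, nr_pages)
 * hint is recorded here.
 */
static void record_free_block(void *opaque, unsigned long pfn,
			      unsigned long nr_pages)
{
	struct hint_buf *buf = opaque;

	if (buf->used < buf->capacity) {
		buf->hints[buf->used].pfn = pfn;
		buf->hints[buf->used].nr_pages = nr_pages;
		buf->used++;
	}
}

/* Report every free block of order >= min_order. */
static void collect_hints(struct hint_buf *buf, unsigned int min_order)
{
	walk_free_mem_block(buf, min_order, record_free_block);
}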

Comments

Michal Hocko Aug. 3, 2017, 9:11 a.m. UTC | #1
On Thu 03-08-17 14:38:18, Wei Wang wrote:
> This patch adds support to walk through the free page blocks in the
> system and report them via a callback function. Some page blocks may
> leave the free list after the report function returns, so it is the
> caller's responsibility to either detect or prevent the use of such
> pages.
> 
> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
> Signed-off-by: Liang Li <liang.z.li@intel.com>
> Cc: Michal Hocko <mhocko@kernel.org>
> Cc: Michael S. Tsirkin <mst@redhat.com>
> ---
>  include/linux/mm.h     |   7 ++++
>  include/linux/mmzone.h |   5 +++
>  mm/page_alloc.c        | 109 +++++++++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 121 insertions(+)
> 
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 46b9ac5..24481e3 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -1835,6 +1835,13 @@ extern void free_area_init_node(int nid, unsigned long * zones_size,
>  		unsigned long zone_start_pfn, unsigned long *zholes_size);
>  extern void free_initmem(void);
>  
> +#if IS_ENABLED(CONFIG_VIRTIO_BALLOON)
> +extern void walk_free_mem_block(void *opaque1,
> +				unsigned int min_order,
> +				void (*visit)(void *opaque2,
> +					      unsigned long pfn,
> +					      unsigned long nr_pages));
> +#endif

Is the ifdef necessary? Sure, only the virtio balloon driver will use this
currently, but this looks like generic functionality not specific to
virtio at all, so the ifdef is rather confusing.

>  /*
>   * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
>   * into the buddy system. The freed pages will be poisoned with pattern
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index fc14b8b..59eacf2 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -83,6 +83,11 @@ static inline bool is_migrate_movable(int mt)
>  	for (order = 0; order < MAX_ORDER; order++) \
>  		for (type = 0; type < MIGRATE_TYPES; type++)
>  
> +#define for_each_migratetype_order_decend(min_order, order, type) \
> +	for (order = MAX_ORDER - 1; order < MAX_ORDER && order >= min_order; \
> +	     order--) \
> +		for (type = 0; type < MIGRATE_TYPES; type++)
> +

Is there going to be any other user outside of mm/page_alloc.c? If not
then do not export this.

>  extern int page_group_by_mobility_disabled;
>  
>  #define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 6d30e91..b90b513 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -4761,6 +4761,115 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
>  	show_swap_cache_info();
>  }
>  
> +#if IS_ENABLED(CONFIG_VIRTIO_BALLOON)
> +
> +/*
> + * Heuristically get a free page block in the system.
> + *
> + * It is possible that pages from the page block are used immediately after
> + * report_free_page_block() returns. It is the caller's responsibility to
> + * either detect or prevent the use of such pages.
> + *
> + * The input parameters specify the free list to check for a free page block:
> + * zone->free_area[order].free_list[migratetype]
> + *
> + * If the caller supplied page block (i.e. **page) is on the free list, offer
> + * the next page block on the list to the caller. Otherwise, offer the first
> + * page block on the list.
> + *
> + * Return 0 when a page block is found on the caller-specified free list.
> + * Otherwise, return -EAGAIN, meaning no page block was found.
> + */
> +static int report_free_page_block(struct zone *zone, unsigned int order,
> +				  unsigned int migratetype, struct page **page)

This is just too ugly and wrong actually. Never provide struct page
pointers outside of the zone->lock. What I've had in mind was to simply
walk free lists of the suitable order and call the callback for each one.
Something as simple as

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = &pgdat->node_zones[i];

		if (!populated_zone(zone))
			continue;
		spin_lock_irqsave(&zone->lock, flags);
		for (order = min_order; order < MAX_ORDER; ++order) {
			struct free_area *free_area = &zone->free_area[order];
			enum migratetype mt;
			struct page *page;

			if (!free_area->nr_free)
				continue;

			for_each_migratetype_order(order, mt) {
				list_for_each_entry(page,
						&free_area->free_list[mt], lru) {

					pfn = page_to_pfn(page);
					visit(opaque2, pfn, 1<<order);
				}
			}
		}

		spin_unlock_irqrestore(&zone->lock, flags);
	}

[...]

> +/*
> + * Walk through the free page blocks in the system. The @visit callback is
> + * invoked to handle each free page block.
> + *
> + * Note: some page blocks may be used after the report function returns, so it
> + * is not safe for the callback to use any pages or discard data on such page
> + * blocks.
> + */
> +void walk_free_mem_block(void *opaque1,
> +			 unsigned int min_order,
> +			 void (*visit)(void *opaque2,
> +				       unsigned long pfn,
> +				       unsigned long nr_pages))

Is there any reason why there is no node id? I guess you just do not
care for your particular use case. Not that I care too much either. If
somebody wants this per node then it would be trivial to extend; I was
just wondering whether this is a deliberate decision or an omission.

> +{
> +	struct zone *zone = NULL;
> +	struct page *page = NULL;
> +	unsigned int order;
> +	unsigned long pfn, nr_pages;
> +	int type;
> +
> +	for_each_populated_zone(zone) {
> +		for_each_migratetype_order_decend(min_order, order, type) {
> +			while (!report_free_page_block(zone, order, type,
> +						       &page)) {
> +				pfn = page_to_pfn(page);
> +				nr_pages = 1 << order;
> +				visit(opaque1, pfn, nr_pages);
> +			}
> +		}
> +	}
> +}
> +EXPORT_SYMBOL_GPL(walk_free_mem_block);
> +
> +#endif
> +
>  static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
>  {
>  	zoneref->zone = zone;
> -- 
> 2.7.4
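
For reference, a self-contained rendering of the sketch above (a sketch
under the per-node assumption mentioned later in the thread, not the
submitted patch): declarations are filled in and the migratetype loop is
opencoded, since for_each_migratetype_order() would restart the order loop
from 0, a point that also comes up later in the thread.

static void walk_free_mem_node(pg_data_t *pgdat, void *opaque,
			       unsigned int min_order,
			       void (*visit)(void *opaque, unsigned long pfn,
					     unsigned long nr_pages))
{
	unsigned long flags;
	unsigned int order;
	int i, mt;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = &pgdat->node_zones[i];

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		for (order = min_order; order < MAX_ORDER; order++) {
			struct free_area *free_area = &zone->free_area[order];
			struct page *page;

			if (!free_area->nr_free)
				continue;

			for (mt = 0; mt < MIGRATE_TYPES; mt++)
				list_for_each_entry(page,
						    &free_area->free_list[mt],
						    lru)
					visit(opaque, page_to_pfn(page),
					      1UL << order);
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}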
Wang, Wei W Aug. 3, 2017, 10:42 a.m. UTC | #2
On 08/03/2017 05:11 PM, Michal Hocko wrote:
> On Thu 03-08-17 14:38:18, Wei Wang wrote:
>> This patch adds support to walk through the free page blocks in the
>> system and report them via a callback function. Some page blocks may
>> leave the free list after the report function returns, so it is the
>> caller's responsibility to either detect or prevent the use of such
>> pages.
>>
>> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
>> Signed-off-by: Liang Li <liang.z.li@intel.com>
>> Cc: Michal Hocko <mhocko@kernel.org>
>> Cc: Michael S. Tsirkin <mst@redhat.com>
>> ---
>>   include/linux/mm.h     |   7 ++++
>>   include/linux/mmzone.h |   5 +++
>>   mm/page_alloc.c        | 109 +++++++++++++++++++++++++++++++++++++++++++++++++
>>   3 files changed, 121 insertions(+)
>>
>> diff --git a/include/linux/mm.h b/include/linux/mm.h
>> index 46b9ac5..24481e3 100644
>> --- a/include/linux/mm.h
>> +++ b/include/linux/mm.h
>> @@ -1835,6 +1835,13 @@ extern void free_area_init_node(int nid, unsigned long * zones_size,
>>   		unsigned long zone_start_pfn, unsigned long *zholes_size);
>>   extern void free_initmem(void);
>>   
>> +#if IS_ENABLED(CONFIG_VIRTIO_BALLOON)
>> +extern void walk_free_mem_block(void *opaque1,
>> +				unsigned int min_order,
>> +				void (*visit)(void *opaque2,
>> +					      unsigned long pfn,
>> +					      unsigned long nr_pages));
>> +#endif
> Is the ifdef necessary? Sure, only the virtio balloon driver will use this
> currently, but this looks like generic functionality not specific to
> virtio at all, so the ifdef is rather confusing.

OK. We can remove the condition if there is no objection from others.


>
>>   extern int page_group_by_mobility_disabled;
>>   
>>   #define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
>> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
>> index 6d30e91..b90b513 100644
>> --- a/mm/page_alloc.c
>> +++ b/mm/page_alloc.c
>> @@ -4761,6 +4761,115 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
>>   	show_swap_cache_info();
>>   }
>>   
>> +#if IS_ENABLED(CONFIG_VIRTIO_BALLOON)
>> +
>> +/*
>> + * Heuristically get a free page block in the system.
>> + *
>> + * It is possible that pages from the page block are used immediately after
>> + * report_free_page_block() returns. It is the caller's responsibility to
>> + * either detect or prevent the use of such pages.
>> + *
>> + * The input parameters specify the free list to check for a free page block:
>> + * zone->free_area[order].free_list[migratetype]
>> + *
>> + * If the caller supplied page block (i.e. **page) is on the free list, offer
>> + * the next page block on the list to the caller. Otherwise, offer the first
>> + * page block on the list.
>> + *
>> + * Return 0 when a page block is found on the caller-specified free list.
>> + * Otherwise, return -EAGAIN, meaning no page block was found.
>> + */
>> +static int report_free_page_block(struct zone *zone, unsigned int order,
>> +				  unsigned int migratetype, struct page **page)
> This is just too ugly and wrong actually. Never provide struct page
> pointers outside of the zone->lock. What I've had in mind was to simply
> walk free lists of the suitable order and call the callback for each one.
> Something as simple as
>
> 	for (i = 0; i < MAX_NR_ZONES; i++) {
> 		struct zone *zone = &pgdat->node_zones[i];
>
> 		if (!populated_zone(zone))
> 			continue;
> 		spin_lock_irqsave(&zone->lock, flags);
> 		for (order = min_order; order < MAX_ORDER; ++order) {
> 			struct free_area *free_area = &zone->free_area[order];
> 			enum migratetype mt;
> 			struct page *page;
>
> 			if (!free_area->nr_free)
> 				continue;
>
> 			for_each_migratetype_order(order, mt) {
> 				list_for_each_entry(page,
> 						&free_area->free_list[mt], lru) {
>
> 					pfn = page_to_pfn(page);
> 					visit(opaque2, pfn, 1<<order);
> 				}
> 			}
> 		}
>
> 		spin_unlock_irqrestore(&zone->lock, flags);
> 	}
>
> [...]


I think the above would take the lock for too long. That's why we prefer
to take one free page block each time, and taking it one by one also doesn't
make a difference, in terms of the performance that we need.

The struct page is used as a "state" to get the next free page block. It is
only given to an internal implementation of a function in mm (not seen by the
outside caller). Would this be OK?
If not, how about a pfn: we can also pass a pfn into the function, do
pfn_to_page each time the function starts, and then do page_to_pfn when it
returns.


>> +/*
>> + * Walk through the free page blocks in the system. The @visit callback is
>> + * invoked to handle each free page block.
>> + *
>> + * Note: some page blocks may be used after the report function returns, so it
>> + * is not safe for the callback to use any pages or discard data on such page
>> + * blocks.
>> + */
>> +void walk_free_mem_block(void *opaque1,
>> +			 unsigned int min_order,
>> +			 void (*visit)(void *opaque2,
>> +				       unsigned long pfn,
>> +				       unsigned long nr_pages))
> Is there any reason why there is no node id? I guess you just do not
> care for your particular use case. Not that I care too much either. If
> somebody wants this per node then it would be trivial to extend; I was
> just wondering whether this is a deliberate decision or an omission.
>

Right, we don't care about the node id. Live migration transfers all the
guest system memory, so we just want to get hints about all the free page
blocks in the system.


Best,
Wei
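
To make the pfn alternative concrete, a sketch of what a pfn-based cursor
could look like (illustration only; the next replies reject struct-page
tricks, and the pfn_valid()/page_order() checks here are assumptions, not a
guarantee against hot-remove races):

static int report_free_page_block_pfn(struct zone *zone, unsigned int order,
				      unsigned int migratetype,
				      unsigned long *cursor)
{
	struct list_head *free_list;
	struct page *page = NULL;
	unsigned long flags;
	int ret = -EAGAIN;

	spin_lock_irqsave(&zone->lock, flags);
	free_list = &zone->free_area[order].free_list[migratetype];

	/* derive the struct page only under zone->lock */
	if (*cursor && pfn_valid(*cursor))
		page = pfn_to_page(*cursor);

	if (page && PageBuddy(page) && page_order(page) == order) {
		/* the cursor is still a free block of this order:
		 * advance, or stop if it was the last entry */
		page = page->lru.next == free_list ? NULL :
		       list_next_entry(page, lru);
	} else {
		/* cursor gone (allocated, split, or never set):
		 * restart from the head of the list */
		page = list_empty(free_list) ? NULL :
		       list_first_entry(free_list, struct page, lru);
	}

	if (page) {
		*cursor = page_to_pfn(page);
		ret = 0;
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}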
Michal Hocko Aug. 3, 2017, 10:44 a.m. UTC | #3
On Thu 03-08-17 18:42:15, Wei Wang wrote:
> On 08/03/2017 05:11 PM, Michal Hocko wrote:
> >On Thu 03-08-17 14:38:18, Wei Wang wrote:
[...]
> >>+static int report_free_page_block(struct zone *zone, unsigned int order,
> >>+				  unsigned int migratetype, struct page **page)
> >This is just too ugly and wrong actually. Never provide struct page
> >pointers outside of the zone->lock. What I've had in mind was to simply
> >walk free lists of the suitable order and call the callback for each one.
> >Something as simple as
> >
> >	for (i = 0; i < MAX_NR_ZONES; i++) {
> >		struct zone *zone = &pgdat->node_zones[i];
> >
> >		if (!populated_zone(zone))
> >			continue;
> >		spin_lock_irqsave(&zone->lock, flags);
> >		for (order = min_order; order < MAX_ORDER; ++order) {
> >			struct free_area *free_area = &zone->free_area[order];
> >			enum migratetype mt;
> >			struct page *page;
> >
> >			if (!free_area->nr_free)
> >				continue;
> >
> >			for_each_migratetype_order(order, mt) {
> >				list_for_each_entry(page,
> >						&free_area->free_list[mt], lru) {
> >
> >					pfn = page_to_pfn(page);
> >					visit(opaque2, pfn, 1<<order);
> >				}
> >			}
> >		}
> >
> >		spin_unlock_irqrestore(&zone->lock, flags);
> >	}
> >
> >[...]
> 
> 
> I think the above would take the lock for too long. That's why we
> prefer to take one free page block each time, and taking it one by one
> also doesn't make a difference, in terms of the performance that we
> need.

I think you should start with a simple approach and improve incrementally
if this turns out to be not optimal. I really detest taking struct pages
outside of the lock. You never know what might happen after the lock is
dropped. E.g. can you race with the memory hotremove?

> The struct page is used as a "state" to get the next free page block. It is
> only given to an internal implementation of a function in mm (not seen by the
> outside caller). Would this be OK?
> If not, how about a pfn: we can also pass a pfn into the function, do
> pfn_to_page each time the function starts, and then do page_to_pfn when it
> returns.

No, just do not try to play tricks with struct pages which might have
gone away.
Wang, Wei W Aug. 3, 2017, 11:27 a.m. UTC | #4
On 08/03/2017 06:44 PM, Michal Hocko wrote:
> On Thu 03-08-17 18:42:15, Wei Wang wrote:
>> On 08/03/2017 05:11 PM, Michal Hocko wrote:
>>> On Thu 03-08-17 14:38:18, Wei Wang wrote:
> [...]
>>>> +static int report_free_page_block(struct zone *zone, unsigned int order,
>>>> +				  unsigned int migratetype, struct page **page)
>>> This is just too ugly and wrong actually. Never provide struct page
>>> pointers outside of the zone->lock. What I've had in mind was to simply
>>> walk free lists of the suitable order and call the callback for each one.
>>> Something as simple as
>>>
>>> 	for (i = 0; i < MAX_NR_ZONES; i++) {
>>> 		struct zone *zone = &pgdat->node_zones[i];
>>>
>>> 		if (!populated_zone(zone))
>>> 			continue;
>>> 		spin_lock_irqsave(&zone->lock, flags);
>>> 		for (order = min_order; order < MAX_ORDER; ++order) {
>>> 			struct free_area *free_area = &zone->free_area[order];
>>> 			enum migratetype mt;
>>> 			struct page *page;
>>>
>>> 			if (!free_area->nr_free)
>>> 				continue;
>>>
>>> 			for_each_migratetype_order(order, mt) {
>>> 				list_for_each_entry(page,
>>> 						&free_area->free_list[mt], lru) {
>>>
>>> 					pfn = page_to_pfn(page);
>>> 					visit(opaque2, pfn, 1<<order);
>>> 				}
>>> 			}
>>> 		}
>>>
>>> 		spin_unlock_irqrestore(&zone->lock, flags);
>>> 	}
>>>
>>> [...]
>>
>> I think the above would take the lock for too long. That's why we
>> prefer to take one free page block each time, and taking it one by one
>> also doesn't make a difference, in terms of the performance that we
>> need.
> I think you should start with a simple approach and improve incrementally
> if this turns out to be not optimal. I really detest taking struct pages
> outside of the lock. You never know what might happen after the lock is
> dropped. E.g. can you race with the memory hotremove?


The caller won't use pages returned from the function, so I think there
shouldn't be an issue or race if the returned pages are used (i.e. not free
anymore) or simply gone due to hotremove.


Best,
Wei
Michal Hocko Aug. 3, 2017, 11:28 a.m. UTC | #5
On Thu 03-08-17 19:27:19, Wei Wang wrote:
> On 08/03/2017 06:44 PM, Michal Hocko wrote:
> >On Thu 03-08-17 18:42:15, Wei Wang wrote:
> >>On 08/03/2017 05:11 PM, Michal Hocko wrote:
> >>>On Thu 03-08-17 14:38:18, Wei Wang wrote:
> >[...]
> >>>>+static int report_free_page_block(struct zone *zone, unsigned int order,
> >>>>+				  unsigned int migratetype, struct page **page)
> >>>This is just too ugly and wrong actually. Never provide struct page
> >>>pointers outside of the zone->lock. What I've had in mind was to simply
> >>>walk free lists of the suitable order and call the callback for each one.
> >>>Something as simple as
> >>>
> >>>	for (i = 0; i < MAX_NR_ZONES; i++) {
> >>>		struct zone *zone = &pgdat->node_zones[i];
> >>>
> >>>		if (!populated_zone(zone))
> >>>			continue;
> >>>		spin_lock_irqsave(&zone->lock, flags);
> >>>		for (order = min_order; order < MAX_ORDER; ++order) {
> >>>			struct free_area *free_area = &zone->free_area[order];
> >>>			enum migratetype mt;
> >>>			struct page *page;
> >>>
> >>>			if (!free_area->nr_free)
> >>>				continue;
> >>>
> >>>			for_each_migratetype_order(order, mt) {
> >>>				list_for_each_entry(page,
> >>>						&free_area->free_list[mt], lru) {
> >>>
> >>>					pfn = page_to_pfn(page);
> >>>					visit(opaque2, pfn, 1<<order);
> >>>				}
> >>>			}
> >>>		}
> >>>
> >>>		spin_unlock_irqrestore(&zone->lock, flags);
> >>>	}
> >>>
> >>>[...]
> >>
> >>I think the above would take the lock for too long. That's why we
> >>prefer to take one free page block each time, and taking it one by one
> >>also doesn't make a difference, in terms of the performance that we
> >>need.
> >I think you should start with a simple approach and improve incrementally
> >if this turns out to be not optimal. I really detest taking struct pages
> >outside of the lock. You never know what might happen after the lock is
> >dropped. E.g. can you race with the memory hotremove?
> 
> 
> The caller won't use pages returned from the function, so I think there
> shouldn't be an issue or race if the returned pages are used (i.e. not free
> anymore) or simply gone due to hotremove.

No, this is just too error prone. Consider that the struct page pointer
itself could become invalid in the meantime. Please always keep robustness
in mind first. Optimizations are nice, but it is not even clear whether
the simple variant will cause any problems.
Wang, Wei W Aug. 3, 2017, 12:11 p.m. UTC | #6
On 08/03/2017 07:28 PM, Michal Hocko wrote:
> On Thu 03-08-17 19:27:19, Wei Wang wrote:
>> On 08/03/2017 06:44 PM, Michal Hocko wrote:
>>> On Thu 03-08-17 18:42:15, Wei Wang wrote:
>>>> On 08/03/2017 05:11 PM, Michal Hocko wrote:
>>>>> On Thu 03-08-17 14:38:18, Wei Wang wrote:
>>> [...]
>>>>>> +static int report_free_page_block(struct zone *zone, unsigned int order,
>>>>>> +				  unsigned int migratetype, struct page **page)
>>>>> This is just too ugly and wrong actually. Never provide struct page
>>>>> pointers outside of the zone->lock. What I've had in mind was to simply
>>>>> walk free lists of the suitable order and call the callback for each one.
>>>>> Something as simple as
>>>>>
>>>>> 	for (i = 0; i < MAX_NR_ZONES; i++) {
>>>>> 		struct zone *zone = &pgdat->node_zones[i];
>>>>>
>>>>> 		if (!populated_zone(zone))
>>>>> 			continue;
>>>>> 		spin_lock_irqsave(&zone->lock, flags);
>>>>> 		for (order = min_order; order < MAX_ORDER; ++order) {
>>>>> 			struct free_area *free_area = &zone->free_area[order];
>>>>> 			enum migratetype mt;
>>>>> 			struct page *page;
>>>>>
>>>>> 			if (!free_area->nr_free)
>>>>> 				continue;
>>>>>
>>>>> 			for_each_migratetype_order(order, mt) {
>>>>> 				list_for_each_entry(page,
>>>>> 						&free_area->free_list[mt], lru) {
>>>>>
>>>>> 					pfn = page_to_pfn(page);
>>>>> 					visit(opaque2, pfn, 1<<order);
>>>>> 				}
>>>>> 			}
>>>>> 		}
>>>>>
>>>>> 		spin_unlock_irqrestore(&zone->lock, flags);
>>>>> 	}
>>>>>
>>>>> [...]
>>>> I think the above would take the lock for too long. That's why we
>>>> prefer to take one free page block each time, and taking it one by one
>>>> also doesn't make a difference, in terms of the performance that we
>>>> need.
>>> I think you should start with a simple approach and improve incrementally
>>> if this turns out to be not optimal. I really detest taking struct pages
>>> outside of the lock. You never know what might happen after the lock is
>>> dropped. E.g. can you race with the memory hotremove?
>>
>> The caller won't use pages returned from the function, so I think there
>> shouldn't be an issue or race if the returned pages are used (i.e. not free
>> anymore) or simply gone due to hotremove.
> No, this is just too error prone. Consider that the struct page pointer
> itself could become invalid in the meantime. Please always keep robustness
> in mind first. Optimizations are nice, but it is not even clear whether
> the simple variant will cause any problems.


how about this:

for_each_populated_zone(zone) {
	for_each_migratetype_order_decend(min_order, order, type) {
		do {
=>			spin_lock_irqsave(&zone->lock, flags);
			ret = report_free_page_block(zone, order, type,
						     &page);
			if (!ret) {
				pfn = page_to_pfn(page);
				nr_pages = 1 << order;
				visit(opaque1, pfn, nr_pages);
			}
=>			spin_unlock_irqrestore(&zone->lock, flags);
		} while (!ret);
	}
}

In this way, we can still keep the lock granularity at one free page block
while having the struct page operated on under the lock.



Best,
Wei
Michal Hocko Aug. 3, 2017, 12:41 p.m. UTC | #7
On Thu 03-08-17 20:11:58, Wei Wang wrote:
> On 08/03/2017 07:28 PM, Michal Hocko wrote:
> >On Thu 03-08-17 19:27:19, Wei Wang wrote:
> >>On 08/03/2017 06:44 PM, Michal Hocko wrote:
> >>>On Thu 03-08-17 18:42:15, Wei Wang wrote:
> >>>>On 08/03/2017 05:11 PM, Michal Hocko wrote:
> >>>>>On Thu 03-08-17 14:38:18, Wei Wang wrote:
> >>>[...]
> >>>>>>+static int report_free_page_block(struct zone *zone, unsigned int order,
> >>>>>>+				  unsigned int migratetype, struct page **page)
> >>>>>This is just too ugly and wrong actually. Never provide struct page
> >>>>>pointers outside of the zone->lock. What I've had in mind was to simply
> >>>>>walk free lists of the suitable order and call the callback for each one.
> >>>>>Something as simple as
> >>>>>
> >>>>>	for (i = 0; i < MAX_NR_ZONES; i++) {
> >>>>>		struct zone *zone = &pgdat->node_zones[i];
> >>>>>
> >>>>>		if (!populated_zone(zone))
> >>>>>			continue;
> >>>>>		spin_lock_irqsave(&zone->lock, flags);
> >>>>>		for (order = min_order; order < MAX_ORDER; ++order) {
> >>>>>			struct free_area *free_area = &zone->free_area[order];
> >>>>>			enum migratetype mt;
> >>>>>			struct page *page;
> >>>>>
> >>>>>			if (!free_area->nr_free)
> >>>>>				continue;
> >>>>>
> >>>>>			for_each_migratetype_order(order, mt) {
> >>>>>				list_for_each_entry(page,
> >>>>>						&free_area->free_list[mt], lru) {
> >>>>>
> >>>>>					pfn = page_to_pfn(page);
> >>>>>					visit(opaque2, pfn, 1<<order);
> >>>>>				}
> >>>>>			}
> >>>>>		}
> >>>>>
> >>>>>		spin_unlock_irqrestore(&zone->lock, flags);
> >>>>>	}
> >>>>>
> >>>>>[...]
> >>>>I think the above would take the lock for too long. That's why we
> >>>>prefer to take one free page block each time, and taking it one by one
> >>>>also doesn't make a difference, in terms of the performance that we
> >>>>need.
> >>>I think you should start with a simple approach and improve incrementally
> >>>if this turns out to be not optimal. I really detest taking struct pages
> >>>outside of the lock. You never know what might happen after the lock is
> >>>dropped. E.g. can you race with the memory hotremove?
> >>
> >>The caller won't use pages returned from the function, so I think there
> >>shouldn't be an issue or race if the returned pages are used (i.e. not free
> >>anymore) or simply gone due to hotremove.
> >No, this is just too error prone. Consider that the struct page pointer
> >itself could become invalid in the meantime. Please always keep robustness
> >in mind first. Optimizations are nice, but it is not even clear whether
> >the simple variant will cause any problems.
> 
> 
> how about this:
> 
> for_each_populated_zone(zone) {
> 	for_each_migratetype_order_decend(min_order, order, type) {
> 		do {
> =>			spin_lock_irqsave(&zone->lock, flags);
> 			ret = report_free_page_block(zone, order, type,
> 						     &page);
> 			if (!ret) {
> 				pfn = page_to_pfn(page);
> 				nr_pages = 1 << order;
> 				visit(opaque1, pfn, nr_pages);
> 			}
> =>			spin_unlock_irqrestore(&zone->lock, flags);
> 		} while (!ret);
> 	}
> }
> 
> In this way, we can still keep the lock granularity at one free page block
> while having the struct page operated on under the lock.

How can you continue iterating the free_list after the lock has been
dropped? If you want to keep the lock held for each migratetype, then
why not: just push the lock inside the for_each_migratetype_order loop
from my example.
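
Concretely, pushing the lock inside the migratetype walk of the earlier
sketch could look like this (a sketch; declarations omitted and the
migratetype loop opencoded). Holding zone->lock across each whole
free_list walk is what makes the iteration safe:

for_each_populated_zone(zone) {
	for (order = min_order; order < MAX_ORDER; order++) {
		for (mt = 0; mt < MIGRATE_TYPES; mt++) {
			spin_lock_irqsave(&zone->lock, flags);
			list_for_each_entry(page,
					&zone->free_area[order].free_list[mt],
					lru)
				visit(opaque, page_to_pfn(page),
				      1UL << order);
			spin_unlock_irqrestore(&zone->lock, flags);
		}
	}
}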
Wang, Wei W Aug. 3, 2017, 1:17 p.m. UTC | #8
On 08/03/2017 08:41 PM, Michal Hocko wrote:
> On Thu 03-08-17 20:11:58, Wei Wang wrote:
>> On 08/03/2017 07:28 PM, Michal Hocko wrote:
>>> On Thu 03-08-17 19:27:19, Wei Wang wrote:
>>>> On 08/03/2017 06:44 PM, Michal Hocko wrote:
>>>>> On Thu 03-08-17 18:42:15, Wei Wang wrote:
>>>>>> On 08/03/2017 05:11 PM, Michal Hocko wrote:
>>>>>>> On Thu 03-08-17 14:38:18, Wei Wang wrote:
>>>>> [...]
>>>>>>>> +static int report_free_page_block(struct zone *zone, unsigned int order,
>>>>>>>> +				  unsigned int migratetype, struct page **page)
>>>>>>> This is just too ugly and wrong actually. Never provide struct page
>>>>>>> pointers outside of the zone->lock. What I've had in mind was to simply
>>>>>>> walk free lists of the suitable order and call the callback for each one.
>>>>>>> Something as simple as
>>>>>>>
>>>>>>> 	for (i = 0; i < MAX_NR_ZONES; i++) {
>>>>>>> 		struct zone *zone = &pgdat->node_zones[i];
>>>>>>>
>>>>>>> 		if (!populated_zone(zone))
>>>>>>> 			continue;
>>>>>>> 		spin_lock_irqsave(&zone->lock, flags);
>>>>>>> 		for (order = min_order; order < MAX_ORDER; ++order) {
>>>>>>> 			struct free_area *free_area = &zone->free_area[order];
>>>>>>> 			enum migratetype mt;
>>>>>>> 			struct page *page;
>>>>>>>
>>>>>>> 			if (!free_area->nr_free)
>>>>>>> 				continue;
>>>>>>>
>>>>>>> 			for_each_migratetype_order(order, mt) {
>>>>>>> 				list_for_each_entry(page,
>>>>>>> 						&free_area->free_list[mt], lru) {
>>>>>>>
>>>>>>> 					pfn = page_to_pfn(page);
>>>>>>> 					visit(opaque2, pfn, 1<<order);
>>>>>>> 				}
>>>>>>> 			}
>>>>>>> 		}
>>>>>>>
>>>>>>> 		spin_unlock_irqrestore(&zone->lock, flags);
>>>>>>> 	}
>>>>>>>
>>>>>>> [...]
>>>>>> I think the above would take the lock for too long. That's why we
>>>>>> prefer to take one free page block each time, and taking it one by one
>>>>>> also doesn't make a difference, in terms of the performance that we
>>>>>> need.
>>>>> I think you should start with a simple approach and improve incrementally
>>>>> if this turns out to be not optimal. I really detest taking struct pages
>>>>> outside of the lock. You never know what might happen after the lock is
>>>>> dropped. E.g. can you race with the memory hotremove?
>>>> The caller won't use pages returned from the function, so I think there
>>>> shouldn't be an issue or race if the returned pages are used (i.e. not free
>>>> anymore) or simply gone due to hotremove.
>>> No, this is just too error prone. Consider that the struct page pointer
>>> itself could become invalid in the meantime. Please always keep robustness
>>> in mind first. Optimizations are nice, but it is not even clear whether
>>> the simple variant will cause any problems.
>>
>> how about this:
>>
>> for_each_populated_zone(zone) {
>> 	for_each_migratetype_order_decend(min_order, order, type) {
>> 		do {
>> =>			spin_lock_irqsave(&zone->lock, flags);
>> 			ret = report_free_page_block(zone, order, type,
>> 						     &page);
>> 			if (!ret) {
>> 				pfn = page_to_pfn(page);
>> 				nr_pages = 1 << order;
>> 				visit(opaque1, pfn, nr_pages);
>> 			}
>> =>			spin_unlock_irqrestore(&zone->lock, flags);
>> 		} while (!ret);
>> 	}
>> }
>>
>> In this way, we can still keep the lock granularity at one free page block
>> while having the struct page operated on under the lock.
> How can you continue iterating the free_list after the lock has been
> dropped?

report_free_page_block() handles all the possible cases after the lock is
dropped. For example, if the previously reported page is no longer on the
free list, then the first node of the list for this order will be given.
This is because page allocation takes page blocks from head to end. For
example, with free blocks

1,2,3,4,5,6

if the previously reported free block is 2, then when we give 2 to the
report function to get the next page block and find that 1, 2, and 3 have
all gone, it will report 4, which is the head of the free list.

> If you want to keep the lock held for each migratetype, then
> why not: just push the lock inside the for_each_migratetype_order loop
> from my example.
>

The above lock is held for each free page block, instead of for each
migratetype, since the report function only reports one page block each
time.


Best,
Wei
Michal Hocko Aug. 3, 2017, 1:50 p.m. UTC | #9
On Thu 03-08-17 21:17:25, Wei Wang wrote:
> On 08/03/2017 08:41 PM, Michal Hocko wrote:
> >On Thu 03-08-17 20:11:58, Wei Wang wrote:
> >>On 08/03/2017 07:28 PM, Michal Hocko wrote:
> >>>On Thu 03-08-17 19:27:19, Wei Wang wrote:
> >>>>On 08/03/2017 06:44 PM, Michal Hocko wrote:
> >>>>>On Thu 03-08-17 18:42:15, Wei Wang wrote:
> >>>>>>On 08/03/2017 05:11 PM, Michal Hocko wrote:
> >>>>>>>On Thu 03-08-17 14:38:18, Wei Wang wrote:
> >>>>>[...]
> >>>>>>>>+static int report_free_page_block(struct zone *zone, unsigned int order,
> >>>>>>>>+				  unsigned int migratetype, struct page **page)
> >>>>>>>This is just too ugly and wrong actually. Never provide struct page
> >>>>>>>pointers outside of the zone->lock. What I've had in mind was to simply
> >>>>>>>walk free lists of the suitable order and call the callback for each one.
> >>>>>>>Something as simple as
> >>>>>>>
> >>>>>>>	for (i = 0; i < MAX_NR_ZONES; i++) {
> >>>>>>>		struct zone *zone = &pgdat->node_zones[i];
> >>>>>>>
> >>>>>>>		if (!populated_zone(zone))
> >>>>>>>			continue;
> >>>>>>>		spin_lock_irqsave(&zone->lock, flags);
> >>>>>>>		for (order = min_order; order < MAX_ORDER; ++order) {
> >>>>>>>			struct free_area *free_area = &zone->free_area[order];
> >>>>>>>			enum migratetype mt;
> >>>>>>>			struct page *page;
> >>>>>>>
> >>>>>>>			if (!free_area->nr_free)
> >>>>>>>				continue;
> >>>>>>>
> >>>>>>>			for_each_migratetype_order(order, mt) {
> >>>>>>>				list_for_each_entry(page,
> >>>>>>>						&free_area->free_list[mt], lru) {
> >>>>>>>
> >>>>>>>					pfn = page_to_pfn(page);
> >>>>>>>					visit(opaque2, pfn, 1<<order);
> >>>>>>>				}
> >>>>>>>			}
> >>>>>>>		}
> >>>>>>>
> >>>>>>>		spin_unlock_irqrestore(&zone->lock, flags);
> >>>>>>>	}
> >>>>>>>
> >>>>>>>[...]
> >>>>>>I think the above would take the lock for too long. That's why we
> >>>>>>prefer to take one free page block each time, and taking it one by one
> >>>>>>also doesn't make a difference, in terms of the performance that we
> >>>>>>need.
> >>>>>I think you should start with a simple approach and improve incrementally
> >>>>>if this turns out to be not optimal. I really detest taking struct pages
> >>>>>outside of the lock. You never know what might happen after the lock is
> >>>>>dropped. E.g. can you race with the memory hotremove?
> >>>>The caller won't use pages returned from the function, so I think there
> >>>>shouldn't be an issue or race if the returned pages are used (i.e. not free
> >>>>anymore) or simply gone due to hotremove.
> >>>No, this is just too error prone. Consider that the struct page pointer
> >>>itself could become invalid in the meantime. Please always keep robustness
> >>>in mind first. Optimizations are nice, but it is not even clear whether
> >>>the simple variant will cause any problems.
> >>
> >>how about this:
> >>
> >>for_each_populated_zone(zone) {
> >>	for_each_migratetype_order_decend(min_order, order, type) {
> >>		do {
> >>=>			spin_lock_irqsave(&zone->lock, flags);
> >>			ret = report_free_page_block(zone, order, type,
> >>						     &page);
> >>			if (!ret) {
> >>				pfn = page_to_pfn(page);
> >>				nr_pages = 1 << order;
> >>				visit(opaque1, pfn, nr_pages);
> >>			}
> >>=>			spin_unlock_irqrestore(&zone->lock, flags);
> >>		} while (!ret);
> >>	}
> >>}
> >>
> >>In this way, we can still keep the lock granularity at one free page block
> >>while having the struct page operated on under the lock.
> >How can you continue iterating the free_list after the lock has been
> >dropped?
> 
> report_free_page_block() handles all the possible cases after the lock is
> dropped. For example, if the previously reported page is no longer on the
> free list, then the first node of the list for this order will be given.
> This is because page allocation takes page blocks from head to end. For
> example, with free blocks
>
> 1,2,3,4,5,6
>
> if the previously reported free block is 2, then when we give 2 to the
> report function to get the next page block and find that 1, 2, and 3 have
> all gone, it will report 4, which is the head of the free list.

As I've said earlier, start simple and optimize incrementally, with some
numbers to justify more subtle code.
Wang, Wei W Aug. 3, 2017, 3:20 p.m. UTC | #10
On Thursday, August 3, 2017 9:51 PM, Michal Hocko: 
> As I've said earlier, start simple and optimize incrementally, with some
> numbers to justify more subtle code.
> --

OK. Let's start with the simple implementation as you suggested.

Best,
Wei
Michael S. Tsirkin Aug. 3, 2017, 9:02 p.m. UTC | #11
On Thu, Aug 03, 2017 at 03:20:09PM +0000, Wang, Wei W wrote:
> On Thursday, August 3, 2017 9:51 PM, Michal Hocko: 
> > As I've said earlier, start simple and optimize incrementally, with some
> > numbers to justify more subtle code.
> > --
> 
> OK. Let's start with the simple implementation as you suggested.
> 
> Best,
> Wei

The tricky part is when you need to drop the lock and
then restart because the device is busy. Would it maybe
make sense to rotate the list so that new head
will consist of pages not yet sent to device?
Michal Hocko Aug. 4, 2017, 7:53 a.m. UTC | #12
On Fri 04-08-17 00:02:01, Michael S. Tsirkin wrote:
> On Thu, Aug 03, 2017 at 03:20:09PM +0000, Wang, Wei W wrote:
> > On Thursday, August 3, 2017 9:51 PM, Michal Hocko: 
> > > As I've said earlier, start simple and optimize incrementally, with some
> > > numbers to justify more subtle code.
> > > --
> > 
> > OK. Let's start with the simple implementation as you suggested.
> > 
> > Best,
> > Wei
> 
> The tricky part is when you need to drop the lock and
> then restart because the device is busy. Would it maybe
> make sense to rotate the list so that new head
> will consist of pages not yet sent to device?

No, I think this should be a strictly non-modifying API.
Wang, Wei W Aug. 4, 2017, 8:15 a.m. UTC | #13
On 08/04/2017 03:53 PM, Michal Hocko wrote:
> On Fri 04-08-17 00:02:01, Michael S. Tsirkin wrote:
>> On Thu, Aug 03, 2017 at 03:20:09PM +0000, Wang, Wei W wrote:
>>> On Thursday, August 3, 2017 9:51 PM, Michal Hocko:
>>>> As I've said earlier, start simple and optimize incrementally, with some
>>>> numbers to justify more subtle code.
>>>> --
>>> OK. Let's start with the simple implementation as you suggested.
>>>
>>> Best,
>>> Wei
>> The tricky part is when you need to drop the lock and
>> then restart because the device is busy. Would it maybe
>> make sense to rotate the list so that new head
>> will consist of pages not yet sent to device?
> No, I think this should be a strictly non-modifying API.


Just to get the context here for discussion:

     spin_lock_irqsave(&zone->lock, flags);
     ...
     visit(opaque2, pfn, 1<<order);
     spin_unlock_irqrestore(&zone->lock, flags);

The concern is that the callback may cause the lock to be
held too long.


I think here we can have two options:
- Option 1: Put a note on the callback: the callback function
     should not block and it should finish as soon as possible.
     (When implementing an interrupt handler, we also have
     similar rules in mind, right?)

For our use case, the callback just puts the reported page
block onto the ring, then returns. If the ring is full because the host
is busy, then I think it should skip this one and just return.
Because:
     A. This is an optimization feature, losing a couple of free
          pages to report isn't that important;
     B. In reality, I think it's uncommon to see this ring getting
         full (I didn't observe ring full in the tests), since the host
         (consumer) is notified to take out the page block right
         after it is added.

- Option 2: Put the callback function outside the lock.
     What's passed into the callback is just a pfn, and the callback
     won't access the corresponding pages. So, I still think it won't
     be an issue no matter what the status of the pages is after they
     are reported (even if they no longer exist due to hot-remove).


What would you guys think?

Best,
Wei
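
A sketch of the Option 1 callback described above: it runs under
zone->lock, so it never blocks, and it simply drops the hint when the
ring shared with the hypervisor is full (ring_full(), ring_push() and
notify_host() are hypothetical helpers, not an existing kernel API):

static void free_page_visit(void *opaque, unsigned long pfn,
			    unsigned long nr_pages)
{
	struct report_ring *ring = opaque;	/* hypothetical */

	if (ring_full(ring))
		return;		/* best effort: losing a hint is fine */

	ring_push(ring, pfn, nr_pages);
	notify_host(ring);	/* consumer drains right after the push */
}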
Michal Hocko Aug. 4, 2017, 8:24 a.m. UTC | #14
On Fri 04-08-17 16:15:24, Wei Wang wrote:
> On 08/04/2017 03:53 PM, Michal Hocko wrote:
> >On Fri 04-08-17 00:02:01, Michael S. Tsirkin wrote:
> >>On Thu, Aug 03, 2017 at 03:20:09PM +0000, Wang, Wei W wrote:
> >>>On Thursday, August 3, 2017 9:51 PM, Michal Hocko:
> >>>>As I've said earlier, start simple and optimize incrementally, with some
> >>>>numbers to justify more subtle code.
> >>>>--
> >>>OK. Let's start with the simple implementation as you suggested.
> >>>
> >>>Best,
> >>>Wei
> >>The tricky part is when you need to drop the lock and
> >>then restart because the device is busy. Would it maybe
> >>make sense to rotate the list so that new head
> >>will consist of pages not yet sent to device?
> >No, I think this should be a strictly non-modifying API.
> 
> 
> Just to get the context here for discussion:
> 
>     spin_lock_irqsave(&zone->lock, flags);
>     ...
>     visit(opaque2, pfn, 1<<order);
>     spin_unlock_irqrestore(&zone->lock, flags);
> 
> The concern is that the callback may cause the lock to be
> held too long.
> 
> 
> I think here we can have two options:
> - Option 1: Put a note on the callback: the callback function
>     should not block and it should finish as soon as possible.
>     (When implementing an interrupt handler, we also have
>     similar rules in mind, right?)

absolutely

> For our use case, the callback just puts the reported page
> block onto the ring, then returns. If the ring is full because the host
> is busy, then I think it should skip this one and just return.
> Because:
>     A. This is an optimization feature, losing a couple of free
>          pages to report isn't that important;
>     B. In reality, I think it's uncommon to see this ring getting
>         full (I didn't observe ring full in the tests), since the host
>         (consumer) is notified to take out the page block right
>         after it is added.

I thought you only updated a pre-allocated bitmap... Anyway, I cannot
comment on this part much as I am not familiar with your use case.
 
> - Option 2: Put the callback function outside the lock.
>     What's passed into the callback is just a pfn, and the callback
>     won't access the corresponding pages. So, I still think it won't
>     be an issue no matter what the status of the pages is after they
>     are reported (even if they no longer exist due to hot-remove).

This would make the API implementation more complex and I am not yet
convinced we really need that.
Wang, Wei W Aug. 4, 2017, 8:55 a.m. UTC | #15
On 08/04/2017 04:24 PM, Michal Hocko wrote:
>
>> For our use case, the callback just puts the reported page
>> block onto the ring, then returns. If the ring is full because the host
>> is busy, then I think it should skip this one and just return.
>> Because:
>>      A. This is an optimization feature, losing a couple of free
>>           pages to report isn't that important;
>>      B. In reality, I think it's uncommon to see this ring getting
>>          full (I didn't observe ring full in the tests), since the host
>>          (consumer) is notified to take out the page block right
>>          after it is added.
> I thought you only updated a pre-allocated bitmap... Anyway, I cannot
> comment on this part much as I am not familiar with your use case.
>   

Actually the bitmap is in the hypervisor (host). The callback puts the
(pfn,size) on a ring which is shared with the hypervisor, then the
hypervisor takes that info from the ring and updates that bitmap.


Best,
Wei
Wang, Wei W Aug. 8, 2017, 6:12 a.m. UTC | #16
On 08/03/2017 05:11 PM, Michal Hocko wrote:
> On Thu 03-08-17 14:38:18, Wei Wang wrote:
> This is just too ugly and wrong actually. Never provide struct page
> pointers outside of the zone->lock. What I've had in mind was to simply
> walk free lists of the suitable order and call the callback for each one.
> Something as simple as
>
> 	for (i = 0; i < MAX_NR_ZONES; i++) {
> 		struct zone *zone = &pgdat->node_zones[i];
>
> 		if (!populated_zone(zone))
> 			continue;

Can we directly use for_each_populated_zone(zone) here?


> 		spin_lock_irqsave(&zone->lock, flags);
> 		for (order = min_order; order < MAX_ORDER; ++order) {


This appears to be covered by for_each_migratetype_order(order, mt) below.


> 			struct free_area *free_area = &zone->free_area[order];
> 			enum migratetype mt;
> 			struct page *page;
>
> 			if (!free_area->nr_free)
> 				continue;
>
> 			for_each_migratetype_order(order, mt) {
> 				list_for_each_entry(page,
> 						&free_area->free_list[mt], lru) {
>
> 					pfn = page_to_pfn(page);
> 					visit(opaque2, pfn, 1<<order);
> 				}
> 			}
> 		}
>
> 		spin_unlock_irqrestore(&zone->lock, flags);
> 	}
>
> [...]
>

What do you think if we further simplify the above implementation like this:

for_each_populated_zone(zone) {
	for_each_migratetype_order_decend(1, order, mt) {
		spin_lock_irqsave(&zone->lock, flags);
		list_for_each_entry(page,
				&zone->free_area[order].free_list[mt], lru) {
			pfn = page_to_pfn(page);
			visit(opaque1, pfn, 1 << order);
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}


Best,
Wei
Wang, Wei W Aug. 8, 2017, 6:34 a.m. UTC | #17
On 08/08/2017 02:12 PM, Wei Wang wrote:
> On 08/03/2017 05:11 PM, Michal Hocko wrote:
>> On Thu 03-08-17 14:38:18, Wei Wang wrote:
>> This is just too ugly and wrong actually. Never provide struct page
>> pointers outside of the zone->lock. What I've had in mind was to simply
>> walk free lists of the suitable order and call the callback for each 
>> one.
>> Something as simple as
>>
>>     for (i = 0; i < MAX_NR_ZONES; i++) {
>>         struct zone *zone = &pgdat->node_zones[i];
>>
>>         if (!populated_zone(zone))
>>             continue;
>
> Can we directly use for_each_populated_zone(zone) here?
>
>
>> spin_lock_irqsave(&zone->lock, flags);
>>         for (order = min_order; order < MAX_ORDER; ++order) {
>
>
> This appears to be covered by for_each_migratetype_order(order, mt) 
> below.
>
>
>>             struct free_area *free_area = &zone->free_area[order];
>>             enum migratetype mt;
>>             struct page *page;
>>
>>             if (!free_area->nr_free)
>>                 continue;
>>
>>             for_each_migratetype_order(order, mt) {
>>                 list_for_each_entry(page,
>>                         &free_area->free_list[mt], lru) {
>>
>>                     pfn = page_to_pfn(page);
>>                     visit(opaque2, pfn, 1<<order);
>>                 }
>>             }
>>         }
>>
>>         spin_unlock_irqrestore(&zone->lock, flags);
>>     }
>>
>> [...]
>>
>
> What do you think if we further simplify the above implementation like
> this:
>
> for_each_populated_zone(zone) {
>                 for_each_migratetype_order_decend(1, order, mt) {

here it will be min_order (passed by the caller), instead of "1",
that is, for_each_migratetype_order_decend(min_order, order, mt)


>                         spin_lock_irqsave(&zone->lock, flags);
>                         list_for_each_entry(page,
>                                 &zone->free_area[order].free_list[mt], lru) {
>                                 pfn = page_to_pfn(page);
>                                 visit(opaque1, pfn, 1 << order);
>                         }
>                         spin_unlock_irqrestore(&zone->lock, flags);
>                 }
>         }
>
>


Best,
Wei
Michal Hocko Aug. 10, 2017, 7:05 a.m. UTC | #18
On Tue 08-08-17 14:34:25, Wei Wang wrote:
> On 08/08/2017 02:12 PM, Wei Wang wrote:
> >On 08/03/2017 05:11 PM, Michal Hocko wrote:
> >>On Thu 03-08-17 14:38:18, Wei Wang wrote:
> >>This is just too ugly and wrong actually. Never provide struct page
> >>pointers outside of the zone->lock. What I've had in mind was to simply
> >>walk free lists of the suitable order and call the callback for each
> >>one.
> >>Something as simple as
> >>
> >>    for (i = 0; i < MAX_NR_ZONES; i++) {
> >>        struct zone *zone = &pgdat->node_zones[i];
> >>
> >>        if (!populated_zone(zone))
> >>            continue;
> >
> >Can we directly use for_each_populated_zone(zone) here?

yes, my example couldn't because I was still assuming a per-node API

> >>spin_lock_irqsave(&zone->lock, flags);
> >>        for (order = min_order; order < MAX_ORDER; ++order) {
> >
> >
> >This appears to be covered by for_each_migratetype_order(order, mt) below.

yes but
#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

so you would have to skip orders < min_order
Wang, Wei W Aug. 10, 2017, 7:38 a.m. UTC | #19
On 08/10/2017 03:05 PM, Michal Hocko wrote:
> On Tue 08-08-17 14:34:25, Wei Wang wrote:
>> On 08/08/2017 02:12 PM, Wei Wang wrote:
>>> On 08/03/2017 05:11 PM, Michal Hocko wrote:
>>>> On Thu 03-08-17 14:38:18, Wei Wang wrote:
>>>> This is just too ugly and wrong actually. Never provide struct page
>>>> pointers outside of the zone->lock. What I've had in mind was to simply
>>>> walk free lists of the suitable order and call the callback for each
>>>> one.
>>>> Something as simple as
>>>>
>>>>     for (i = 0; i < MAX_NR_ZONES; i++) {
>>>>         struct zone *zone = &pgdat->node_zones[i];
>>>>
>>>>         if (!populated_zone(zone))
>>>>             continue;
>>> Can we directly use for_each_populated_zone(zone) here?
> yes, my example couldn't because I was still assuming a per-node API
>
>>>> spin_lock_irqsave(&zone->lock, flags);
>>>>         for (order = min_order; order < MAX_ORDER; ++order) {
>>>
>>> This appears to be covered by for_each_migratetype_order(order, mt) below.
> yes but
> #define for_each_migratetype_order(order, type) \
> 	for (order = 0; order < MAX_ORDER; order++) \
> 		for (type = 0; type < MIGRATE_TYPES; type++)
>
> so you would have to skip orders < min_order

Yes, that's why we have a new macro

#define for_each_migratetype_order_decend(min_order, order, type) \
	for (order = MAX_ORDER - 1; order < MAX_ORDER && order >= min_order; \
	     order--) \
		for (type = 0; type < MIGRATE_TYPES; type++)

If you don't like the macro, we can also directly use it in the code.

I think it would be better to report the larger free page blocks first,
since the callback has an opportunity (though just a theoretical
possibility, good to take that into consideration if possible) to skip
reporting a given free page block to the hypervisor when the ring gets
full. Losing a small block is better than losing a larger one, in terms
of the optimization work.


Best,
Wei
Michal Hocko Aug. 10, 2017, 7:53 a.m. UTC | #20
On Thu 10-08-17 15:38:34, Wei Wang wrote:
> On 08/10/2017 03:05 PM, Michal Hocko wrote:
> >On Tue 08-08-17 14:34:25, Wei Wang wrote:
> >>On 08/08/2017 02:12 PM, Wei Wang wrote:
> >>>On 08/03/2017 05:11 PM, Michal Hocko wrote:
> >>>>On Thu 03-08-17 14:38:18, Wei Wang wrote:
> >>>>This is just too ugly and wrong actually. Never provide struct page
> >>>>pointers outside of the zone->lock. What I've had in mind was to simply
> >>>>walk free lists of the suitable order and call the callback for each
> >>>>one.
> >>>>Something as simple as
> >>>>
> >>>>    for (i = 0; i < MAX_NR_ZONES; i++) {
> >>>>        struct zone *zone = &pgdat->node_zones[i];
> >>>>
> >>>>        if (!populated_zone(zone))
> >>>>            continue;
> >>>Can we directly use for_each_populated_zone(zone) here?
> >yes, my example couldn't because I was still assuming a per-node API
> >
> >>>>spin_lock_irqsave(&zone->lock, flags);
> >>>>        for (order = min_order; order < MAX_ORDER; ++order) {
> >>>
> >>>This appears to be covered by for_each_migratetype_order(order, mt) below.
> >yes but
> >#define for_each_migratetype_order(order, type) \
> >	for (order = 0; order < MAX_ORDER; order++) \
> >		for (type = 0; type < MIGRATE_TYPES; type++)
> >
> >so you would have to skip orders < min_order
> 
> Yes, that's why we have a new macro
> 
> #define for_each_migratetype_order_decend(min_order, order, type) \
> 	for (order = MAX_ORDER - 1; order < MAX_ORDER && order >= min_order; \
> 	     order--) \
> 		for (type = 0; type < MIGRATE_TYPES; type++)
> 
> If you don't like the macro, we can also directly use it in the code.
> 
> I think it would be better to report the larger free page blocks first,
> since the callback has an opportunity (though just a theoretical
> possibility, good to take that into consideration if possible) to skip
> reporting a given free page block to the hypervisor when the ring gets
> full. Losing a small block is better than losing a larger one, in terms
> of the optimization work.

I see. But I think this is so specialized that opencoding the macro
would be easier to read.
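
Opencoded, the walk body from the patch would read something like this (a
sketch; the "order < MAX_ORDER" test guards against unsigned wrap-around
when order-- runs past 0, exactly as in the macro):

for_each_populated_zone(zone) {
	for (order = MAX_ORDER - 1;
	     order < MAX_ORDER && order >= min_order; order--) {
		for (type = 0; type < MIGRATE_TYPES; type++) {
			while (!report_free_page_block(zone, order, type,
						       &page)) {
				visit(opaque1, page_to_pfn(page),
				      1UL << order);
			}
		}
	}
}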

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 46b9ac5..24481e3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1835,6 +1835,13 @@  extern void free_area_init_node(int nid, unsigned long * zones_size,
 		unsigned long zone_start_pfn, unsigned long *zholes_size);
 extern void free_initmem(void);
 
+#if IS_ENABLED(CONFIG_VIRTIO_BALLOON)
+extern void walk_free_mem_block(void *opaque1,
+				unsigned int min_order,
+				void (*visit)(void *opaque2,
+					      unsigned long pfn,
+					      unsigned long nr_pages));
+#endif
 /*
  * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
  * into the buddy system. The freed pages will be poisoned with pattern
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index fc14b8b..59eacf2 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -83,6 +83,11 @@  static inline bool is_migrate_movable(int mt)
 	for (order = 0; order < MAX_ORDER; order++) \
 		for (type = 0; type < MIGRATE_TYPES; type++)
 
+#define for_each_migratetype_order_decend(min_order, order, type) \
+	for (order = MAX_ORDER - 1; order < MAX_ORDER && order >= min_order; \
+	     order--) \
+		for (type = 0; type < MIGRATE_TYPES; type++)
+
 extern int page_group_by_mobility_disabled;
 
 #define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6d30e91..b90b513 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4761,6 +4761,115 @@  void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 	show_swap_cache_info();
 }
 
+#if IS_ENABLED(CONFIG_VIRTIO_BALLOON)
+
+/*
+ * Heuristically get a free page block in the system.
+ *
+ * It is possible that pages from the page block are used immediately after
+ * report_free_page_block() returns. It is the caller's responsibility to
+ * either detect or prevent the use of such pages.
+ *
+ * The input parameters specify the free list to check for a free page block:
+ * zone->free_area[order].free_list[migratetype]
+ *
+ * If the caller supplied page block (i.e. **page) is on the free list, offer
+ * the next page block on the list to the caller. Otherwise, offer the first
+ * page block on the list.
+ *
+ * Return 0 when a page block is found on the caller-specified free list.
+ * Otherwise, return -EAGAIN, meaning no page block was found.
+ */
+static int report_free_page_block(struct zone *zone, unsigned int order,
+				  unsigned int migratetype, struct page **page)
+{
+	struct list_head *free_list;
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&zone->lock, flags);
+
+	free_list = &zone->free_area[order].free_list[migratetype];
+	if (list_empty(free_list)) {
+		*page = NULL;
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	/* The caller is asking for the first free page block on the list */
+	if (!(*page)) {
+		*page = list_first_entry(free_list, struct page, lru);
+		ret = 0;
+		goto out;
+	}
+
+	/*
+	 * The page block passed from the caller is not on this free list
+	 * anymore (e.g. a 1MB free page block has been split). In this case,
+	 * offer the first page block on the free list that the caller is
+	 * asking for.
+	 */
+	if (PageBuddy(*page) && order != page_order(*page)) {
+		*page = list_first_entry(free_list, struct page, lru);
+		ret = 0;
+		goto out;
+	}
+
+	/*
+	 * The page block passed from the caller has been the last page block
+	 * on the list.
+	 */
+	if ((*page)->lru.next == free_list) {
+		*page = NULL;
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	/*
+	 * Finally, fall into the regular case: the page block passed from the
+	 * caller is still on the free list. Offer the next one.
+	 */
+	*page = list_next_entry((*page), lru);
+out:
+	spin_unlock_irqrestore(&zone->lock, flags);
+	return ret;
+}
+
+/*
+ * Walk through the free page blocks in the system. The @visit callback is
+ * invoked to handle each free page block.
+ *
+ * Note: some page blocks may be used after the report function returns, so it
+ * is not safe for the callback to use any pages or discard data on such page
+ * blocks.
+ */
+void walk_free_mem_block(void *opaque1,
+			 unsigned int min_order,
+			 void (*visit)(void *opaque2,
+				       unsigned long pfn,
+				       unsigned long nr_pages))
+{
+	struct zone *zone = NULL;
+	struct page *page = NULL;
+	unsigned int order;
+	unsigned long pfn, nr_pages;
+	int type;
+
+	for_each_populated_zone(zone) {
+		for_each_migratetype_order_decend(min_order, order, type) {
+			while (!report_free_page_block(zone, order, type,
+						       &page)) {
+				pfn = page_to_pfn(page);
+				nr_pages = 1 << order;
+				visit(opaque1, pfn, nr_pages);
+			}
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(walk_free_mem_block);
+
+#endif
+
 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
 {
 	zoneref->zone = zone;