diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -886,6 +886,8 @@ bool zone_watermark_ok(struct zone *z, unsigned int order,
 		unsigned int alloc_flags);
 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
 		unsigned long mark, int highest_zoneidx);
+bool pgdat_toptier_balanced(pg_data_t *pgdat, int order, int classzone_idx);
+
 /*
  * Memory initialization context, use to differentiate memory added by
  * the platform statically or via memory hotplug interface.
@@ -1466,5 +1468,6 @@ void sparse_init(void);
 #endif

 #endif /* !__GENERATING_BOUNDS.H */
+
 #endif /* !__ASSEMBLY__ */
 #endif /* _LINUX_MMZONE_H */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3519,6 +3519,8 @@ struct page *rmqueue(struct zone *preferred_zone,
 	if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
 		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
 		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
+	} else if (!pgdat_toptier_balanced(zone->zone_pgdat, order, zone_idx(zone))) {
+		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
 	}

 	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
diff --git a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3625,7 +3625,7 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx)
 	return false;
 }

-static bool pgdat_toptier_balanced(pg_data_t *pgdat, int order, int classzone_idx)
+bool pgdat_toptier_balanced(pg_data_t *pgdat, int order, int classzone_idx)
 {
 	int i;
 	unsigned long mark;
Detect during page allocation whether free toptier memory is low. If so,
wake up kswapd to reclaim memory from those mem cgroups that have
exceeded their limit.

Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
---
 include/linux/mmzone.h | 3 +++
 mm/page_alloc.c        | 2 ++
 mm/vmscan.c            | 2 -
 3 files changed, 6 insertions(+), 1 deletion(-)
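
For reference, here is a minimal sketch of what pgdat_toptier_balanced()
could look like, modeled on the neighboring pgdat_balanced(). The hunk
above only shows the signature and the first two locals, so the zone
walk, the use of the high watermark, and the node_is_toptier() helper
are illustrative assumptions, not the body from this series:

/*
 * Sketch only -- not the implementation from this patch. Returns true
 * when the node either is not a top-tier node or still has an eligible
 * zone above its high watermark; rmqueue() wakes kswapd when this
 * returns false.
 */
bool pgdat_toptier_balanced(pg_data_t *pgdat, int order, int classzone_idx)
{
	int i;
	unsigned long mark;
	struct zone *zone;

	/* Assumed helper: only top-tier nodes need this check. */
	if (!node_is_toptier(pgdat->node_id))
		return true;

	for (i = 0; i <= classzone_idx; i++) {
		zone = pgdat->node_zones + i;

		if (!managed_zone(zone))
			continue;

		/* One zone above the high watermark counts as balanced. */
		mark = high_wmark_pages(zone);
		if (zone_watermark_ok_safe(zone, order, mark, classzone_idx))
			return true;
	}

	return false;
}

Keeping the not-a-top-tier-node case an early return matters because the
check sits on the rmqueue() fast path: allocations on lower-tier nodes
would pay only a node-id test before falling through.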