
[v6,1/2] mm/vmstat: Use per cpu variable to track a vmstat discrepancy

Message ID 20220808194820.676246-2-atomlin@redhat.com (mailing list archive)
State New
Series tick/sched: Ensure quiet_vmstat() is called when the idle tick was stopped too

Commit Message

Aaron Tomlin Aug. 8, 2022, 7:48 p.m. UTC
This patch incorporates an idea from Marcelo's patch [1] where a
CPU-specific variable, namely vmstat_dirty, is used to indicate
whether a vmstat imbalance is present for a given CPU. Therefore, at
the appropriate time, we can fold all the remaining differentials.

[1]: https://lore.kernel.org/lkml/20220204173554.763888172@fedora.localdomain/

Signed-off-by: Aaron Tomlin <atomlin@redhat.com>
---
 mm/vmstat.c | 46 +++++++++++++++-------------------------------
 1 file changed, 15 insertions(+), 31 deletions(-)

Comments

Marcelo Tosatti Aug. 15, 2022, 8:18 p.m. UTC | #1
Hi Aaron,

On Mon, Aug 08, 2022 at 08:48:19PM +0100, Aaron Tomlin wrote:
> This patch incorporates an idea from Marcelo's patch [1] where a
> CPU-specific variable, namely vmstat_dirty, is used to indicate
> whether a vmstat imbalance is present for a given CPU. Therefore, at
> the appropriate time, we can fold all the remaining differentials.
> 
> [1]: https://lore.kernel.org/lkml/20220204173554.763888172@fedora.localdomain/
> 
> Signed-off-by: Aaron Tomlin <atomlin@redhat.com>
> ---
>  mm/vmstat.c | 46 +++++++++++++++-------------------------------
>  1 file changed, 15 insertions(+), 31 deletions(-)
> 
> diff --git a/mm/vmstat.c b/mm/vmstat.c
> index 373d2730fcf2..51564b7c85fe 100644
> --- a/mm/vmstat.c
> +++ b/mm/vmstat.c
> @@ -195,6 +195,12 @@ void fold_vm_numa_events(void)
>  #endif
>  
>  #ifdef CONFIG_SMP
> +static DEFINE_PER_CPU_ALIGNED(bool, vmstat_dirty);
> +
> +static inline void mark_vmstat_dirty(void)
> +{
> +	this_cpu_write(vmstat_dirty, true);
> +}
>  
>  int calculate_pressure_threshold(struct zone *zone)
>  {
> @@ -367,6 +373,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
>  		x = 0;
>  	}
>  	__this_cpu_write(*p, x);
> +	mark_vmstat_dirty();
>  
>  	if (IS_ENABLED(CONFIG_PREEMPT_RT))
>  		preempt_enable();
> @@ -405,6 +412,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
>  		x = 0;
>  	}
>  	__this_cpu_write(*p, x);
> +	mark_vmstat_dirty();
>  
>  	if (IS_ENABLED(CONFIG_PREEMPT_RT))
>  		preempt_enable();
> @@ -603,6 +611,7 @@ static inline void mod_zone_state(struct zone *zone,
>  
>  	if (z)
>  		zone_page_state_add(z, zone, item);
> +	mark_vmstat_dirty();
>  }
>  
>  void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
> @@ -671,6 +680,7 @@ static inline void mod_node_state(struct pglist_data *pgdat,
>  
>  	if (z)
>  		node_page_state_add(z, pgdat, item);
> +	mark_vmstat_dirty();
>  }
>  
>  void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
> @@ -1873,6 +1883,7 @@ int sysctl_stat_interval __read_mostly = HZ;
>  static void refresh_vm_stats(struct work_struct *work)
>  {
>  	refresh_cpu_vm_stats(true);

Another task can be scheduled here, set data in
zone->per_cpu_zonestats->vm_stat_diff (after refresh_cpu_vm_stats()
has cleared it to zero), and you'll end up incorrectly marking the
per-CPU vmstat_dirty bool as false.

I guess it is necessary to disable preemption here.
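
Something along these lines (an untested sketch, only to illustrate the
suggestion; not part of the posted patch) would keep the fold and the
flag clear atomic with respect to other tasks on this CPU:

	static void refresh_vm_stats(struct work_struct *work)
	{
		/*
		 * Untested sketch: with preemption disabled, no other task
		 * can update a per-CPU counter (and mark vmstat_dirty)
		 * between the fold and the flag clear below.
		 */
		preempt_disable();
		refresh_cpu_vm_stats(true);
		this_cpu_write(vmstat_dirty, false);
		preempt_enable();
	}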

> +	this_cpu_write(vmstat_dirty, false);
>  }
>  
>  int vmstat_refresh(struct ctl_table *table, int write,
> @@ -1937,6 +1948,7 @@ int vmstat_refresh(struct ctl_table *table, int write,
>  static void vmstat_update(struct work_struct *w)
>  {
>  	if (refresh_cpu_vm_stats(true)) {
> +		this_cpu_write(vmstat_dirty, false);

Same problem.
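
The same sketch applied to vmstat_update(), again untested and only to
illustrate the idea:

	static void vmstat_update(struct work_struct *w)
	{
		preempt_disable();
		if (refresh_cpu_vm_stats(true)) {
			this_cpu_write(vmstat_dirty, false);
			/* re-arm vmstat_work as before */
			queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
					this_cpu_ptr(&vmstat_work),
					round_jiffies_relative(sysctl_stat_interval));
		}
		preempt_enable();
	}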

>  		/*
>  		 * Counters were updated so we expect more updates
>  		 * to occur in the future. Keep on running the
> @@ -1948,35 +1960,6 @@ static void vmstat_update(struct work_struct *w)
>  	}
>  }
>  
> -/*
> - * Check if the diffs for a certain cpu indicate that
> - * an update is needed.
> - */
> -static bool need_update(int cpu)
> -{
> -	pg_data_t *last_pgdat = NULL;
> -	struct zone *zone;
> -
> -	for_each_populated_zone(zone) {
> -		struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
> -		struct per_cpu_nodestat *n;
> -
> -		/*
> -		 * The fast way of checking if there are any vmstat diffs.
> -		 */
> -		if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff)))
> -			return true;
> -
> -		if (last_pgdat == zone->zone_pgdat)
> -			continue;
> -		last_pgdat = zone->zone_pgdat;
> -		n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
> -		if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff)))
> -			return true;
> -	}
> -	return false;
> -}
> -
>  /*
>   * Switch off vmstat processing and then fold all the remaining differentials
>   * until the diffs stay at zero. The function is used by NOHZ and can only be
> @@ -1990,7 +1973,7 @@ void quiet_vmstat(void)
>  	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
>  		return;
>  
> -	if (!need_update(smp_processor_id()))
> +	if (!__this_cpu_read(vmstat_dirty))
>  		return;
>  
>  	/*
> @@ -2000,6 +1983,7 @@ void quiet_vmstat(void)
>  	 * vmstat_shepherd will take care about that for us.
>  	 */
>  	refresh_cpu_vm_stats(false);
> +	__this_cpu_write(vmstat_dirty, false);
>  }

From this context, irqs should be disabled, so clearing vmstat_dirty
here should be safe.

>  /*
> @@ -2021,7 +2005,7 @@ static void vmstat_shepherd(struct work_struct *w)
>  	for_each_online_cpu(cpu) {
>  		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
>  
> -		if (!delayed_work_pending(dw) && need_update(cpu))
> +		if (!delayed_work_pending(dw) && per_cpu(vmstat_dirty, cpu))
>  			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
>  
>  		cond_resched();
> -- 
> 2.37.1
> 
>

Patch

diff --git a/mm/vmstat.c b/mm/vmstat.c
index 373d2730fcf2..51564b7c85fe 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -195,6 +195,12 @@  void fold_vm_numa_events(void)
 #endif
 
 #ifdef CONFIG_SMP
+static DEFINE_PER_CPU_ALIGNED(bool, vmstat_dirty);
+
+static inline void mark_vmstat_dirty(void)
+{
+	this_cpu_write(vmstat_dirty, true);
+}
 
 int calculate_pressure_threshold(struct zone *zone)
 {
@@ -367,6 +373,7 @@  void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 		x = 0;
 	}
 	__this_cpu_write(*p, x);
+	mark_vmstat_dirty();
 
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		preempt_enable();
@@ -405,6 +412,7 @@  void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 		x = 0;
 	}
 	__this_cpu_write(*p, x);
+	mark_vmstat_dirty();
 
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		preempt_enable();
@@ -603,6 +611,7 @@  static inline void mod_zone_state(struct zone *zone,
 
 	if (z)
 		zone_page_state_add(z, zone, item);
+	mark_vmstat_dirty();
 }
 
 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
@@ -671,6 +680,7 @@  static inline void mod_node_state(struct pglist_data *pgdat,
 
 	if (z)
 		node_page_state_add(z, pgdat, item);
+	mark_vmstat_dirty();
 }
 
 void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
@@ -1873,6 +1883,7 @@  int sysctl_stat_interval __read_mostly = HZ;
 static void refresh_vm_stats(struct work_struct *work)
 {
 	refresh_cpu_vm_stats(true);
+	this_cpu_write(vmstat_dirty, false);
 }
 
 int vmstat_refresh(struct ctl_table *table, int write,
@@ -1937,6 +1948,7 @@  int vmstat_refresh(struct ctl_table *table, int write,
 static void vmstat_update(struct work_struct *w)
 {
 	if (refresh_cpu_vm_stats(true)) {
+		this_cpu_write(vmstat_dirty, false);
 		/*
 		 * Counters were updated so we expect more updates
 		 * to occur in the future. Keep on running the
@@ -1948,35 +1960,6 @@  static void vmstat_update(struct work_struct *w)
 	}
 }
 
-/*
- * Check if the diffs for a certain cpu indicate that
- * an update is needed.
- */
-static bool need_update(int cpu)
-{
-	pg_data_t *last_pgdat = NULL;
-	struct zone *zone;
-
-	for_each_populated_zone(zone) {
-		struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
-		struct per_cpu_nodestat *n;
-
-		/*
-		 * The fast way of checking if there are any vmstat diffs.
-		 */
-		if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff)))
-			return true;
-
-		if (last_pgdat == zone->zone_pgdat)
-			continue;
-		last_pgdat = zone->zone_pgdat;
-		n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
-		if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff)))
-			return true;
-	}
-	return false;
-}
-
 /*
  * Switch off vmstat processing and then fold all the remaining differentials
  * until the diffs stay at zero. The function is used by NOHZ and can only be
@@ -1990,7 +1973,7 @@  void quiet_vmstat(void)
 	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
 		return;
 
-	if (!need_update(smp_processor_id()))
+	if (!__this_cpu_read(vmstat_dirty))
 		return;
 
 	/*
@@ -2000,6 +1983,7 @@  void quiet_vmstat(void)
 	 * vmstat_shepherd will take care about that for us.
 	 */
 	refresh_cpu_vm_stats(false);
+	__this_cpu_write(vmstat_dirty, false);
 }
 
 /*
@@ -2021,7 +2005,7 @@  static void vmstat_shepherd(struct work_struct *w)
 	for_each_online_cpu(cpu) {
 		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
 
-		if (!delayed_work_pending(dw) && need_update(cpu))
+		if (!delayed_work_pending(dw) && per_cpu(vmstat_dirty, cpu))
 			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
 
 		cond_resched();