--- a/arch/x86/mm/ibs.c
+++ b/arch/x86/mm/ibs.c
@@ -16,6 +16,21 @@ struct ibs_access_work {
u64 laddr, paddr;
};
+static bool delay_hw_access_profiling(struct mm_struct *mm)
+{
+ unsigned long delay, now = jiffies;
+
+ if (!mm->numa_hw_access_delay)
+ mm->numa_hw_access_delay = now +
+ msecs_to_jiffies(sysctl_numa_balancing_access_faults_delay);
+
+ delay = mm->numa_hw_access_delay;
+ if (time_before(now, delay))
+ return true;
+
+ return false;
+}
+
void hw_access_sched_in(struct task_struct *prev, struct task_struct *curr)
{
u64 config = 0;
@@ -28,6 +43,9 @@ void hw_access_sched_in(struct task_struct *prev, struct task_struct *curr)
if (!curr->mm)
goto out;
+ if (delay_hw_access_profiling(curr->mm))
+ goto out;
+
if (curr->numa_sample_period)
period = curr->numa_sample_period;
else
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1397,6 +1397,8 @@ static inline int folio_nid(const struct folio *folio)
}
#ifdef CONFIG_NUMA_BALANCING
+extern unsigned int sysctl_numa_balancing_access_faults_delay;
+
/* page access time bits needs to hold at least 4 seconds */
#define PAGE_ACCESS_TIME_MIN_BITS 12
#if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -750,6 +750,9 @@ struct mm_struct {
/* numa_scan_seq prevents two threads remapping PTEs. */
int numa_scan_seq;
+
+ /* HW-provided access info is collected after this initial delay */
+ unsigned long numa_hw_access_delay;
#endif
/*
* An operation with batched TLB flushing is going on. Anything
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -342,6 +342,8 @@ static __init int sched_init_debug(void)
&sysctl_numa_balancing_sample_period_max);
debugfs_create_u32("access_faults_threshold", 0644, numa,
&sysctl_numa_balancing_access_faults_threshold);
+ debugfs_create_u32("access_faults_delay", 0644, numa,
+ &sysctl_numa_balancing_access_faults_delay);
#endif
debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1097,6 +1097,7 @@ unsigned int sysctl_numa_balancing_sample_period_def = 10000;
unsigned int sysctl_numa_balancing_sample_period_min = 5000;
unsigned int sysctl_numa_balancing_sample_period_max = 20000;
unsigned int sysctl_numa_balancing_access_faults_threshold = 250;
+unsigned int sysctl_numa_balancing_access_faults_delay = 1000;
/*
* Approximate time to scan a full NUMA task in ms. The task scan period is
@@ -3189,6 +3190,8 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
if (mm_users == 1) {
mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
mm->numa_scan_seq = 0;
+ mm->numa_hw_access_delay = jiffies +
+ msecs_to_jiffies(sysctl_numa_balancing_access_faults_delay);
}
}
p->node_stamp = 0;
Allow an initial delay before enabling the collection of IBS-provided
access info. The delay defaults to 1000 ms and can be tuned via the new
access_faults_delay debugfs knob.

Signed-off-by: Bharata B Rao <bharata@amd.com>
---
 arch/x86/mm/ibs.c        | 18 ++++++++++++++++++
 include/linux/mm.h       |  2 ++
 include/linux/mm_types.h |  3 +++
 kernel/sched/debug.c     |  2 ++
 kernel/sched/fair.c      |  3 +++
 5 files changed, 28 insertions(+)
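
As a reference, here is a minimal userspace sketch (not part of the patch)
of the wraparound-safe jiffies comparison that delay_hw_access_profiling()
relies on. The time_before() macro below mirrors the kernel's semantics,
and the "now"/"delay" values are made up purely for illustration:

    #include <stdio.h>

    /* Same semantics as the kernel's time_before(a, b) for unsigned long */
    #define time_before(a, b)	((long)((a) - (b)) < 0)

    int main(void)
    {
            /* Pretend jiffies is about to wrap around */
            unsigned long now = (unsigned long)-100;
            /* A deadline 1000 ticks in the future wraps past zero */
            unsigned long delay = now + 1000;

            /*
             * Numerically delay < now, yet the signed subtraction still
             * reports that "now" lies before the deadline, so profiling
             * would remain delayed.
             */
            printf("profiling delayed: %d\n", time_before(now, delay));
            return 0;
    }

With sysctl_numa_balancing_access_faults_delay at its default of 1000 ms,
hw_access_sched_in() simply skips programming the IBS sample period until
roughly a second after the mm's delay stamp is first set.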