@@ -1994,8 +1994,23 @@ static inline bool get_user_page_fast_only(unsigned long addr,
*/
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member, int node)
{
- long val = atomic_long_read(&mm->rss_stat.count[member]);
+ long val = 0;
+
+ /*
+ * Asking for neither a per-type member nor a node is a caller bug.
+ * WARN_ON() does not stop execution, so bail out explicitly instead
+ * of falling through to an out-of-bounds read of count[MM_NO_TYPE].
+ */
+ if (WARN_ON(node == NUMA_NO_NODE && member == MM_NO_TYPE))
+ return 0;
+
+ if (node == NUMA_NO_NODE) {
+ val = atomic_long_read(&mm->rss_stat.count[member]);
+ } else {
+#ifdef CONFIG_NUMA
+ val = atomic_long_read(&mm->rss_stat.numa_count[node]);
+#endif
+ }
#ifdef SPLIT_RSS_COUNTING
/*
* counter is updated in asynchronous manner and may go to minus.
@@ -2012,23 +2022,41 @@ void mm_trace_rss_stat(struct mm_struct *mm, int member, long member_count, int
static inline void add_mm_counter(struct mm_struct *mm, int member, long value, int node)
{
- long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);
+ long member_count = 0, numa_count = 0;
- mm_trace_rss_stat(mm, member, count, NUMA_NO_NODE, 0, value);
+ /* MM_NO_TYPE means "per-node update only": skip the type counter. */
+ if (member != MM_NO_TYPE)
+ member_count = atomic_long_add_return(value, &mm->rss_stat.count[member]);
+#ifdef CONFIG_NUMA
+ /* NUMA_NO_NODE (-1) must never be used as a numa_count[] index. */
+ if (node != NUMA_NO_NODE)
+ numa_count = atomic_long_add_return(value, &mm->rss_stat.numa_count[node]);
+#endif
+ mm_trace_rss_stat(mm, member, member_count, node, numa_count, value);
}
static inline void inc_mm_counter(struct mm_struct *mm, int member, int node)
{
- long count = atomic_long_inc_return(&mm->rss_stat.count[member]);
+ long member_count = 0, numa_count = 0;
- mm_trace_rss_stat(mm, member, count, NUMA_NO_NODE, 0, 1);
+ /* MM_NO_TYPE means "per-node update only": skip the type counter. */
+ if (member != MM_NO_TYPE)
+ member_count = atomic_long_inc_return(&mm->rss_stat.count[member]);
+#ifdef CONFIG_NUMA
+ /* NUMA_NO_NODE (-1) must never be used as a numa_count[] index. */
+ if (node != NUMA_NO_NODE)
+ numa_count = atomic_long_inc_return(&mm->rss_stat.numa_count[node]);
+#endif
+ mm_trace_rss_stat(mm, member, member_count, node, numa_count, 1);
}
static inline void dec_mm_counter(struct mm_struct *mm, int member, int node)
{
- long count = atomic_long_dec_return(&mm->rss_stat.count[member]);
+ long member_count = 0, numa_count = 0;
- mm_trace_rss_stat(mm, member, count, NUMA_NO_NODE, 0, -1);
+ /* MM_NO_TYPE means "per-node update only": skip the type counter. */
+ if (member != MM_NO_TYPE)
+ member_count = atomic_long_dec_return(&mm->rss_stat.count[member]);
+#ifdef CONFIG_NUMA
+ /* NUMA_NO_NODE (-1) must never be used as a numa_count[] index. */
+ if (node != NUMA_NO_NODE)
+ numa_count = atomic_long_dec_return(&mm->rss_stat.numa_count[node]);
+#endif
+ mm_trace_rss_stat(mm, member, member_count, node, numa_count, -1);
}
/* Optimized variant when page is already known not to be PageAnon */
@@ -188,6 +188,14 @@ void sync_mm_rss(struct mm_struct *mm)
current->rss_stat.count[i] = 0;
}
}
+#ifdef CONFIG_NUMA
+ /*
+ * Flush the task-local per-node deltas into the mm. MM_NO_TYPE
+ * updates only numa_count[]; the per-type counters were already
+ * flushed by the loop above.
+ */
+ for_each_node(i) {
+ if (current->rss_stat.numa_count[i]) {
+ add_mm_counter(mm, MM_NO_TYPE, current->rss_stat.numa_count[i], i);
+ current->rss_stat.numa_count[i] = 0;
+ }
+ }
+#endif
current->rss_stat.events = 0;
}
@@ -195,9 +203,14 @@ static void add_mm_counter_fast(struct mm_struct *mm, int member, int val, int n
{
struct task_struct *task = current;
- if (likely(task->mm == mm))
+ if (likely(task->mm == mm)) {
task->rss_stat.count[member] += val;
- else
+#ifdef CONFIG_NUMA
+ /*
+ * NUMA_NO_NODE is -1: indexing numa_count[] with it would
+ * corrupt adjacent memory, so guard it exactly as the slow
+ * path (add_mm_counter) does.
+ */
+ if (node != NUMA_NO_NODE)
+ task->rss_stat.numa_count[node] += val;
+#endif
+ } else
add_mm_counter(mm, member, val, node);
}
#define inc_mm_counter_fast(mm, member, node) add_mm_counter_fast(mm, member, 1, node)
@@ -508,6 +519,11 @@ static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss, int *numa_rss)
for (i = 0; i < NR_MM_COUNTERS; i++)
if (rss[i])
add_mm_counter(mm, i, rss[i], NUMA_NO_NODE);
+#ifdef CONFIG_NUMA
+ /* Apply the batched per-node deltas; MM_NO_TYPE skips count[]. */
+ for_each_node(i)
+ if (numa_rss[i] != 0)
+ add_mm_counter(mm, MM_NO_TYPE, numa_rss[i], i);
+#endif
}
/*
Now that all the infrastructure is in place, modify `get/add/inc/dec_mm_counter`, `sync_mm_rss`, `add_mm_counter_fast` and `add_mm_rss_vec` to enable per-NUMA-node rss_stat counting. Signed-off-by: Gang Li <ligang.bdlg@bytedance.com> --- include/linux/mm.h | 42 +++++++++++++++++++++++++++++++++++------- mm/memory.c | 20 ++++++++++++++++++-- 2 files changed, 53 insertions(+), 9 deletions(-)