@@ -51,3 +51,11 @@ Description: Control merging pages across different NUMA nodes.
When it is set to 0 only pages from the same node are merged,
otherwise pages from all nodes can be merged together (default).
+
+What: /sys/kernel/mm/ksm/general_profit
+Date: April 2023
+KernelVersion: 6.4
+Contact: Linux memory management mailing list <linux-mm@kvack.org>
+Description: Measure how effective KSM is.
+ general_profit: how effective KSM is, in bytes. The formula
+ for the calculation is in Documentation/admin-guide/mm/ksm.rst.
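
As a quick illustration (not part of the patch), the new attribute can be
read from userspace with a minimal sketch like the one below; only the sysfs
path comes from the entry above, everything else is assumed::

  /* Illustrative sketch: print the KSM general profit, in bytes. */
  #include <stdio.h>

  int main(void)
  {
          FILE *f = fopen("/sys/kernel/mm/ksm/general_profit", "r");
          long profit;

          if (!f) {
                  perror("fopen");
                  return 1;
          }
          if (fscanf(f, "%ld", &profit) != 1) {
                  fclose(f);
                  return 1;
          }
          fclose(f);

          /* A negative value means KSM metadata costs more than it saves. */
          printf("KSM general profit: %ld bytes\n", profit);
          return 0;
  }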
@@ -157,6 +157,8 @@ stable_node_chains_prune_millisecs
The effectiveness of KSM and MADV_MERGEABLE is shown in ``/sys/kernel/mm/ksm/``:
+general_profit
+ how effective KSM is. The calculation is explained below.
pages_shared
how many shared pages are being used
pages_sharing
@@ -207,7 +209,8 @@ several times, which are unprofitable memory consumed.
ksm_rmap_items * sizeof(rmap_item).
where ksm_merging_pages is shown under the directory ``/proc/<pid>/``,
- and ksm_rmap_items is shown in ``/proc/<pid>/ksm_stat``.
+ and ksm_rmap_items is shown in ``/proc/<pid>/ksm_stat``. The process profit
+ is also shown in ``/proc/<pid>/ksm_stat`` as ksm_process_profit.
From the perspective of application, a high ratio of ``ksm_rmap_items`` to
``ksm_merging_pages`` means a bad madvise-applied policy, so developers or
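
As a rough worked example of the process profit formula above (all numbers
are illustrative; sizeof(rmap_item) is assumed to be 64 bytes, as on a
typical 64-bit build), a process with 1000 merged pages and 2000 rmap items
would see::

  process_profit =~ 1000 * 4096 - 2000 * 64
                  = 4096000 - 128000
                  = 3968000 bytes (about 3.8 MiB)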
@@ -96,6 +96,7 @@
#include <linux/time_namespace.h>
#include <linux/resctrl.h>
#include <linux/cn_proc.h>
+#include <linux/ksm.h>
#include <trace/events/oom.h>
#include "internal.h"
#include "fd.h"
@@ -3207,6 +3208,8 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
mm = get_task_mm(task);
if (mm) {
seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items);
+ seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
+ seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
mmput(mm);
}
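
Purely for illustration (not part of the patch), the extended
/proc/<pid>/ksm_stat output could be consumed from userspace roughly as
sketched below; the field names are the ones printed above, the parsing
itself is an assumption::

  /* Sketch: print the per-process KSM statistics of the calling process. */
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          char line[128];
          FILE *f = fopen("/proc/self/ksm_stat", "r");

          if (!f) {
                  perror("fopen");
                  return 1;
          }
          while (fgets(line, sizeof(line), f)) {
                  /* Lines look like "ksm_process_profit 3968000". */
                  if (!strncmp(line, "ksm_merging_pages", 17) ||
                      !strncmp(line, "ksm_process_profit", 18))
                          fputs(line, stdout);
          }
          fclose(f);
          return 0;
  }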
@@ -64,6 +64,10 @@ struct page *ksm_might_need_to_copy(struct page *page,
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
+#ifdef CONFIG_PROC_FS
+long ksm_process_profit(struct mm_struct *mm);
+#endif /* CONFIG_PROC_FS */
+
#else /* !CONFIG_KSM */
static inline void ksm_add_vma(struct vm_area_struct *vma)
@@ -2964,6 +2964,14 @@ static void wait_while_offlining(void)
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
+#ifdef CONFIG_PROC_FS
+long ksm_process_profit(struct mm_struct *mm)
+{
+ return mm->ksm_merging_pages * PAGE_SIZE -
+ mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
+}
+#endif /* CONFIG_PROC_FS */
+
#ifdef CONFIG_SYSFS
/*
* This all compiles without CONFIG_SYSFS, but is a waste of space.
@@ -3228,6 +3236,18 @@ static ssize_t pages_volatile_show(struct kobject *kobj,
}
KSM_ATTR_RO(pages_volatile);
+static ssize_t general_profit_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ long general_profit;
+
+ general_profit = ksm_pages_sharing * PAGE_SIZE -
+ ksm_rmap_items * sizeof(struct ksm_rmap_item);
+
+ return sysfs_emit(buf, "%ld\n", general_profit);
+}
+KSM_ATTR_RO(general_profit);
+
static ssize_t stable_node_dups_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -3292,6 +3312,7 @@ static struct attribute *ksm_attrs[] = {
&stable_node_dups_attr.attr,
&stable_node_chains_prune_millisecs_attr.attr,
&use_zero_pages_attr.attr,
+ &general_profit_attr.attr,
NULL,
};
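
As a final hedged sketch (illustrative only; both files live under
/sys/kernel/mm/ksm/), the new general_profit attribute can be read together
with the existing pages_sharing counter: per the formula in
general_profit_show(), saved memory is pages_sharing times the page size,
and the rmap_item overhead is saved minus profit::

  /* Sketch: derive KSM savings and metadata overhead from sysfs. */
  #include <stdio.h>
  #include <unistd.h>

  static long read_long(const char *path)
  {
          long val = 0;
          FILE *f = fopen(path, "r");

          if (f) {
                  if (fscanf(f, "%ld", &val) != 1)
                          val = 0;
                  fclose(f);
          }
          return val;
  }

  int main(void)
  {
          long page_size = sysconf(_SC_PAGESIZE);
          long sharing = read_long("/sys/kernel/mm/ksm/pages_sharing");
          long profit = read_long("/sys/kernel/mm/ksm/general_profit");
          long saved = sharing * page_size;

          printf("saved:    %ld bytes\n", saved);
          printf("profit:   %ld bytes\n", profit);
          printf("overhead: %ld bytes\n", saved - profit);
          return 0;
  }

A negative general_profit simply means that, per the formula, the rmap_item
metadata currently costs more memory than page sharing saves.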