@@ -249,6 +249,22 @@ static unsigned long damon_pa_mark_accessed(struct damon_region *r)
return applied * PAGE_SIZE;
}
+static unsigned long damon_pa_cold(struct damon_region *r) /* deactivate pages of @r; return bytes applied */
+{
+ unsigned long addr, applied = 0;
+
+ for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
+ struct page *page = damon_get_page(PHYS_PFN(addr)); /* ref'd page, or NULL if unavailable */
+
+ if (!page) /* skip frames damon_get_page() could not provide */
+ continue;
+ deactivate_page(page); /* hint the page as cold to the LRU lists */
+ put_page(page); /* drop the reference from damon_get_page() */
+ applied++;
+ }
+ return applied * PAGE_SIZE; /* total bytes the action was applied to */
+}
+
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
struct damon_target *t, struct damon_region *r,
struct damos *scheme)
@@ -258,6 +274,8 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
return damon_pa_pageout(r);
case DAMOS_HOT:
return damon_pa_mark_accessed(r);
+ case DAMOS_COLD:
+ return damon_pa_cold(r);
default:
break;
}
@@ -273,6 +291,8 @@ static int damon_pa_scheme_score(struct damon_ctx *context,
return damon_pageout_score(context, r, scheme);
case DAMOS_HOT:
return damon_hot_score(context, r, scheme);
+ case DAMOS_COLD:
+ return damon_pageout_score(context, r, scheme);
default:
break;
}
DAMOS_COLD is currently supported by the virtual address spaces monitoring
operations set (vaddr).  This commit adds support of the action to the
physical address space monitoring operations set (paddr).  Using this
together with the DAMOS_HOT action, users can proactively sort LRU lists so
that performance degradation under memory pressure can be reduced.

Signed-off-by: SeongJae Park <sj@kernel.org>
---
 mm/damon/paddr.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)