@@ -95,6 +95,8 @@ struct damon_ctx {
/* callbacks */
void (*init_target_regions)(struct damon_ctx *context);
void (*update_target_regions)(struct damon_ctx *context);
+ void (*prepare_access_checks)(struct damon_ctx *context);
+ unsigned int (*check_accesses)(struct damon_ctx *context);
void (*sample_cb)(struct damon_ctx *context);
void (*aggregate_cb)(struct damon_ctx *context);
};
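The first hunk adds the two access check primitives to the callback set of struct damon_ctx. Like init_target_regions and update_target_regions above them, prepare_access_checks and check_accesses are now called through the context, so a monitoring target other than a process virtual address space can plug in its own implementations. check_accesses returns the maximum nr_accesses observed among the regions; the last hunk below feeds that value into the region merging threshold.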
@@ -59,6 +59,8 @@
static void kdamond_init_vm_regions(struct damon_ctx *ctx);
static void kdamond_update_vm_regions(struct damon_ctx *ctx);
+static void kdamond_prepare_vm_access_checks(struct damon_ctx *ctx);
+static unsigned int kdamond_check_vm_accesses(struct damon_ctx *ctx);

/* A monitoring context for debugfs interface users. */
static struct damon_ctx damon_user_ctx = {
@@ -70,6 +72,8 @@ static struct damon_ctx damon_user_ctx = {
.init_target_regions = kdamond_init_vm_regions,
.update_target_regions = kdamond_update_vm_regions,
+ .prepare_access_checks = kdamond_prepare_vm_access_checks,
+ .check_accesses = kdamond_check_vm_accesses,
};

/*
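Forward declarations for the virtual address space implementations come next, and the debugfs user context is wired to them, so the default behavior does not change: damon_user_ctx keeps monitoring virtual address spaces through the functions the remaining hunks rename.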
@@ -506,7 +510,7 @@ static void damon_mkold(struct mm_struct *mm, unsigned long addr)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}

-static void damon_prepare_access_check(struct damon_ctx *ctx,
+static void damon_prepare_vm_access_check(struct damon_ctx *ctx,
struct mm_struct *mm, struct damon_region *r)
{
r->sampling_addr = damon_rand(ctx, r->ar.start, r->ar.end);
@@ -514,7 +518,7 @@ static void damon_prepare_access_check(struct damon_ctx *ctx,
damon_mkold(mm, r->sampling_addr);
}

-static void kdamond_prepare_access_checks(struct damon_ctx *ctx)
+static void kdamond_prepare_vm_access_checks(struct damon_ctx *ctx)
{
struct damon_task *t;
struct mm_struct *mm;
@@ -525,7 +529,7 @@ static void kdamond_prepare_access_checks(struct damon_ctx *ctx)
if (!mm)
continue;
damon_for_each_region(r, t)
- damon_prepare_access_check(ctx, mm, r);
+ damon_prepare_vm_access_check(ctx, mm, r);
mmput(mm);
}
}
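The following hunks are mostly mechanical renames marking the existing functions as the virtual memory ('vm') implementation. kdamond_prepare_vm_access_checks walks every region of each target task and, through damon_prepare_vm_access_check, picks a random sampling address inside the region and clears its accessed bit via damon_mkold.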
@@ -563,7 +567,7 @@ static bool damon_young(struct mm_struct *mm, unsigned long addr,
* mm 'mm_struct' for the given virtual address space
* r the region to be checked
*/
-static void damon_check_access(struct damon_ctx *ctx,
+static void damon_check_vm_access(struct damon_ctx *ctx,
struct mm_struct *mm, struct damon_region *r)
{
static struct mm_struct *last_mm;
@@ -587,7 +591,7 @@ static void damon_check_access(struct damon_ctx *ctx,
last_addr = r->sampling_addr;
}

-static unsigned int kdamond_check_accesses(struct damon_ctx *ctx)
+static unsigned int kdamond_check_vm_accesses(struct damon_ctx *ctx)
{
struct damon_task *t;
struct mm_struct *mm;
@@ -599,12 +603,12 @@ static unsigned int kdamond_check_accesses(struct damon_ctx *ctx)
if (!mm)
continue;
damon_for_each_region(r, t) {
- damon_check_access(ctx, mm, r);
+ damon_check_vm_access(ctx, mm, r);
max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
}
-
mmput(mm);
}
+
return max_nr_accesses;
}
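Its counterpart, kdamond_check_vm_accesses, has damon_check_vm_access test whether each region's sampling address was accessed since the last preparation, and returns the maximum nr_accesses it sees. The last_mm and last_addr statics apparently let the helper reuse the previous result when consecutive regions sample the same address; the blank line moved around mmput() is a small whitespace cleanup folded into the hunk.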
@@ -1134,13 +1138,13 @@ static int kdamond_fn(void *data)
pr_info("kdamond (%d) starts\n", ctx->kdamond->pid);
ctx->init_target_regions(ctx);
while (!kdamond_need_stop(ctx)) {
- kdamond_prepare_access_checks(ctx);
+ ctx->prepare_access_checks(ctx);
if (ctx->sample_cb)
ctx->sample_cb(ctx);
usleep_range(ctx->sample_interval, ctx->sample_interval + 1);
- max_nr_accesses = kdamond_check_accesses(ctx);
+ max_nr_accesses = ctx->check_accesses(ctx);
if (kdamond_aggregate_interval_passed(ctx)) {
kdamond_merge_regions(ctx, max_nr_accesses / 10);
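Finally, kdamond_fn invokes the primitives through the context rather than hard-coding the vm functions. With this in place, plugging in an alternative monitor is a matter of filling the four region and access check callbacks. A minimal sketch of such wiring follows, assuming a hypothetical alternative implementation; every my_* name is an illustrative placeholder, not part of this patch:

/*
 * Hypothetical sketch: wiring an alternative set of monitoring
 * primitives into a damon_ctx. Only the prepare_access_checks and
 * check_accesses fields are introduced by this patch; every my_*
 * symbol is a placeholder, not part of DAMON.
 */
static void my_init_regions(struct damon_ctx *ctx)
{
	/* construct the initial monitoring target regions of ctx */
}

static void my_update_regions(struct damon_ctx *ctx)
{
	/* track dynamic changes of the monitoring target */
}

static void my_prepare_access_checks(struct damon_ctx *ctx)
{
	/* e.g., clear the accessed bits of each region's sampling target */
}

static unsigned int my_check_accesses(struct damon_ctx *ctx)
{
	/* update each region's nr_accesses; return the maximum of them */
	return 0;
}

static struct damon_ctx my_ctx = {
	.init_target_regions = my_init_regions,
	.update_target_regions = my_update_regions,
	.prepare_access_checks = my_prepare_access_checks,
	.check_accesses = my_check_accesses,
};

The main loop shown above would then drive my_prepare_access_checks before each sampling interval and my_check_accesses after it, exactly as it drives the vm variants, and the returned maximum nr_accesses keeps the merge threshold computation working unmodified.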