@@ -160,13 +160,18 @@ struct damos {
*
* @init_target_regions: Constructs initial monitoring target regions.
* @update_target_regions: Updates monitoring target regions.
+ * @prepare_access_checks: Prepares the next access check of target regions.
+ * @check_accesses: Checks the accesses of target regions.
* @sample_cb: Called for each sampling interval.
* @aggregate_cb: Called for each aggregation interval.
*
* The monitoring thread calls @init_target_regions before starting the
- * monitoring, @update_target_regions for each @regions_update_interval. By
+ * monitoring, @update_target_regions for each @regions_update_interval, and
+ * @prepare_access_checks and @check_accesses for each @sample_interval. By
* setting these callbacks to appropriate functions, therefore, users can
- * monitor specific range of virtual address space.
+ * monitor any address space by supplying its special handling. If these are not
+ * explicitly configured, the functions for virtual memory address space
+ * monitoring are used.
*
* @sample_cb and @aggregate_cb are called from @kdamond for each of the
* sampling intervals and aggregation intervals, respectively. Therefore,
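As the kernel-doc above notes, monitoring a different address space is now only a
matter of swapping these callbacks. A minimal configuration sketch follows; the
my_* symbols are hypothetical stand-ins for an implementation-specific address
space, not part of this patch:

	/* Hypothetical primitives for a non-VM address space. */
	static void my_init_regions(struct damon_ctx *ctx);
	static void my_update_regions(struct damon_ctx *ctx);
	static void my_prepare_access_checks(struct damon_ctx *ctx);
	static unsigned int my_check_accesses(struct damon_ctx *ctx);

	static struct damon_ctx my_ctx = {
		/* sampling/aggregation attributes elided */
		.init_target_regions = my_init_regions,
		.update_target_regions = my_update_regions,
		.prepare_access_checks = my_prepare_access_checks,
		.check_accesses = my_check_accesses,
	};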
@@ -199,6 +204,8 @@ struct damon_ctx {
/* callbacks */
void (*init_target_regions)(struct damon_ctx *context);
void (*update_target_regions)(struct damon_ctx *context);
+ void (*prepare_access_checks)(struct damon_ctx *context);
+ unsigned int (*check_accesses)(struct damon_ctx *context);
void (*sample_cb)(struct damon_ctx *context);
void (*aggregate_cb)(struct damon_ctx *context);
};
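The fallback to the VM implementations that the kernel-doc promises is not
visible in this excerpt. One plausible shape for that wiring, using only the
symbols declared below (the helper name is made up):

	/* Hypothetical: apply the VM reference callbacks to an
	 * unconfigured context. */
	static void damon_set_vm_default_callbacks(struct damon_ctx *ctx)
	{
		if (!ctx->init_target_regions)
			ctx->init_target_regions = kdamond_init_vm_regions;
		if (!ctx->update_target_regions)
			ctx->update_target_regions = kdamond_update_vm_regions;
		if (!ctx->prepare_access_checks)
			ctx->prepare_access_checks = kdamond_prepare_vm_access_checks;
		if (!ctx->check_accesses)
			ctx->check_accesses = kdamond_check_vm_accesses;
	}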
@@ -206,6 +213,8 @@ struct damon_ctx {
/* Reference callback implementations for virtual memory */
void kdamond_init_vm_regions(struct damon_ctx *ctx);
void kdamond_update_vm_regions(struct damon_ctx *ctx);
+void kdamond_prepare_vm_access_checks(struct damon_ctx *ctx);
+unsigned int kdamond_check_vm_accesses(struct damon_ctx *ctx);
int damon_set_pids(struct damon_ctx *ctx, int *pids, ssize_t nr_pids);
int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
@@ -75,6 +75,8 @@ static struct damon_ctx damon_user_ctx = {
.init_target_regions = kdamond_init_vm_regions,
.update_target_regions = kdamond_update_vm_regions,
+ .prepare_access_checks = kdamond_prepare_vm_access_checks,
+ .check_accesses = kdamond_check_vm_accesses,
};
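With the VM callbacks set as the user context's defaults above, pointing the
context at target processes is all that remains. A usage sketch based on the
damon_set_pids() prototype; the pid values are placeholders:

	int pids[] = { 1234, 5678 };	/* placeholder target pids */

	if (damon_set_pids(&damon_user_ctx, pids, ARRAY_SIZE(pids)))
		pr_err("damon: failed to set monitoring targets\n");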
/*
@@ -507,7 +509,7 @@ static void damon_mkold(struct mm_struct *mm, unsigned long addr)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
-static void damon_prepare_access_check(struct damon_ctx *ctx,
+static void damon_prepare_vm_access_check(struct damon_ctx *ctx,
struct mm_struct *mm, struct damon_region *r)
{
r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
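The prepare step picks one random address per region and clears its accessed
bit, so the following check observes only fresh activity. damon_rand() is
defined elsewhere in this file; an illustrative equivalent of the sampling
step, not the actual macro:

	/* Illustrative only: one uniform sample inside [start, end). */
	static unsigned long pick_sampling_addr(unsigned long start,
						unsigned long end)
	{
		return start + prandom_u32() % (end - start);
	}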
@@ -515,7 +517,7 @@ static void damon_prepare_access_check(struct damon_ctx *ctx,
damon_mkold(mm, r->sampling_addr);
}
-static void kdamond_prepare_access_checks(struct damon_ctx *ctx)
+void kdamond_prepare_vm_access_checks(struct damon_ctx *ctx)
{
struct damon_task *t;
struct mm_struct *mm;
@@ -526,7 +528,7 @@ static void kdamond_prepare_access_checks(struct damon_ctx *ctx)
if (!mm)
continue;
damon_for_each_region(r, t)
- damon_prepare_access_check(ctx, mm, r);
+ damon_prepare_vm_access_check(ctx, mm, r);
mmput(mm);
}
}
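kdamond_prepare_vm_access_checks() also shows the shape any custom
prepare_access_checks implementation would follow: walk each task's regions
and arm every region for the next check. A skeleton under that assumption
(my_arm_region is hypothetical; the iterator macros are the ones used above,
with signatures as defined in this file):

	static void my_prepare_access_checks(struct damon_ctx *ctx)
	{
		struct damon_task *t;
		struct damon_region *r;

		damon_for_each_task(ctx, t)
			damon_for_each_region(r, t)
				my_arm_region(r); /* hypothetical: clear accessed state */
	}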
@@ -564,7 +566,7 @@ static bool damon_young(struct mm_struct *mm, unsigned long addr,
* mm 'mm_struct' for the given virtual address space
* r the region to be checked
*/
-static void damon_check_access(struct damon_ctx *ctx,
+static void damon_check_vm_access(struct damon_ctx *ctx,
struct mm_struct *mm, struct damon_region *r)
{
static struct mm_struct *last_mm;
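The static last_mm here, together with last_addr in the next hunk, suggests
that damon_check_vm_access() caches its most recent page check so regions
whose sampling addresses fall in the same page reuse one result instead of
re-walking the page table. The body is largely elided; a sketch of that
short-circuit (last_accessed is an assumed companion static):

	/* Sketch: reuse the previous result for a same-page sample. */
	if (mm == last_mm &&
	    (last_addr & PAGE_MASK) == (r->sampling_addr & PAGE_MASK)) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}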
@@ -588,7 +590,7 @@ static void damon_check_access(struct damon_ctx *ctx,
last_addr = r->sampling_addr;
}
-static unsigned int kdamond_check_accesses(struct damon_ctx *ctx)
+unsigned int kdamond_check_vm_accesses(struct damon_ctx *ctx)
{
struct damon_task *t;
struct mm_struct *mm;
@@ -600,12 +602,12 @@ static unsigned int kdamond_check_accesses(struct damon_ctx *ctx)
if (!mm)
continue;
damon_for_each_region(r, t) {
- damon_check_access(ctx, mm, r);
+ damon_check_vm_access(ctx, mm, r);
max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
}
-
mmput(mm);
}
+
return max_nr_accesses;
}
@@ -1111,13 +1113,13 @@ static int kdamond_fn(void *data)
kdamond_write_record_header(ctx);
while (!kdamond_need_stop(ctx)) {
- kdamond_prepare_access_checks(ctx);
+ ctx->prepare_access_checks(ctx);
if (ctx->sample_cb)
ctx->sample_cb(ctx);
usleep_range(ctx->sample_interval, ctx->sample_interval + 1);
- max_nr_accesses = kdamond_check_accesses(ctx);
+ max_nr_accesses = ctx->check_accesses(ctx);
if (kdamond_aggregate_interval_passed(ctx)) {
kdamond_merge_regions(ctx, max_nr_accesses / 10);
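The value check_accesses returns is the peak per-region access count of this
sampling round, and the loop above turns it into the region merge threshold.
For example, with max_nr_accesses == 40 the call becomes
kdamond_merge_regions(ctx, 4); assuming that function merges adjacent regions
whose nr_accesses differ by at most its second argument (its body is not in
this excerpt), regions within 10% of the peak score collapse into one.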