@@ -80,6 +80,15 @@ struct damon_task {
* @kdamond_lock. Accesses to other fields must be protected by themselves.
*
* @tasks_list: Head of monitoring target tasks (&damon_task) list.
+ *
+ * @sample_cb: Called for each sampling interval.
+ * @aggregate_cb: Called for each aggregation interval.
+ *
+ * @sample_cb and @aggregate_cb are called from @kdamond for each sampling
+ * interval and each aggregation interval, respectively.  Because they run in
+ * the context of @kdamond, users can safely access the monitoring results
+ * via @tasks_list without additional protection of @kdamond_lock.  For this
+ * reason, users are recommended to use these callbacks to access the results.
*/
struct damon_ctx {
unsigned long sample_interval;
@@ -96,6 +105,10 @@ struct damon_ctx {
struct mutex kdamond_lock;
struct list_head tasks_list; /* 'damon_task' objects */
+
+ /* callbacks */
+ void (*sample_cb)(struct damon_ctx *context);
+ void (*aggregate_cb)(struct damon_ctx *context);
};
int damon_set_pids(struct damon_ctx *ctx, int *pids, ssize_t nr_pids);
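For illustration, a consumer of the new callbacks could look roughly like the
sketch below.  This is not part of the patch, and the 'list' and 'pid' members
of struct damon_task are assumptions based on the rest of the series.

/* Illustration only: register an aggregation callback on a context. */
static void example_aggregate_cb(struct damon_ctx *ctx)
{
        struct damon_task *t;

        /* Runs in kdamond context, so kdamond_lock is not needed here. */
        list_for_each_entry(t, &ctx->tasks_list, list)
                pr_info("aggregated results ready for pid %d\n", t->pid);
}

static void example_setup(struct damon_ctx *ctx)
{
        ctx->sample_cb = NULL;                  /* no per-sample work */
        ctx->aggregate_cb = example_aggregate_cb;
}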
@@ -880,6 +880,8 @@ static int kdamond_fn(void *data)
kdamond_init_regions(ctx);
while (!kdamond_need_stop(ctx)) {
kdamond_prepare_access_checks(ctx);
+ if (ctx->sample_cb)
+ ctx->sample_cb(ctx);
usleep_range(ctx->sample_interval, ctx->sample_interval + 1);
@@ -887,6 +889,8 @@ static int kdamond_fn(void *data)
if (kdamond_aggregate_interval_passed(ctx)) {
kdamond_merge_regions(ctx, max_nr_accesses / 10);
+ if (ctx->aggregate_cb)
+ ctx->aggregate_cb(ctx);
kdamond_reset_aggregated(ctx);
kdamond_split_regions(ctx);
}
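As the hunks above show, @sample_cb fires once per sampling interval and
@aggregate_cb fires once per aggregation interval, before
kdamond_reset_aggregated() clears the aggregated counters.  A hypothetical
pair of callbacks (illustration only, not part of the patch) could rely on
that ordering to count sampling passes per aggregation:

static unsigned int nr_samples;         /* example state only */

static void count_sample_cb(struct damon_ctx *ctx)
{
        nr_samples++;
}

static void report_aggregate_cb(struct damon_ctx *ctx)
{
        /* Called before kdamond_reset_aggregated(), per the hunk above. */
        pr_info("aggregating after %u sampling passes\n", nr_samples);
        nr_samples = 0;
}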