new file mode 100644
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DAMON API
+ *
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ *
+ * Author: SeongJae Park <sjpark@amazon.de>
+ */
+
+#ifndef _DAMON_H_
+#define _DAMON_H_
+
+#include <linux/random.h>
+#include <linux/spinlock_types.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+
+/* Represents a monitoring target region on the virtual address space */
+struct damon_region {
+ unsigned long vm_start;
+ unsigned long vm_end;
+ unsigned long sampling_addr;
+ unsigned int nr_accesses;
+ struct list_head list;
+};
+
+/* Represents a monitoring target task */
+struct damon_task {
+ unsigned long pid;
+ struct list_head regions_list;
+ struct list_head list;
+};
+
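+/*
+ * For each 'sample_interval', DAMON checks whether each region is accessed
+ * or not.  It aggregates the access information (number of accesses to each
+ * region) and flushes it to the result buffer once an 'aggr_interval' has
+ * passed.  For each 'regions_update_interval', DAMON checks whether the
+ * memory mapping of the target tasks has changed (e.g., by mmap() calls
+ * from the applications) and applies the changes.
+ *
+ * All time intervals are in microseconds.
+ */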
+struct damon_ctx {
+ unsigned long sample_interval;
+ unsigned long aggr_interval;
+ unsigned long regions_update_interval;
+ unsigned long min_nr_regions;
+ unsigned long max_nr_regions;
+
+ struct timespec64 last_aggregation;
+ struct timespec64 last_regions_update;
+
+ unsigned char *rbuf;
+ unsigned int rbuf_len;
+ unsigned int rbuf_offset;
+ char *rfile_path;
+
+ struct task_struct *kdamond;
+ bool kdamond_stop;
+ spinlock_t kdamond_lock;
+
+ struct rnd_state rndseed;
+
+ struct list_head tasks_list; /* 'damon_task' objects */
+
+ /* callbacks, invoked from kdamond after each sampling and aggregation */
+ void (*sample_cb)(struct damon_ctx *context);
+ void (*aggregate_cb)(struct damon_ctx *context);
+};
+
+int damon_set_pids(struct damon_ctx *ctx,
+ unsigned long *pids, ssize_t nr_pids);
+int damon_set_recording(struct damon_ctx *ctx,
+ unsigned int rbuf_len, char *rfile_path);
+int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
+ unsigned long aggr_int, unsigned long regions_update_int,
+ unsigned long min_nr_reg, unsigned long max_nr_reg);
+int damon_start(struct damon_ctx *ctx);
+int damon_stop(struct damon_ctx *ctx);
+
+#endif	/* _DAMON_H_ */
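
For in-kernel users, the exported calls above combine roughly as follows. This is a minimal sketch, assuming a fully initialized struct damon_ctx is available to the caller (the patch keeps a single static context inside mm/damon.c and does not export an allocator); the function names, interval values, and result file path are illustrative only, not part of the patch:

#include <linux/damon.h>

static int start_monitoring(struct damon_ctx *ctx, unsigned long pid)
{
	int err;

	/*
	 * 5 ms sampling, 100 ms aggregation, 1 s regions update,
	 * and between 10 and 1000 monitoring target regions.
	 */
	err = damon_set_attrs(ctx, 5000, 100000, 1000000, 10, 1000);
	if (err)
		return err;

	/* monitor a single target process */
	err = damon_set_pids(ctx, &pid, 1);
	if (err)
		return err;

	/* buffer the results (4 KiB) and store them in this file */
	err = damon_set_recording(ctx, 4096, "/damon.data");
	if (err)
		return err;

	/* start the monitoring thread (kdamond) */
	return damon_start(ctx);
}

static int stop_monitoring(struct damon_ctx *ctx)
{
	/* ask the kdamond thread to stop */
	return damon_stop(ctx);
}
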
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) "damon: " fmt
+#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
@@ -40,55 +41,6 @@
#define damon_for_each_task_safe(ctx, t, next) \
list_for_each_entry_safe(t, next, &(ctx)->tasks_list, list)
-/* Represents a monitoring target region on the virtual address space */
-struct damon_region {
- unsigned long vm_start;
- unsigned long vm_end;
- unsigned long sampling_addr;
- unsigned int nr_accesses;
- struct list_head list;
-};
-
-/* Represents a monitoring target task */
-struct damon_task {
- unsigned long pid;
- struct list_head regions_list;
- struct list_head list;
-};
-
-/*
- * For each 'sample_interval', DAMON checks whether each region is accessed or
- * not. It aggregates and keeps the access information (number of accesses to
- * each region) for 'aggr_interval' and then flushes it to the result buffer if
- * an 'aggr_interval' surpassed. And for each 'regions_update_interval', damon
- * checks whether the memory mapping of the target tasks has changed (e.g., by
- * mmap() calls from the applications) and applies the changes.
- *
- * All time intervals are in micro-seconds.
- */
-struct damon_ctx {
- unsigned long sample_interval;
- unsigned long aggr_interval;
- unsigned long regions_update_interval;
- unsigned long min_nr_regions;
- unsigned long max_nr_regions;
-
- struct timespec64 last_aggregation;
-
- unsigned char *rbuf;
- unsigned int rbuf_len;
- unsigned int rbuf_offset;
- char *rfile_path;
-
- struct task_struct *kdamond;
- bool kdamond_stop;
- spinlock_t kdamond_lock;
-
- struct rnd_state rndseed;
-
- struct list_head tasks_list; /* 'damon_task' objects */
-};
-
#define LEN_RES_FILE_PATH 256
/* Get a random number in [l, r) */
@@ -885,11 +837,15 @@ static int kdamond_fn(void *data)
}
mmput(mm);
}
+ if (ctx->sample_cb)
+ ctx->sample_cb(ctx);
if (kdamond_aggregate_interval_passed(ctx)) {
kdamond_merge_regions(ctx, max_nr_accesses / 10);
kdamond_flush_aggregated(ctx);
kdamond_split_regions(ctx);
+ if (ctx->aggregate_cb)
+ ctx->aggregate_cb(ctx);
}
if (kdamond_need_update_regions(ctx))
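
Both callbacks are invoked synchronously from the kdamond thread: sample_cb after every sampling pass over the target regions, and aggregate_cb once per aggregation interval, after the regions have been merged, flushed, and re-split. A hypothetical pair of callbacks (the names, messages, and the registration helper below are illustrative, not part of the patch) could be wired up before calling damon_start() like this:

#include <linux/damon.h>
#include <linux/printk.h>

static void my_sample_cb(struct damon_ctx *ctx)
{
	/* called once per sample_interval, after the access checks */
	pr_debug("damon: sampling pass done\n");
}

static void my_aggregate_cb(struct damon_ctx *ctx)
{
	/* called once per aggr_interval, after merge/flush/split */
	pr_debug("damon: aggregation results flushed\n");
}

static int register_callbacks(struct damon_ctx *ctx)
{
	/*
	 * Set the hooks before starting the monitor; they run inside
	 * the monitoring loop, so long-running work here delays the
	 * next sampling.
	 */
	ctx->sample_cb = my_sample_cb;
	ctx->aggregate_cb = my_aggregate_cb;
	return damon_start(ctx);
}
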
@@ -952,6 +908,16 @@ static int damon_turn_kdamond(struct damon_ctx *ctx, bool on)
return 0;
}
+int damon_start(struct damon_ctx *ctx)
+{
+ return damon_turn_kdamond(ctx, true);
+}
+
+int damon_stop(struct damon_ctx *ctx)
+{
+ return damon_turn_kdamond(ctx, false);
+}
+
static inline bool damon_is_target_pid(struct damon_ctx *c, unsigned long pid)
{
struct damon_task *t;
@@ -966,7 +932,7 @@ static inline bool damon_is_target_pid(struct damon_ctx *c, unsigned long pid)
/*
* This function should not be called while the kdamond is running.
*/
-static int damon_set_pids(struct damon_ctx *ctx,
+int damon_set_pids(struct damon_ctx *ctx,
unsigned long *pids, ssize_t nr_pids)
{
ssize_t i;
@@ -1007,7 +973,7 @@ static int damon_set_pids(struct damon_ctx *ctx,
*
* Returns 0 on success, negative error code otherwise.
*/
-static int damon_set_recording(struct damon_ctx *ctx,
+int damon_set_recording(struct damon_ctx *ctx,
unsigned int rbuf_len, char *path_to_rfile)
{
size_t rfile_path_len;
@@ -1051,7 +1017,7 @@ static int damon_set_recording(struct damon_ctx *ctx,
*
* Returns 0 on success, negative error code otherwise.
*/
-static int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
+int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
unsigned long aggr_int, unsigned long regions_update_int,
unsigned long min_nr_reg, unsigned long max_nr_reg)
{