new file mode 100644
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DAMON API
+ *
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ *
+ * Author: SeongJae Park <sjpark@amazon.de>
+ */
+
+#ifndef _DAMON_H_
+#define _DAMON_H_
+
+#include <linux/random.h>
+#include <linux/types.h>
+
+/**
+ * struct damon_region - Represents a monitoring target region on the virtual
+ * address space.
+ * @vm_start:		Start address of the region (inclusive).
+ * @vm_end:		End address of the region (exclusive).
+ * @sampling_addr:	Address of the sample to be checked for access.
+ * @nr_accesses:	Access frequency of this region.
+ * @list:		List head for sibling regions.
+ */
+struct damon_region {
+	unsigned long vm_start;
+	unsigned long vm_end;
+	unsigned long sampling_addr;
+	unsigned int nr_accesses;
+	struct list_head list;
+};
+
+/**
+ * struct damon_task - Represents a monitoring target task.
+ * @pid:		Process id of the task.
+ * @regions_list:	Head of the monitoring target regions of this task.
+ * @list:		List head for sibling tasks.
+ */
+struct damon_task {
+	unsigned long pid;
+	struct list_head regions_list;
+	struct list_head list;
+};
+
+/**
+ * struct damon_ctx - Represents a context of the monitoring.
+ * @rndseed:		Pseudo-random number generator state for the sampling.
+ * @tasks_list:		Head of monitoring target tasks ('damon_task' objects).
+ */
+struct damon_ctx {
+	struct rnd_state rndseed;
+
+	struct list_head tasks_list;
+};
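+
+/*
+ * A damon_ctx keeps a list of damon_task objects, and each damon_task keeps
+ * a list of damon_region objects, e.g.:
+ *
+ *	ctx
+ *	+- task A
+ *	|  +- region [a, b)
+ *	|  +- region [b, c)
+ *	+- task B
+ *	   +- region [x, y)
+ */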
+
+#endif
@@ -739,4 +739,15 @@ config ARCH_HAS_HUGEPD
config MAPPING_DIRTY_HELPERS
bool

+config DAMON
+ tristate "Data Access Monitor"
+ depends on MMU
+ help
+ Provides data access monitoring.
+
+ DAMON is a kernel module that allows users to monitor the actual
+ memory access pattern of specific user-space processes. It aims to
+ be 1) accurate enough to be useful for performance-centric domains,
+	  and 2) sufficiently lightweight so that it can be applied online.
+
endmenu
@@ -108,3 +108,4 @@ obj-$(CONFIG_ZONE_DEVICE) += memremap.o
obj-$(CONFIG_HMM_MIRROR) += hmm.o
obj-$(CONFIG_MEMFD_CREATE) += memfd.o
obj-$(CONFIG_MAPPING_DIRTY_HELPERS) += mapping_dirty_helpers.o
+obj-$(CONFIG_DAMON) += damon.o
new file mode 100644
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Data Access Monitor
+ *
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ *
+ * Author: SeongJae Park <sjpark@amazon.de>
+ */
+
+#define pr_fmt(fmt) "damon: " fmt
+
+#include <linux/damon.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define damon_get_task_struct(t) \
+	(get_pid_task(find_vpid((t)->pid), PIDTYPE_PID))
+
+#define damon_next_region(r) \
+	(container_of((r)->list.next, struct damon_region, list))
+
+#define damon_prev_region(r) \
+	(container_of((r)->list.prev, struct damon_region, list))
+
+#define damon_for_each_region(r, t) \
+	list_for_each_entry(r, &(t)->regions_list, list)
+
+#define damon_for_each_region_safe(r, next, t) \
+	list_for_each_entry_safe(r, next, &(t)->regions_list, list)
+
+#define damon_for_each_task(ctx, t) \
+ list_for_each_entry(t, &(ctx)->tasks_list, list)
+
+#define damon_for_each_task_safe(ctx, t, next) \
+ list_for_each_entry_safe(t, next, &(ctx)->tasks_list, list)
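+
+/*
+ * Example usage of the traversal macros above (an illustrative sketch,
+ * assuming a context @ctx that already has tasks and regions registered):
+ *
+ *	struct damon_task *t;
+ *	struct damon_region *r;
+ *
+ *	damon_for_each_task(ctx, t)
+ *		damon_for_each_region(r, t)
+ *			pr_debug("%lu-%lu\n", r->vm_start, r->vm_end);
+ */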
+
+/* Get a random number in [l, r) */
+#define damon_rand(ctx, l, r) \
+	((l) + prandom_u32_state(&(ctx)->rndseed) % ((r) - (l)))
+
+/*
+ * Construct a damon_region struct
+ *
+ * Returns the pointer to the new struct on success, or NULL otherwise
+ */
+static struct damon_region *damon_new_region(struct damon_ctx *ctx,
+ unsigned long vm_start, unsigned long vm_end)
+{
+ struct damon_region *region;
+
+ region = kmalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return NULL;
+
+ region->vm_start = vm_start;
+ region->vm_end = vm_end;
+ region->nr_accesses = 0;
+ region->sampling_addr = damon_rand(ctx, vm_start, vm_end);
+	INIT_LIST_HEAD(&region->list);
+
+ return region;
+}
+
+/*
+ * Add a region between two other regions
+ */
+static inline void damon_insert_region(struct damon_region *r,
+ struct damon_region *prev, struct damon_region *next)
+{
+ __list_add(&r->list, &prev->list, &next->list);
+}
+
+static void damon_add_region(struct damon_region *r, struct damon_task *t)
+{
+ list_add_tail(&r->list, &t->regions_list);
+}
+
+static void damon_del_region(struct damon_region *r)
+{
+ list_del(&r->list);
+}
+
+static void damon_free_region(struct damon_region *r)
+{
+ kfree(r);
+}
+
+static void damon_destroy_region(struct damon_region *r)
+{
+ damon_del_region(r);
+ damon_free_region(r);
+}
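+
+/*
+ * Example (an illustrative sketch): constructing a region for the address
+ * range [@start, @end) of a task @t, and linking it into the task:
+ *
+ *	struct damon_region *r = damon_new_region(ctx, start, end);
+ *
+ *	if (r)
+ *		damon_add_region(r, t);
+ */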
+
+/*
+ * Construct a damon_task struct
+ *
+ * Returns the pointer to the new struct on success, or NULL otherwise
+ */
+static struct damon_task *damon_new_task(unsigned long pid)
+{
+ struct damon_task *t;
+
+ t = kmalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ return NULL;
+
+ t->pid = pid;
+ INIT_LIST_HEAD(&t->regions_list);
+
+ return t;
+}
+
+/* Returns n-th damon_region of the given task */
+struct damon_region *damon_nth_region_of(struct damon_task *t, unsigned int n)
+{
+ struct damon_region *r;
+ unsigned int i = 0;
+
+ damon_for_each_region(r, t) {
+ if (i++ == n)
+ return r;
+ }
+
+ return NULL;
+}
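+
+/*
+ * Example (an illustrative sketch): picking the middle region of a task @t,
+ * using nr_damon_regions() defined below:
+ *
+ *	struct damon_region *mid;
+ *
+ *	mid = damon_nth_region_of(t, nr_damon_regions(t) / 2);
+ */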
+
+static void damon_add_task(struct damon_ctx *ctx, struct damon_task *t)
+{
+ list_add_tail(&t->list, &ctx->tasks_list);
+}
+
+static void damon_del_task(struct damon_task *t)
+{
+ list_del(&t->list);
+}
+
+static void damon_free_task(struct damon_task *t)
+{
+ struct damon_region *r, *next;
+
+ damon_for_each_region_safe(r, next, t)
+ damon_free_region(r);
+ kfree(t);
+}
+
+static void damon_destroy_task(struct damon_task *t)
+{
+ damon_del_task(t);
+ damon_free_task(t);
+}
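+
+/*
+ * Example (an illustrative sketch): creating a monitoring target task for a
+ * given @pid and registering it to a context @ctx:
+ *
+ *	struct damon_task *t = damon_new_task(pid);
+ *
+ *	if (t)
+ *		damon_add_task(ctx, t);
+ */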
+
+static unsigned int nr_damon_tasks(struct damon_ctx *ctx)
+{
+ struct damon_task *t;
+ unsigned int nr_tasks = 0;
+
+ damon_for_each_task(ctx, t)
+ nr_tasks++;
+
+ return nr_tasks;
+}
+
+static unsigned int nr_damon_regions(struct damon_task *t)
+{
+ struct damon_region *r;
+ unsigned int nr_regions = 0;
+
+ damon_for_each_region(r, t)
+ nr_regions++;
+
+ return nr_regions;
+}
+
+static int __init damon_init(void)
+{
+ return 0;
+}
+
+static void __exit damon_exit(void)
+{
+}
+
+module_init(damon_init);
+module_exit(damon_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("SeongJae Park <sjpark@amazon.de>");
+MODULE_DESCRIPTION("DAMON: Data Access MONitor");