@@ -22,6 +22,11 @@ struct damon_region {
unsigned long sampling_addr;
unsigned int nr_accesses;
struct list_head list;
+
+ unsigned int age;
+ unsigned long last_vm_start;
+ unsigned long last_vm_end;
+ unsigned int last_nr_accesses;
};
/* Represents a monitoring target task */
@@ -87,6 +87,10 @@ static struct damon_region *damon_new_region(struct damon_ctx *ctx,
ret->sampling_addr = damon_rand(ctx, vm_start, vm_end);
INIT_LIST_HEAD(&ret->list);
+ ret->age = 0;
+ ret->last_vm_start = vm_start;
+ ret->last_vm_end = vm_end;
+
return ret;
}
@@ -600,11 +604,44 @@ static void kdamond_flush_aggregated(struct damon_ctx *c)
damon_write_rbuf(c, &r->vm_end, sizeof(r->vm_end));
damon_write_rbuf(c, &r->nr_accesses,
sizeof(r->nr_accesses));
+ r->last_nr_accesses = r->nr_accesses;
r->nr_accesses = 0;
}
}
}
/*
 * Absolute difference of two values
 *
 * Arguments and the whole expansion are parenthesized so that operands
 * containing lower-precedence operators (e.g. bitwise ops) expand correctly.
 * Note both arguments are still evaluated twice; do not pass expressions
 * with side effects.
 */
#define diff_of(a, b) ((a) > (b) ? (a) - (b) : (b) - (a))
+
+/*
+ * Increase or reset the age of the given monitoring target region
+ *
+ * If the area or '->nr_accesses' has changed significantly, reset the '->age'.
+ * Else, increase the age.
+ */
+static void damon_do_count_age(struct damon_region *r, unsigned int threshold)
+{
+ unsigned long sz_threshold = (r->vm_end - r->vm_start) / 5;
+
+ if (diff_of(r->vm_start, r->last_vm_start) +
+ diff_of(r->vm_end, r->last_vm_end) > sz_threshold)
+ r->age = 0;
+ else if (diff_of(r->nr_accesses, r->last_nr_accesses) > threshold)
+ r->age = 0;
+ else
+ r->age++;
+}
+
+static void kdamond_count_age(struct damon_ctx *c, unsigned int threshold)
+{
+ struct damon_task *t;
+ struct damon_region *r;
+
+ damon_for_each_task(c, t) {
+ damon_for_each_region(r, t)
+ damon_do_count_age(r, threshold);
+ }
+}
+
/* Size of the given region; 'r' is parenthesized so that non-primary
 * arguments (e.g. '&region') expand correctly. */
#define sz_damon_region(r) ((r)->vm_end - (r)->vm_start)
/*
@@ -613,35 +650,90 @@ static void kdamond_flush_aggregated(struct damon_ctx *c)
/*
 * Merge region 'r' into its preceding adjacent region 'l' and destroy 'r'
 *
 * '->nr_accesses' and '->age' of the survivor become the size-weighted
 * means of the two regions' values.
 */
static void damon_merge_two_regions(struct damon_region *l,
				struct damon_region *r)
{
	unsigned long sz_l = sz_damon_region(l), sz_r = sz_damon_region(r);
	unsigned long sz_total = sz_l + sz_r;

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			sz_total;
	l->age = (l->age * sz_l + r->age * sz_r) / sz_total;
	l->vm_end = r->vm_end;
	damon_destroy_region(r);
}
-#define diff_of(a, b) (a > b ? a - b : b - a)
-
/*
* Merge adjacent regions having similar access frequencies
*
* t task that merge operation will make change
* thres merge regions having '->nr_accesses' diff smaller than this
+ *
+ * Merged new region's '->last_vm_(start|end)' will be those of the biggest
+ * subregion that merged into the new region. If a region is split into two
+ * segments and now both of the two segments are merged into the new region,
+ * the size of the subregion is the sum of the two segments. For example:
+ *
+ * < A >
+ * split
+ * < A >< A >
+ * merge
+ * < A >
+ *
+ * < A >< A >
+ * split
+ * < A ><A >< B ><B>
+ * merge
+ * < A >< B ><B>
+ *
+ * < A >< B >
+ * split
+ * < A >< A >< B >< B >
+ * merge
+ * < B >
+ *
+ * < A >< B >
+ * split
+ * < A ><A>< B >< B >
+ * merge
+ * < A >
*/
static void damon_merge_regions_of(struct damon_task *t, unsigned int thres)
{
struct damon_region *r, *prev = NULL, *next;
+ unsigned long sz_subregion, last_last_vm = 0;
+ unsigned long sz_biggest = 0; /* size of the biggest subregion */
+ struct region last_biggest; /* last region of the biggest sub */
damon_for_each_region_safe(r, next, t) {
if (!prev || prev->vm_end != r->vm_start)
goto next;
if (diff_of(prev->nr_accesses, r->nr_accesses) > thres)
goto next;
+ if (!sz_biggest) {
+ sz_biggest = sz_damon_region(prev);
+ last_biggest.start = prev->last_vm_start;
+ last_biggest.end = prev->last_vm_end;
+ }
+ if (last_last_vm != r->last_vm_start)
+ sz_subregion = 0;
+ sz_subregion += sz_damon_region(r);
+ last_last_vm = r->last_vm_start;
+ if (sz_subregion > sz_biggest) {
+ sz_biggest = sz_subregion;
+ last_biggest.start = r->last_vm_start;
+ last_biggest.end = r->last_vm_end;
+ }
damon_merge_two_regions(prev, r);
continue;
next:
+ if (sz_biggest) {
+ sz_biggest = 0;
+ prev->last_vm_start = last_biggest.start;
+ prev->last_vm_end = last_biggest.end;
+ }
prev = r;
}
+ if (sz_biggest) {
+ prev->last_vm_start = last_biggest.start;
+ prev->last_vm_end = last_biggest.end;
+ }
}
/*
@@ -674,6 +766,12 @@ static void damon_split_region_at(struct damon_ctx *ctx,
struct damon_region *new;
new = damon_new_region(ctx, r->vm_start + sz_r, r->vm_end);
+ new->age = r->age;
+ new->last_vm_start = r->vm_start;
+ new->last_nr_accesses = r->last_nr_accesses;
+
+ r->last_vm_start = r->vm_start;
+ r->last_vm_end = r->vm_end;
r->vm_end = new->vm_start;
damon_add_region(new, r, damon_next_region(r));
@@ -865,6 +963,7 @@ static int kdamond_fn(void *data)
if (kdamond_aggregate_interval_passed(ctx)) {
kdamond_merge_regions(ctx, max_nr_accesses / 10);
+ kdamond_count_age(ctx, max_nr_accesses / 10);
if (ctx->aggregate_cb)
ctx->aggregate_cb(ctx);
kdamond_flush_aggregated(ctx);