// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;

/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}

/*
 * Add a region between two other regions
 */
inline void damon_insert_region(struct damon_region *r,
		struct damon_region *prev, struct damon_region *next,
		struct damon_target *t)
{
	__list_add(&r->list, &prev->list, &next->list);
	t->nr_regions++;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kfree(r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}

struct damos *damon_new_scheme(
		unsigned long min_sz_region, unsigned long max_sz_region,
		unsigned int min_nr_accesses, unsigned int max_nr_accesses,
		unsigned int min_age_region, unsigned int max_age_region,
		enum damos_action action, struct damos_quota *quota,
		struct damos_watermarks *wmarks)
{
	struct damos *scheme;

	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
	if (!scheme)
		return NULL;
	scheme->min_sz_region = min_sz_region;
	scheme->max_sz_region = max_sz_region;
	scheme->min_nr_accesses = min_nr_accesses;
	scheme->max_nr_accesses = max_nr_accesses;
	scheme->min_age_region = min_age_region;
	scheme->max_age_region = max_age_region;
	scheme->action = action;
	scheme->stat_count = 0;
	scheme->stat_sz = 0;
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota.ms = quota->ms;
	scheme->quota.sz = quota->sz;
	scheme->quota.reset_interval = quota->reset_interval;
	scheme->quota.weight_sz = quota->weight_sz;
	scheme->quota.weight_nr_accesses = quota->weight_nr_accesses;
	scheme->quota.weight_age = quota->weight_age;
	scheme->quota.total_charged_sz = 0;
	scheme->quota.total_charged_ns = 0;
	scheme->quota.esz = 0;
	scheme->quota.charged_sz = 0;
	scheme->quota.charged_from = 0;
	scheme->quota.charge_target_from = NULL;
	scheme->quota.charge_addr_from = 0;

	scheme->wmarks.metric = wmarks->metric;
	scheme->wmarks.interval = wmarks->interval;
	scheme->wmarks.high = wmarks->high;
	scheme->wmarks.mid = wmarks->mid;
	scheme->wmarks.low = wmarks->low;
	scheme->wmarks.activated = true;

	return scheme;
}

void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
}

static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

void damon_destroy_scheme(struct damos *s)
{
	damon_del_scheme(s);
	damon_free_scheme(s);
}

/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_target *damon_new_target(unsigned long id)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->id = id;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}

struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->sample_interval = 5 * 1000;
	ctx->aggr_interval = 100 * 1000;
	ctx->primitive_update_interval = 60 * 1000 * 1000;

	ktime_get_coarse_ts64(&ctx->last_aggregation);
	ctx->last_primitive_update = ctx->last_aggregation;

	mutex_init(&ctx->kdamond_lock);

	ctx->min_nr_regions = 10;
	ctx->max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}
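
/*
 * Usage sketch (illustrative, not part of this file): an in-kernel caller
 * typically pairs damon_new_ctx() with a primitive setup helper such as
 * damon_va_set_primitives() from mm/damon/vaddr.c (assumed available to
 * the caller), then tunes the context before starting monitoring:
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	damon_va_set_primitives(ctx);
 *	// ... set attributes, targets, and optionally schemes, then start
 */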
static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->primitive.cleanup) {
		ctx->primitive.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}

/**
 * damon_set_targets() - Set monitoring targets.
 * @ctx:	monitoring context
 * @ids:	array of target ids
 * @nr_ids:	number of entries in @ids
 *
 * This function should not be called while the kdamond is running.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_targets(struct damon_ctx *ctx,
		      unsigned long *ids, ssize_t nr_ids)
{
	ssize_t i;
	struct damon_target *t, *next;

	damon_destroy_targets(ctx);

	for (i = 0; i < nr_ids; i++) {
		t = damon_new_target(ids[i]);
		if (!t) {
			/* The caller should do cleanup of the ids itself */
			damon_for_each_target_safe(t, next, ctx)
				damon_destroy_target(t);
			return -ENOMEM;
		}
		damon_add_target(ctx, t);
	}

	return 0;
}
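
/*
 * Usage sketch (illustrative): the meaning of an id is defined by the
 * primitives in use; for the virtual address space primitives, an id
 * identifies a process.  A hypothetical caller monitoring two targets:
 *
 *	unsigned long ids[2] = { id0, id1 };	// caller-provided ids
 *
 *	if (damon_set_targets(ctx, ids, 2))
 *		return -ENOMEM;
 *
 * On failure, the targets constructed so far are destroyed, so only the
 * ids themselves remain for the caller to clean up.
 */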
/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:		monitoring context
 * @sample_int:		time interval between samplings
 * @aggr_int:		time interval between aggregations
 * @primitive_upd_int:	time interval between monitoring primitive updates
 * @min_nr_reg:		minimal number of regions
 * @max_nr_reg:		maximum number of regions
 *
 * This function should not be called while the kdamond is running.
 * Every time interval is in microseconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
		    unsigned long aggr_int, unsigned long primitive_upd_int,
		    unsigned long min_nr_reg, unsigned long max_nr_reg)
{
	if (min_nr_reg < 3)
		return -EINVAL;
	if (min_nr_reg > max_nr_reg)
		return -EINVAL;

	ctx->sample_interval = sample_int;
	ctx->aggr_interval = aggr_int;
	ctx->primitive_update_interval = primitive_upd_int;
	ctx->min_nr_regions = min_nr_reg;
	ctx->max_nr_regions = max_nr_reg;

	return 0;
}
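
/*
 * Worked example (illustrative values): with the call below, accesses are
 * sampled every 5ms and aggregated every 100ms, so each aggregation window
 * covers 100000 / 5000 = 20 samples and 'nr_accesses' of a region can
 * range from 0 to 20.  The primitives re-check their targets every minute,
 * and the region count is kept between 10 and 1000:
 *
 *	err = damon_set_attrs(ctx, 5000, 100000, 60 * 1000 * 1000, 10, 1000);
 */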
/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 *
 * Return: 0 if success, or negative error code otherwise.
 */
int damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
			ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
	return 0;
}
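
/*
 * Illustrative sketch (values are examples only): build one scheme that
 * pages out any region of at least PAGE_SIZE that showed no access for
 * five or more aggregation intervals, with unlimited quota and watermarks
 * disabled, and install it:
 *
 *	struct damos_quota quota = {};
 *	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE, };
 *	struct damos *s = damon_new_scheme(PAGE_SIZE, ULONG_MAX, 0, 0,
 *			5, UINT_MAX, DAMOS_PAGEOUT, &quota, &wmarks);
 *
 *	if (!s)
 *		return -ENOMEM;
 *	err = damon_set_schemes(ctx, &s, 1);
 */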
/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}

/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += r->ar.end - r->ar.start;
	}

	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	return sz;
}
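
/*
 * For example (numbers illustrative only): with a single 1GiB target and
 * the default min_nr_regions of 10, the limit is 100MiB.  Merging thus
 * never creates a region so large that the total monitoring space could
 * not still be covered by at least min_nr_regions regions.
 */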
static int kdamond_fn(void *data);

/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}

/**
 * damon_start() - Starts the monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * This function starts a group of monitoring threads for a group of
 * monitoring contexts.  One thread per context is created and run in
 * parallel.  The caller should handle synchronization between the threads
 * by itself.  If a group of threads that was created by another
 * 'damon_start()' call is currently running, this function does nothing but
 * returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if (nr_running_ctxs) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	mutex_unlock(&damon_lock);

	return err;
}

/*
 * __damon_stop() - Stops monitoring of given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop(tsk);
		put_task_struct(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}

/**
 * damon_stop() - Stops the monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			return err;
	}

	return err;
}
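
/*
 * Lifecycle sketch (illustrative): a fully configured context is started
 * and stopped as a group, and destroyed only after its kdamond terminated:
 *
 *	struct damon_ctx *ctxs[1] = { ctx };
 *
 *	err = damon_start(ctxs, 1);
 *	// ... monitoring runs in the kdamond kernel thread ...
 *	err = damon_stop(ctxs, 1);
 *	damon_destroy_ctx(ctx);
 */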
/*
 * damon_check_reset_time_interval() - Check if a time interval is elapsed.
 * @baseline:	the time to check whether the interval has elapsed since
 * @interval:	the time interval (microseconds)
 *
 * See whether the given time interval has passed since the given baseline
 * time.  If so, it also updates the baseline to the current time for the
 * next check.
 *
 * Return: true if the time interval has passed, or false otherwise.
 */
static bool damon_check_reset_time_interval(struct timespec64 *baseline,
		unsigned long interval)
{
	struct timespec64 now;

	ktime_get_coarse_ts64(&now);
	if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
			interval * 1000)
		return false;
	*baseline = now;
	return true;
}

/*
 * Check whether it is time to flush the aggregated information
 */
static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_aggregation,
			ctx->aggr_interval);
}

/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(t, r, damon_nr_regions(t));
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
	}
}
static void damon_split_region_at(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		unsigned long sz_r);

static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
	unsigned long sz;

	sz = r->ar.end - r->ar.start;
	return s->min_sz_region <= sz && sz <= s->max_sz_region &&
		s->min_nr_accesses <= r->nr_accesses &&
		r->nr_accesses <= s->max_nr_accesses &&
		s->min_age_region <= r->age && r->age <= s->max_age_region;
}

static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	bool ret = __damos_valid_target(r, s);

	if (!ret || !s->quota.esz || !c->primitive.get_scheme_score)
		return ret;

	return c->primitive.get_scheme_score(c, t, r, s) >= s->quota.min_score;
}
static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;
		unsigned long sz = r->ar.end - r->ar.start;
		struct timespec64 begin, end;

		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		/* Skip previously charged regions */
		if (quota->charge_target_from) {
			if (t != quota->charge_target_from)
				continue;
			if (r == damon_last_region(t)) {
				quota->charge_target_from = NULL;
				quota->charge_addr_from = 0;
				continue;
			}
			if (quota->charge_addr_from &&
					r->ar.end <= quota->charge_addr_from)
				continue;

			if (quota->charge_addr_from && r->ar.start <
					quota->charge_addr_from) {
				sz = ALIGN_DOWN(quota->charge_addr_from -
						r->ar.start, DAMON_MIN_REGION);
				if (!sz) {
					if (r->ar.end - r->ar.start <=
							DAMON_MIN_REGION)
						continue;
					sz = DAMON_MIN_REGION;
				}
				damon_split_region_at(c, t, r, sz);
				r = damon_next_region(r);
				sz = r->ar.end - r->ar.start;
			}
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
		}

		if (!damos_valid_target(c, t, r, s))
			continue;

		/* Apply the scheme */
		if (c->primitive.apply_scheme) {
			if (quota->esz &&
					quota->charged_sz + sz > quota->esz) {
				sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
						DAMON_MIN_REGION);
				if (!sz)
					goto update_stat;
				damon_split_region_at(c, t, r, sz);
			}
			ktime_get_coarse_ts64(&begin);
			c->primitive.apply_scheme(c, t, r, s);
			ktime_get_coarse_ts64(&end);
			quota->total_charged_ns += timespec64_to_ns(&end) -
				timespec64_to_ns(&begin);
			quota->charged_sz += sz;
			if (quota->esz && quota->charged_sz >= quota->esz) {
				quota->charge_target_from = t;
				quota->charge_addr_from = r->ar.end + 1;
			}
		}
		if (s->action != DAMOS_STAT)
			r->age = 0;

update_stat:
		s->stat_count++;
		s->stat_sz += sz;
	}
}
/* Shouldn't be called if quota->ms and quota->sz are zero */
static void damos_set_effective_quota(struct damos_quota *quota)
{
	unsigned long throughput;
	unsigned long esz;

	if (!quota->ms) {
		quota->esz = quota->sz;
		return;
	}

	if (quota->total_charged_ns)
		throughput = quota->total_charged_sz * 1000000 /
			quota->total_charged_ns;
	else
		throughput = PAGE_SIZE * 1024;
	esz = throughput * quota->ms;

	if (quota->sz && quota->sz < esz)
		esz = quota->sz;
	quota->esz = esz;
}
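
/*
 * Worked example (illustrative numbers): if 10MiB (10485760 bytes) were
 * charged over 100ms (100000000ns) of cumulative apply_scheme() time, the
 * estimated throughput is 10485760 * 1000000 / 100000000 = ~104857 bytes
 * per millisecond.  A time quota of quota->ms == 10 then gives an esz of
 * ~1MiB, further capped by quota->sz if that is smaller.  With no history
 * yet, the PAGE_SIZE * 1024 fallback assumes 4MiB per millisecond on
 * systems with 4KiB pages.
 */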
static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;
		unsigned long cumulated_sz;
		unsigned int score, max_score = 0;

		if (!s->wmarks.activated)
			continue;

		if (!quota->ms && !quota->sz)
			continue;

		/* New charge window starts */
		if (time_after_eq(jiffies, quota->charged_from +
					msecs_to_jiffies(
						quota->reset_interval))) {
			quota->total_charged_sz += quota->charged_sz;
			quota->charged_from = jiffies;
			quota->charged_sz = 0;
			damos_set_effective_quota(quota);
		}

		if (!c->primitive.get_scheme_score)
			continue;

		/* Fill up the score histogram */
		memset(quota->histogram, 0, sizeof(quota->histogram));
		damon_for_each_target(t, c) {
			damon_for_each_region(r, t) {
				if (!__damos_valid_target(r, s))
					continue;
				score = c->primitive.get_scheme_score(
						c, t, r, s);
				quota->histogram[score] +=
					r->ar.end - r->ar.start;
				if (score > max_score)
					max_score = score;
			}
		}

		/* Set the min score limit */
		for (cumulated_sz = 0, score = max_score; ; score--) {
			cumulated_sz += quota->histogram[score];
			if (cumulated_sz >= quota->esz || !score)
				break;
		}
		quota->min_score = score;
	}

	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}
}
#define sz_damon_region(r) (r->ar.end - r->ar.start)

/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = sz_damon_region(l), sz_r = sz_damon_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}
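
/*
 * For example (illustrative numbers): merging a 12KiB region with
 * 'nr_accesses' of 10 and an adjacent 4KiB region with 'nr_accesses' of 2
 * produces one 16KiB region whose 'nr_accesses' is the size-weighted
 * average, (10 * 12288 + 2 * 4096) / 16384 = 8.
 */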
/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
		    sz_damon_region(prev) + sz_damon_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and
 * have similar access frequencies.  This is for minimizing the monitoring
 * overhead under dynamically changeable access patterns.  If a merge was
 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;

	damon_for_each_target(t, c)
		damon_merge_regions_of(t, threshold, sz_limit);
}
/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;

	damon_insert_region(new, r, damon_next_region(r), t);
}

/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_ctx *ctx,
				   struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = r->ar.end - r->ar.start;

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select size of left sub-region to be at
			 * least 10% and at most 90% of the original region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(ctx, t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}

/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions
 * if the current total number of the regions is equal to or smaller than
 * half of the user-specified maximum number of regions.  This is for
 * maximizing the monitoring accuracy under dynamically changeable access
 * patterns.  If a split was unnecessarily made, later
 * 'kdamond_merge_regions()' will revert it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(ctx, t, nr_subregions);

	last_nr_regions = nr_regions;
}
/*
 * Check whether it is time to check and apply the target monitoring regions
 *
 * Returns true if it is.
 */
static bool kdamond_need_update_primitive(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_primitive_update,
			ctx->primitive_update_interval);
}

/*
 * Check whether current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if need to stop current monitoring.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	if (!ctx->primitive.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->primitive.target_valid(t))
			return false;
	}

	return true;
}

static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
{
	struct sysinfo i;

	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		si_meminfo(&i);
		return i.freeram * 1000 / i.totalram;
	default:
		break;
	}
	return -EINVAL;
}

/*
 * Returns zero if the scheme is active.  Else, returns time to wait for the
 * next watermark check in microseconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
		return 0;

	metric = damos_wmark_metric_value(scheme->wmarks.metric);
	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					metric > scheme->wmarks.high ?
					"high" : "low");
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}
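
/*
 * Illustrative configuration: with DAMOS_WMARK_FREE_MEM_RATE the metric is
 * the system's free memory in permille, so high = 500, mid = 300 and
 * low = 200 (example values) deactivate the scheme while more than 50% of
 * memory is free or less than 20% is free, activate it once free memory
 * falls below 30% (but stays above 20%), and keep it active until free
 * memory recovers past 50% or drops under 20%.
 */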
static void kdamond_usleep(unsigned long usecs)
{
	/* See Documentation/timers/timers-howto.rst for the thresholds */
	if (usecs > 20 * USEC_PER_MSEC)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_idle_range(usecs, usecs + 1);
}

/* Returns negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;

	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!min_wait_time || wait_time < min_wait_time)
				min_wait_time = wait_time;
		}
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);
	}
	return -EBUSY;
}

/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = (struct damon_ctx *)data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;
	bool done = false;

	pr_debug("kdamond (%d) starts\n", current->pid);

	if (ctx->primitive.init)
		ctx->primitive.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		done = true;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx) && !done) {
		if (kdamond_wait_activation(ctx))
			continue;

		if (ctx->primitive.prepare_access_checks)
			ctx->primitive.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			done = true;

		kdamond_usleep(ctx->sample_interval);

		if (ctx->primitive.check_accesses)
			max_nr_accesses = ctx->primitive.check_accesses(ctx);

		if (kdamond_aggregate_interval_passed(ctx)) {
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				done = true;
			kdamond_apply_schemes(ctx);
			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->primitive.reset_aggregated)
				ctx->primitive.reset_aggregated(ctx);
		}

		if (kdamond_need_update_primitive(ctx)) {
			if (ctx->primitive.update)
				ctx->primitive.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate)
		ctx->callback.before_terminate(ctx);
	if (ctx->primitive.cleanup)
		ctx->primitive.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	mutex_unlock(&damon_lock);

	return 0;
}

#include "core-test.h"