1 // SPDX-License-Identifier: GPL-2.0
/*
 * DAMON-based LRU-lists Sorting
 *
 * Author: SeongJae Park <sj@kernel.org>
 */
8 #define pr_fmt(fmt) "damon-lru-sort: " fmt
10 #include <linux/damon.h>
11 #include <linux/ioport.h>
12 #include <linux/module.h>
13 #include <linux/sched.h>
14 #include <linux/workqueue.h>
16 #include "modules-common.h"
/*
 * Prefix all module parameters with "damon_lru_sort." so they appear as
 * /sys/module/damon_lru_sort/parameters/<name>.  Undefine any prefix a
 * prior header may have set before defining ours.
 */
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "damon_lru_sort."
/*
 * Enable or disable DAMON_LRU_SORT.
 *
 * You can enable DAMON_LRU_SORT by setting the value of this parameter as
 * ``Y``.  Setting it as ``N`` disables DAMON_LRU_SORT.  Note that
 * DAMON_LRU_SORT could do no real monitoring and LRU-lists sorting due to the
 * watermarks-based activation condition.  Refer to below descriptions for the
 * watermarks parameter for this.
 */
static bool enabled __read_mostly;
/*
 * Make DAMON_LRU_SORT reads the input parameters again, except ``enabled``.
 *
 * Input parameters that updated while DAMON_LRU_SORT is running are not
 * applied by default.  Once this parameter is set as ``Y``, DAMON_LRU_SORT
 * reads values of parameters except ``enabled`` again.  Once the re-reading is
 * done, this parameter is set as ``N``.  If invalid parameters are found while
 * the re-reading, DAMON_LRU_SORT will be disabled.
 */
static bool commit_inputs __read_mostly;
module_param(commit_inputs, bool, 0600);
/*
 * Access frequency threshold for hot memory regions identification in permil.
 *
 * If a memory region is accessed in frequency of this or higher,
 * DAMON_LRU_SORT identifies the region as hot, and mark it as accessed on the
 * LRU list, so that it could not be reclaimed under memory pressure.  50% by
 * default.
 */
static unsigned long hot_thres_access_freq = 500;
module_param(hot_thres_access_freq, ulong, 0600);
/*
 * Time threshold for cold memory regions identification in microseconds.
 *
 * If a memory region is not accessed for this or longer time, DAMON_LRU_SORT
 * identifies the region as cold, and mark it as unaccessed on the LRU list, so
 * that it could be reclaimed first under memory pressure.  120 seconds by
 * default.
 */
static unsigned long cold_min_age __read_mostly = 120000000;
module_param(cold_min_age, ulong, 0600);
/*
 * Limit of time for trying the LRU lists sorting in milliseconds.
 *
 * DAMON_LRU_SORT tries to use only up to this time within a time window
 * (quota_reset_interval_ms) for trying LRU lists sorting.  This can be used
 * for limiting CPU consumption of DAMON_LRU_SORT.  If the value is zero, the
 * limit is disabled.
 *
 * 10 ms by default.
 */
static unsigned long quota_ms __read_mostly = 10;
module_param(quota_ms, ulong, 0600);
/*
 * The time quota charge reset interval in milliseconds.
 *
 * The charge reset interval for the quota of time (quota_ms).  That is,
 * DAMON_LRU_SORT does not try LRU-lists sorting for more than quota_ms
 * milliseconds or quota_sz bytes within quota_reset_interval_ms milliseconds.
 *
 * 1 second by default.
 */
static unsigned long quota_reset_interval_ms __read_mostly = 1000;
module_param(quota_reset_interval_ms, ulong, 0600);
93 struct damos_watermarks damon_lru_sort_wmarks = {
94 .metric = DAMOS_WMARK_FREE_MEM_RATE,
95 .interval = 5000000, /* 5 seconds */
96 .high = 200, /* 20 percent */
97 .mid = 150, /* 15 percent */
98 .low = 50, /* 5 percent */
100 DEFINE_DAMON_MODULES_WMARKS_PARAMS(damon_lru_sort_wmarks);
102 static struct damon_attrs damon_lru_sort_mon_attrs = {
103 .sample_interval = 5000, /* 5 ms */
104 .aggr_interval = 100000, /* 100 ms */
105 .ops_update_interval = 0,
106 .min_nr_regions = 10,
107 .max_nr_regions = 1000,
109 DEFINE_DAMON_MODULES_MON_ATTRS_PARAMS(damon_lru_sort_mon_attrs);
/*
 * Start of the target memory region in physical address.
 *
 * The start physical address of memory region that DAMON_LRU_SORT will do work
 * against.  By default, biggest System RAM is used as the region.
 */
static unsigned long monitor_region_start __read_mostly;
module_param(monitor_region_start, ulong, 0600);
/*
 * End of the target memory region in physical address.
 *
 * The end physical address of memory region that DAMON_LRU_SORT will do work
 * against.  By default, biggest System RAM is used as the region.
 */
static unsigned long monitor_region_end __read_mostly;
module_param(monitor_region_end, ulong, 0600);
/*
 * PID of the DAMON thread
 *
 * If DAMON_LRU_SORT is enabled, this becomes the PID of the worker thread.
 * Else, -1.
 */
static int kdamond_pid __read_mostly = -1;
module_param(kdamond_pid, int, 0400);
138 static struct damos_stat damon_lru_sort_hot_stat;
139 DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_lru_sort_hot_stat,
140 lru_sort_tried_hot_regions, lru_sorted_hot_regions,
143 static struct damos_stat damon_lru_sort_cold_stat;
144 DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_lru_sort_cold_stat,
145 lru_sort_tried_cold_regions, lru_sorted_cold_regions,
/* The DAMON monitoring context and its single monitoring target for this module. */
static struct damon_ctx *ctx;
static struct damon_target *target;
151 /* Create a DAMON-based operation scheme for hot memory regions */
152 static struct damos *damon_lru_sort_new_hot_scheme(unsigned int hot_thres)
154 struct damos_access_pattern pattern = {
155 /* Find regions having PAGE_SIZE or larger size */
156 .min_sz_region = PAGE_SIZE,
157 .max_sz_region = ULONG_MAX,
158 /* and accessed for more than the threshold */
159 .min_nr_accesses = hot_thres,
160 .max_nr_accesses = UINT_MAX,
161 /* no matter its age */
163 .max_age_region = UINT_MAX,
165 struct damos_quota quota = {
167 * Do not try LRU-lists sorting of hot pages for more than half
168 * of quota_ms milliseconds within quota_reset_interval_ms.
172 .reset_interval = quota_reset_interval_ms,
173 /* Within the quota, mark hotter regions accessed first. */
175 .weight_nr_accesses = 1,
179 return damon_new_scheme(
181 /* prioritize those on LRU lists, as soon as found */
183 /* under the quota. */
185 /* (De)activate this according to the watermarks. */
186 &damon_lru_sort_wmarks);
189 /* Create a DAMON-based operation scheme for cold memory regions */
190 static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
192 struct damos_access_pattern pattern = {
193 /* Find regions having PAGE_SIZE or larger size */
194 .min_sz_region = PAGE_SIZE,
195 .max_sz_region = ULONG_MAX,
196 /* and not accessed at all */
197 .min_nr_accesses = 0,
198 .max_nr_accesses = 0,
199 /* for min_age or more micro-seconds */
200 .min_age_region = cold_thres,
201 .max_age_region = UINT_MAX,
203 struct damos_quota quota = {
205 * Do not try LRU-lists sorting of cold pages for more than
206 * half of quota_ms milliseconds within
207 * quota_reset_interval_ms.
211 .reset_interval = quota_reset_interval_ms,
212 /* Within the quota, mark colder regions not accessed first. */
214 .weight_nr_accesses = 0,
218 return damon_new_scheme(
220 /* mark those as not accessed, as soon as found */
222 /* under the quota. */
224 /* (De)activate this according to the watermarks. */
225 &damon_lru_sort_wmarks);
228 static int damon_lru_sort_apply_parameters(void)
230 struct damos *scheme;
231 struct damon_addr_range addr_range;
232 unsigned int hot_thres, cold_thres;
235 err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
239 /* aggr_interval / sample_interval is the maximum nr_accesses */
240 hot_thres = damon_lru_sort_mon_attrs.aggr_interval /
241 damon_lru_sort_mon_attrs.sample_interval *
242 hot_thres_access_freq / 1000;
243 scheme = damon_lru_sort_new_hot_scheme(hot_thres);
246 err = damon_set_schemes(ctx, &scheme, 1);
250 cold_thres = cold_min_age / damon_lru_sort_mon_attrs.aggr_interval;
251 scheme = damon_lru_sort_new_cold_scheme(cold_thres);
254 damon_add_scheme(ctx, scheme);
256 if (monitor_region_start > monitor_region_end)
258 if (!monitor_region_start && !monitor_region_end &&
259 !damon_find_biggest_system_ram(&monitor_region_start,
260 &monitor_region_end))
262 addr_range.start = monitor_region_start;
263 addr_range.end = monitor_region_end;
264 return damon_set_regions(target, &addr_range, 1);
267 static int damon_lru_sort_turn(bool on)
272 err = damon_stop(&ctx, 1);
278 err = damon_lru_sort_apply_parameters();
282 err = damon_start(&ctx, 1, true);
285 kdamond_pid = ctx->kdamond->pid;
289 static struct delayed_work damon_lru_sort_timer;
290 static void damon_lru_sort_timer_fn(struct work_struct *work)
292 static bool last_enabled;
295 now_enabled = enabled;
296 if (last_enabled != now_enabled) {
297 if (!damon_lru_sort_turn(now_enabled))
298 last_enabled = now_enabled;
300 enabled = last_enabled;
303 static DECLARE_DELAYED_WORK(damon_lru_sort_timer, damon_lru_sort_timer_fn);
/* Set to true once module init completed; guards early 'enabled' writes. */
static bool damon_lru_sort_initialized;
307 static int damon_lru_sort_enabled_store(const char *val,
308 const struct kernel_param *kp)
310 int rc = param_set_bool(val, kp);
315 if (!damon_lru_sort_initialized)
318 schedule_delayed_work(&damon_lru_sort_timer, 0);
323 static const struct kernel_param_ops enabled_param_ops = {
324 .set = damon_lru_sort_enabled_store,
325 .get = param_get_bool,
328 module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
329 MODULE_PARM_DESC(enabled,
330 "Enable or disable DAMON_LRU_SORT (default: disabled)");
332 static int damon_lru_sort_handle_commit_inputs(void)
339 err = damon_lru_sort_apply_parameters();
340 commit_inputs = false;
344 static int damon_lru_sort_after_aggregation(struct damon_ctx *c)
348 /* update the stats parameter */
349 damon_for_each_scheme(s, c) {
350 if (s->action == DAMOS_LRU_PRIO)
351 damon_lru_sort_hot_stat = s->stat;
352 else if (s->action == DAMOS_LRU_DEPRIO)
353 damon_lru_sort_cold_stat = s->stat;
356 return damon_lru_sort_handle_commit_inputs();
/*
 * DAMON callback run after each watermarks check: handle any pending
 * commit_inputs request even while the scheme is deactivated by watermarks.
 */
static int damon_lru_sort_after_wmarks_check(struct damon_ctx *c)
{
	return damon_lru_sort_handle_commit_inputs();
}
364 static int __init damon_lru_sort_init(void)
366 ctx = damon_new_ctx();
370 if (damon_select_ops(ctx, DAMON_OPS_PADDR)) {
371 damon_destroy_ctx(ctx);
375 ctx->callback.after_wmarks_check = damon_lru_sort_after_wmarks_check;
376 ctx->callback.after_aggregation = damon_lru_sort_after_aggregation;
378 target = damon_new_target();
380 damon_destroy_ctx(ctx);
383 damon_add_target(ctx, target);
385 schedule_delayed_work(&damon_lru_sort_timer, 0);
387 damon_lru_sort_initialized = true;
391 module_init(damon_lru_sort_init);