2 * Copyright (C) 2003-2004 Sistina Software, Inc. All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
5 * This file is part of LVM2.
7 * This copyrighted material is made available to anyone wishing to use,
8 * modify, copy, or redistribute it subject to the terms and conditions
9 * of the GNU Lesser General Public License v.2.1.
11 * You should have received a copy of the GNU Lesser General Public License
12 * along with this program; if not, write to the Free Software Foundation,
13 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "toolcontext.h"
27 #include <sys/resource.h>
29 #ifndef DEVMAPPER_SUPPORT
/*
 * Build without device-mapper support: keep the memlock entry points so
 * callers link unchanged.  Their bodies are not visible in this chunk;
 * NOTE(review): presumably empty no-op stubs -- confirm in the full file.
 */
31 void memlock_inc(struct cmd_context *cmd)
35 void memlock_dec(struct cmd_context *cmd)
43 void memlock_init(struct cmd_context *cmd)
48 #else /* DEVMAPPER_SUPPORT */
/*
 * Reserved sizes: _size_stack and _size_malloc_tmp are set in memlock_init()
 * from activation/reserved_stack and activation/reserved_memory (KiB values,
 * converted to bytes there).  _size_malloc keeps a default preallocated heap
 * chunk size.
 */
50 static size_t _size_stack;
51 static size_t _size_malloc_tmp;
52 static size_t _size_malloc = 2000000;
/* Heap block preallocated by _allocate_memory(); held while memory is locked */
54 static void *_malloc_mem = NULL;
/* Nesting counters: regular lock users vs. daemon (mlockall) users */
55 static int _memlock_count = 0;
56 static int _memlock_count_daemon = 0;
/* Priority to switch to while locked; from activation/process_priority */
58 static int _default_priority;
60 /* list of maps, that are unconditionally ignored */
61 static const char * const _ignore_maps[] = {
66 /* default blacklist for maps */
67 static const char * const _blacklist_maps[] = {
68 "locale/locale-archive",
69 "gconv/gconv-modules.cache",
70 "/libreadline.so.", /* not using readline during mlock */
71 "/libncurses.so.", /* not using ncurses during mlock */
72 "/libdl-", /* not using dlopen,dlsym during mlock */
73 /* "/libdevmapper-event.so" */
/* Direction flag shared by _memlock_maps() and _maps_line() */
76 typedef enum { LVM_MLOCK, LVM_MUNLOCK } lvmlock_t;
/* 1: lock everything via mlockall(); 0: walk /proc/self/maps selectively */
78 static unsigned _use_mlockall;
80 static size_t _maps_len = 8192; /* Initial buffer size for reading /proc/self/maps */
/* Whole maps file is read into this single chunk (grown as needed) */
81 static char *_maps_buffer;
/* "<proc_dir>/self/maps" path, built lazily in _lock_mem() */
82 static char _procselfmaps[PATH_MAX] = "";
83 #define SELF_MAPS "/self/maps"
85 static size_t _mstats; /* statistic for maps locking */
/*
 * Touch the memory range [mem, mem + size) so its pages are actually
 * allocated/resident before mlock() is attempted.
 * NOTE(review): the write loop and the 'pos' declaration are on lines not
 * visible in this chunk -- presumably one word per 'pagesize'; confirm.
 */
87 static void _touch_memory(void *mem, size_t size)
89 size_t pagesize = lvm_getpagesize();
91 char *end = pos + size - sizeof(long);
/*
 * Preallocate and dirty the reserved stack and heap areas so their pages
 * exist before memory gets locked.  The temporary malloc block is freed
 * again at the end; _malloc_mem is kept (presumably released later by
 * _release_memory() -- body not visible in this chunk).
 */
99 static void _allocate_memory(void)
101 void *stack_mem, *temp_malloc_mem;
/* Reserve stack space via alloca(); touching forces page-in */
103 if ((stack_mem = alloca(_size_stack)))
104 _touch_memory(stack_mem, _size_stack);
106 if ((temp_malloc_mem = malloc(_size_malloc_tmp)))
107 _touch_memory(temp_malloc_mem, _size_malloc_tmp);
109 if ((_malloc_mem = malloc(_size_malloc)))
110 _touch_memory(_malloc_mem, _size_malloc);
/* free(NULL) is a no-op, so an earlier failed malloc is harmless here */
112 free(temp_malloc_mem);
/* Counterpart of _allocate_memory(); body not visible in this chunk
 * (presumably frees _malloc_mem -- confirm in the full file). */
115 static void _release_memory(void)
/*
 * Handle a single /proc/self/maps line for lock == LVM_MLOCK / LVM_MUNLOCK:
 * parse "from-to perms", skip unreadable or filtered-out regions, then
 * mlock()/munlock() the address range.  'mstats' accumulates processed sizes
 * (the accumulation and return statements are on lines not visible here).
 */
121 * mlock/munlock memory areas from /proc/self/maps
122 * format described in kernel/Documentation/filesystems/proc.txt
124 static int _maps_line(const struct config_node *cn, lvmlock_t lock,
125 const char* line, size_t* mstats)
127 const struct config_value *cv;
/* "%n" records the offset where the pathname part of the line begins */
133 if (sscanf(line, "%lx-%lx %c%c%c%c%n",
134 &from, &to, &fr, &fw, &fx, &fp, &pos) != 6) {
135 log_error("Failed to parse maps line: %s", line);
139 /* Select readable maps */
141 log_debug("%s area unreadable %s : Skipping.",
142 (lock == LVM_MLOCK) ? "mlock" : "munlock", line);
146 /* always ignored areas */
147 for (i = 0; i < sizeof(_ignore_maps) / sizeof(_ignore_maps[0]); ++i)
148 if (strstr(line + pos, _ignore_maps[i])) {
149 log_debug("mlock ignore filter '%s' matches '%s': Skipping.",
150 _ignore_maps[i], line);
156 /* If no blacklist configured, use an internal set */
157 for (i = 0; i < sizeof(_blacklist_maps) / sizeof(_blacklist_maps[0]); ++i)
158 if (strstr(line + pos, _blacklist_maps[i])) {
159 log_debug("mlock default filter '%s' matches '%s': Skipping.",
160 _blacklist_maps[i], line);
/* User-configured activation/mlock_filter substrings (cn may list several) */
164 for (cv = cn->v; cv; cv = cv->next) {
165 if ((cv->type != CFG_STRING) || !cv->v.str[0])
167 if (strstr(line + pos, cv->v.str)) {
168 log_debug("mlock_filter '%s' matches '%s': Skipping.",
176 log_debug("%s %10ldKiB %12lx - %12lx %c%c%c%c%s",
177 (lock == LVM_MLOCK) ? "mlock" : "munlock",
178 ((long)sz + 1023) / 1024, from, to, fr, fw, fx, fp, line + pos);
180 if (lock == LVM_MLOCK) {
181 if (mlock((const void*)from, sz) < 0) {
182 log_sys_error("mlock", line);
186 if (munlock((const void*)from, sz) < 0) {
187 log_sys_error("munlock", line);
/*
 * Lock or unlock the process address space.  With _use_mlockall set this is
 * a single mlockall()/munlockall() call; otherwise the whole /proc/self/maps
 * file is slurped into _maps_buffer (grown until it fits) and each line is
 * fed through _maps_line() with the activation/mlock_filter config node.
 */
195 static int _memlock_maps(struct cmd_context *cmd, lvmlock_t lock, size_t *mstats)
197 const struct config_node *cn;
198 char *line, *line_end;
205 if (lock == LVM_MLOCK) {
206 if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
207 log_sys_error("mlockall", "");
212 log_sys_error("munlockall", "");
222 /* Force libc.mo load */
223 if (lock == LVM_MLOCK)
225 /* Reset statistic counters */
228 /* read mapping into a single memory chunk without reallocation
229 * in the middle of reading maps file */
231 if (!_maps_buffer || len >= _maps_len) {
/* NOTE(review): assigning dm_realloc() straight back to _maps_buffer leaks
 * the old buffer on failure -- the classic p = realloc(p, n) anti-pattern. */
234 if (!(_maps_buffer = dm_realloc(_maps_buffer, _maps_len))) {
235 log_error("Allocation of maps buffer failed");
/* Re-read the whole file from the start after every buffer growth */
239 lseek(_maps_fd, 0, SEEK_SET);
240 for (len = 0 ; len < _maps_len; len += n) {
/* read() returning 0 means EOF: terminate the buffer */
241 if (!(n = read(_maps_fd, _maps_buffer + len, _maps_len - len))) {
242 _maps_buffer[len] = '\0';
248 if (len < _maps_len) /* fits in buffer */
253 cn = find_config_tree_node(cmd, "activation/mlock_filter");
/* Split the buffer in place and process one maps line at a time */
255 while ((line_end = strchr(line, '\n'))) {
256 *line_end = '\0'; /* remove \n */
257 if (!_maps_line(cn, lock, line, mstats))
262 log_debug("%socked %ld bytes",
263 (lock == LVM_MLOCK) ? "L" : "Unl", (long)*mstats);
268 /* Stop memory getting swapped out */
269 static void _lock_mem(struct cmd_context *cmd)
274 * For daemon we need to use mlockall()
275 * so even future addition of a thread which may not even use the lvm lib
276 * will not block the memory-locked thread
277 * Note: assuming _memlock_count_daemon is updated before _memlock_count
279 _use_mlockall = _memlock_count_daemon ? 1 :
280 find_config_tree_bool(cmd, "activation/use_mlockall", DEFAULT_USE_MLOCKALL);
282 if (!_use_mlockall) {
/* Build "<proc_dir>/self/maps" once; _procselfmaps is cached across calls */
283 if (!*_procselfmaps &&
284 dm_snprintf(_procselfmaps, sizeof(_procselfmaps),
285 "%s" SELF_MAPS, cmd->proc_dir) < 0) {
286 log_error("proc_dir too long");
/* NOTE(review): open() returns -1 on error, not 0 -- this '!(...)' test only
 * catches fd 0 and treats a real failure (-1) as success; should be '< 0'. */
290 if (!(_maps_fd = open(_procselfmaps, O_RDONLY))) {
291 log_sys_error("open", _procselfmaps);
296 log_very_verbose("Locking memory");
297 if (!_memlock_maps(cmd, LVM_MLOCK, &_mstats))
/* getpriority() can legitimately return -1; errno disambiguates, but it must
 * be cleared before the call (possibly done on a line outside this view). */
301 if (((_priority = getpriority(PRIO_PROCESS, 0)) == -1) && errno)
302 log_sys_error("getpriority", "");
/* Raise priority while locked so we do not stall holding locked memory */
304 if (setpriority(PRIO_PROCESS, 0, _default_priority))
305 log_error("setpriority %d failed: %s",
306 _default_priority, strerror(errno));
/*
 * Undo _lock_mem(): unlock all areas, release the maps fd/buffer when the
 * per-map path was used, sanity-check the lock/unlock byte statistics and
 * restore the saved process priority.
 */
309 static void _unlock_mem(struct cmd_context *cmd)
311 size_t unlock_mstats;
313 log_very_verbose("Unlocking memory");
315 if (!_memlock_maps(cmd, LVM_MUNLOCK, &unlock_mstats))
318 if (!_use_mlockall) {
320 log_sys_error("close", _procselfmaps);
/* NOTE(review): _maps_buffer should be reset to NULL after dm_free() --
 * possibly done on a line outside this view; otherwise it dangles. */
321 dm_free(_maps_buffer);
/* Unlocking less than was locked indicates a bookkeeping bug */
323 if (_mstats < unlock_mstats)
324 log_error(INTERNAL_ERROR "Maps lock %ld < unlock %ld",
325 (long)_mstats, (long)unlock_mstats);
328 if (setpriority(PRIO_PROCESS, 0, _priority))
329 log_error("setpriority %u failed: %s", _priority,
/* Lock memory only on the 0 -> 1 transition of the combined lock count */
334 static void _lock_mem_if_needed(struct cmd_context *cmd)
336 if ((_memlock_count + _memlock_count_daemon) == 1)
/* Unlock memory only when the last holder (regular or daemon) is gone */
340 static void _unlock_mem_if_possible(struct cmd_context *cmd)
342 if ((_memlock_count + _memlock_count_daemon) == 0)
/* Take a regular memory lock reference (the increment of _memlock_count
 * itself is on a line outside this view). */
346 void memlock_inc(struct cmd_context *cmd)
349 _lock_mem_if_needed(cmd);
350 log_debug("memlock_count inc to %d", _memlock_count);
/* Drop a regular memory lock reference; complains about underflow
 * (the decrement itself is on a line outside this view). */
353 void memlock_dec(struct cmd_context *cmd)
356 log_error(INTERNAL_ERROR "_memlock_count has dropped below 0.");
358 _unlock_mem_if_possible(cmd);
359 log_debug("memlock_count dec to %d", _memlock_count);
363 * The memlock_*_daemon functions will force the mlockall() call that we need
364 * to stay in memory, but they will have no effect on device scans (unlike
365 * normal memlock_inc and memlock_dec). Memory is kept locked as long as either
366 * of memlock or memlock_daemon is in effect.
369 void memlock_inc_daemon(struct cmd_context *cmd)
371 ++_memlock_count_daemon;
/* Ordering matters: see the note in _lock_mem() about which counter the
 * mlockall decision is based on */
372 if (_memlock_count_daemon == 1 && _memlock_count > 0)
373 log_error(INTERNAL_ERROR "_memlock_inc_daemon used after _memlock_inc.");
374 _lock_mem_if_needed(cmd);
375 log_debug("memlock_count_daemon inc to %d", _memlock_count_daemon);
/* Drop a daemon memory lock reference; logs (but does not abort on)
 * an underflow before decrementing. */
378 void memlock_dec_daemon(struct cmd_context *cmd)
380 if (!_memlock_count_daemon)
381 log_error(INTERNAL_ERROR "_memlock_count_daemon has dropped below 0.");
382 --_memlock_count_daemon;
383 _unlock_mem_if_possible(cmd);
384 log_debug("memlock_count_daemon dec to %d", _memlock_count_daemon);
388 * This disregards the daemon (dmeventd) locks, since we use memlock() to check
389 * whether it is safe to run a device scan, which would normally coincide with
390 * !memlock() -- but the daemon global memory lock breaks this assumption, so
391 * we do not take those into account here.
/* NOTE(review): the enclosing function's signature (presumably
 * 'int memlock(void)') lies on lines outside this view. */
395 return _memlock_count;
/*
 * Read the memory-locking tunables from the config tree: reserved stack and
 * heap sizes (config values are in KiB, hence the * 1024) and the process
 * priority applied while memory is locked.
 */
398 void memlock_init(struct cmd_context *cmd)
400 _size_stack = find_config_tree_int(cmd,
401 "activation/reserved_stack",
402 DEFAULT_RESERVED_STACK) * 1024;
403 _size_malloc_tmp = find_config_tree_int(cmd,
404 "activation/reserved_memory",
405 DEFAULT_RESERVED_MEMORY) * 1024;
406 _default_priority = find_config_tree_int(cmd,
407 "activation/process_priority",
408 DEFAULT_PROCESS_PRIORITY);