2 * uevent.c - trigger upon netlink uevents from the kernel
4 * Only kernels from version 2.6.10* on provide the uevent netlink socket.
5 * Until the libc-kernel-headers are updated, you need to compile with:
7 * gcc -I /lib/modules/`uname -r`/build/include -o uevent_listen uevent_listen.c
9 * Copyright (C) 2004 Kay Sievers <kay.sievers@vrfy.org>
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation version 2 of the License.
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public License along
21 * with this program. If not, see <http://www.gnu.org/licenses/>.
34 #include <sys/socket.h>
38 #include <linux/types.h>
39 #include <linux/netlink.h>
54 #include "blacklist.h"
55 #include "devmapper.h"
/*
 * Burst-accumulation tunables: stop batching after 2048 events or 30 s,
 * and only keep batching while events arrive faster than 10/s.
 * NOTE(review): this chunk is a lossy extraction -- stale line numbers are
 * embedded in each line and many original lines are missing.
 */
57 #define MAX_ACCUMULATION_COUNT 2048
58 #define MAX_ACCUMULATION_TIME 30*1000
59 #define MIN_BURST_SPEED 10
/* Callback type invoked per serviced uevent; non-zero return signals error. */
61 typedef int (uev_trigger)(struct uevent *, void * trigger_data);
/* Mutex + condvar pair guarding the shared uevent queue (uevq). */
64 pthread_mutex_t uevq_lock = PTHREAD_MUTEX_INITIALIZER;
65 pthread_mutex_t *uevq_lockp = &uevq_lock;
66 pthread_cond_t uev_cond = PTHREAD_COND_INITIALIZER;
67 pthread_cond_t *uev_condp = &uev_cond;
/* Trigger callback and opaque context registered by uevent_dispatch(). */
68 uev_trigger *my_uev_trigger;
69 void * my_trigger_data;
/*
 * Return non-zero while uevents are queued or one is being serviced.
 * The queue-emptiness test is done under uevq_lockp; servicing_uev is
 * read without the lock -- presumably set by the service thread, so this
 * is a best-effort busy indication (TODO confirm against full source).
 */
72 int is_uevent_busy(void)
76 pthread_mutex_lock(uevq_lockp);
77 empty = list_empty(&uevq);
78 pthread_mutex_unlock(uevq_lockp);
79 return (!empty || servicing_uev);
/*
 * Allocate a zeroed struct uevent and initialize both list heads so the
 * node can always be safely list_del'd.  The MALLOC NULL-check/return
 * lines are missing from this extraction -- callers treat NULL as OOM.
 */
82 struct uevent * alloc_uevent (void)
84 struct uevent *uev = MALLOC(sizeof(struct uevent));
87 INIT_LIST_HEAD(&uev->node);
88 INIT_LIST_HEAD(&uev->merge_node);
/*
 * Drain a uevent list: unlink each entry, drop its udev_device reference
 * (and, in the full source, free the uevent itself).
 */
95 uevq_cleanup(struct list_head *tmpq)
97 struct uevent *uev, *tmp;
99 list_for_each_entry_safe(uev, tmp, tmpq, node) {
100 list_del_init(&uev->node);
103 udev_device_unref(uev->udev);
/*
 * Look up "attr" in the uevent's envp[] ("KEY=value" strings) and return
 * a pointer to the value part, or NULL when absent.  The strlen/memcmp
 * pair matches the key exactly (must be followed by '=').  An empty attr
 * name is rejected with a log message.
 */
108 static const char* uevent_get_env_var(const struct uevent *uev,
112 const char *p = NULL;
121 for (i = 0; uev->envp[i] != NULL; i++) {
122 const char *var = uev->envp[i];
124 if (strlen(var) > len &&
125 !memcmp(var, attr, len) && var[len] == '=') {
131 condlog(4, "%s: %s -> '%s'", __func__, attr, p);
135 condlog(2, "%s: empty variable name", __func__);
/*
 * Parse environment variable "attr" as a non-negative base-10 integer;
 * missing/empty/garbage values are rejected (full source returns -1).
 * NOTE(review): strtoul() never returns a negative value; the "ret < 0"
 * test only works if ret is a signed type wrapping large inputs --
 * verify the (missing) declaration of ret in the full source.
 */
139 static int uevent_get_env_positive_int(const struct uevent *uev,
142 const char *p = uevent_get_env_var(uev, attr);
146 if (p == NULL || *p == '\0')
149 ret = strtoul(p, &q, 10);
150 if (*q != '\0' || ret < 0) {
151 condlog(2, "%s: invalid %s: '%s'", __func__, attr, p);
/*
 * Resolve the uevent's WWID: pick the configured uid attribute for this
 * kernel device name (under the multipath config lock, released via the
 * cleanup handler) and read it from the uevent environment.
 */
158 uevent_get_wwid(struct uevent *uev)
162 struct config * conf;
164 conf = get_multipath_config();
165 pthread_cleanup_push(put_multipath_config, conf);
166 uid_attribute = parse_uid_attribute_by_attrs(conf->uid_attrs, uev->kernel);
167 pthread_cleanup_pop(1);
169 val = uevent_get_env_var(uev, uid_attribute);
/*
 * Report whether uevent merging is enabled in the current multipath
 * configuration (the conf field consulted is in lines missing from this
 * extraction; need_merge defaults to false).
 */
176 uevent_need_merge(void)
178 struct config * conf;
179 bool need_merge = false;
181 conf = get_multipath_config();
184 put_multipath_config(conf);
/*
 * Decide whether a uevent can be dropped before queueing: dm-* devices
 * are never filtered by devnode; path devices are run through the
 * configured devnode blacklist/exceptions under the config lock.
 */
190 uevent_can_discard(struct uevent *uev)
193 struct config * conf;
196 * do not filter dm devices by devnode
198 if (!strncmp(uev->kernel, "dm-", 3))
201 * filter paths devices by devnode
203 conf = get_multipath_config();
204 pthread_cleanup_push(put_multipath_config, conf);
205 if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
208 pthread_cleanup_pop(1);
/*
 * Return true when "earlier" is made redundant by "later" for the same
 * (non-dm) kernel device: either the device was later removed, or a
 * "change" is superseded by a later "add".
 */
216 uevent_can_filter(struct uevent *earlier, struct uevent *later)
220 * filter earlier uevents if path was removed later. Eg:
221 * "add path1 |change path1 |add path2 |remove path1"
223 * "add path2 |remove path1"
224 * uevents "add path1" and "change path1" are filtered out
226 if (!strcmp(earlier->kernel, later->kernel) &&
227 !strcmp(later->action, "remove") &&
228 strncmp(later->kernel, "dm-", 3)) {
233 * filter change uevents if add uevents exist. Eg:
234 * "change path1| add path1 |add path2"
236 * "add path1 |add path2"
237 * uevent "change path1" is filtered out
239 if (!strcmp(earlier->kernel, later->kernel) &&
240 !strcmp(earlier->action, "change") &&
241 !strcmp(later->action, "add") &&
242 strncmp(later->kernel, "dm-", 3)) {
/*
 * Return true when merging must stop at "earlier": dm events never
 * merge, a missing WWID makes the decision impossible, and opposite
 * non-"change" actions on the same LUN must not be reordered by merging.
 */
250 merge_need_stop(struct uevent *earlier, struct uevent *later)
253 * dm uevent do not try to merge with left uevents
255 if (!strncmp(later->kernel, "dm-", 3))
259 * we can not make a judgement without wwid,
260 * so it is sensible to stop merging
262 if (!earlier->wwid || !later->wwid)
265 * uevents merging stopped
266 * when we meet an opposite action uevent from the same LUN to AVOID
267 * "add path1 |remove path1 |add path2 |remove path2 |add path3"
268 * to merge as "remove path1, path2" and "add path1, path2, path3"
270 * "remove path1 |add path1 |remove path2 |add path2 |remove path3"
271 * to merge as "add path1, path2" and "remove path1, path2, path3"
273 * when we meet a non-change uevent from the same LUN
274 * with the same wwid and different action
275 * it would be better to stop merging.
277 if (!strcmp(earlier->wwid, later->wwid) &&
278 strcmp(earlier->action, later->action) &&
279 strcmp(earlier->action, "change") &&
280 strcmp(later->action, "change"))
/*
 * Two path (non-dm) uevents may merge when both WWIDs exist and match,
 * actions match, and the action is not "change" (i.e. add or remove).
 */
287 uevent_can_merge(struct uevent *earlier, struct uevent *later)
289 /* merge paths uevents
290 * whose wwids exist and are the same
291 * and actions are the same,
292 * and actions are addition or deletion
294 if (earlier->wwid && later->wwid &&
295 !strcmp(earlier->wwid, later->wwid) &&
296 !strcmp(earlier->action, later->action) &&
297 strncmp(earlier->action, "change", 6) &&
298 strncmp(earlier->kernel, "dm-", 3)) {
/*
 * Pre-pass over the pending queue (newest to oldest): drop discardable
 * uevents outright, and resolve the WWID for each surviving path
 * (non-dm) uevent so the later filter/merge passes can compare LUNs.
 */
306 uevent_prepare(struct list_head *tmpq)
308 struct uevent *uev, *tmp;
310 list_for_each_entry_reverse_safe(uev, tmp, tmpq, node) {
311 if (uevent_can_discard(uev)) {
312 list_del_init(&uev->node);
314 udev_device_unref(uev->udev);
319 if (strncmp(uev->kernel, "dm-", 3) &&
321 uevent_get_wwid(uev);
/*
 * Walk the queue backwards from "later" and delete every earlier uevent
 * that uevent_can_filter() proves redundant, releasing its udev ref.
 */
326 uevent_filter(struct uevent *later, struct list_head *tmpq)
328 struct uevent *earlier, *tmp;
330 list_for_some_entry_reverse_safe(earlier, tmp, &later->node, tmpq, node) {
332 * filter unnecessary earlier uevents
333 * by the later uevent
335 if (uevent_can_filter(earlier, later)) {
336 condlog(3, "uevent: %s-%s has filtered by uevent: %s-%s",
337 earlier->kernel, earlier->action,
338 later->kernel, later->action);
340 list_del_init(&earlier->node);
342 udev_device_unref(earlier->udev);
/*
 * Walk backwards from "later", moving mergeable earlier uevents onto
 * later->merge_node so they are processed as one batch; stop the scan
 * as soon as merge_need_stop() says ordering would be violated.
 */
349 uevent_merge(struct uevent *later, struct list_head *tmpq)
351 struct uevent *earlier, *tmp;
353 list_for_some_entry_reverse_safe(earlier, tmp, &later->node, tmpq, node) {
354 if (merge_need_stop(earlier, later))
357 * merge earlier uevents to the later uevent
359 if (uevent_can_merge(earlier, later)) {
360 condlog(3, "merged uevent: %s-%s-%s with uevent: %s-%s-%s",
361 earlier->action, earlier->kernel, earlier->wwid,
362 later->action, later->kernel, later->wwid);
364 list_move(&earlier->node, &later->merge_node);
/*
 * Full filter/merge pipeline over one snapshot of the queue: prepare
 * (discard + resolve WWIDs), then for each uevent newest-first, filter
 * out obsolete predecessors and, if merging is configured, merge them.
 */
370 merge_uevq(struct list_head *tmpq)
372 struct uevent *later;
374 uevent_prepare(tmpq);
375 list_for_each_entry_reverse(later, tmpq, node) {
376 uevent_filter(later, tmpq);
377 if(uevent_need_merge())
378 uevent_merge(later, tmpq);
/*
 * Dispatch each queued uevent through the registered trigger callback,
 * then release any merged children and the udev_device reference.
 */
383 service_uevq(struct list_head *tmpq)
385 struct uevent *uev, *tmp;
387 list_for_each_entry_safe(uev, tmp, tmpq, node) {
388 list_del_init(&uev->node);
390 if (my_uev_trigger && my_uev_trigger(uev, my_trigger_data))
391 condlog(0, "uevent trigger error");
393 uevq_cleanup(&uev->merge_node);
396 udev_device_unref(uev->udev);
/* pthread cleanup handler: release the udev context on thread exit
 * (the udev_unref() call itself is in lines missing from this extraction). */
static void uevent_cleanup(void *arg)
403 struct udev *udev = arg;
405 condlog(3, "Releasing uevent_listen() resources");
/* pthread cleanup handler: drop the udev monitor reference on thread exit. */
static void monitor_cleanup(void *arg)
411 struct udev_monitor *monitor = arg;
413 condlog(3, "Releasing uevent_monitor() resources");
414 udev_monitor_unref(monitor);
418 * Service the uevent queue.
/*
 * Service-thread entry point: register the trigger callback, then loop
 * taking a snapshot of uevq under the lock (waiting on the condvar when
 * empty -- the wait is guarded by a list_empty() check since signals are
 * unreliable), and run merge + dispatch on the snapshot outside the lock.
 */
420 int uevent_dispatch(int (*uev_trigger)(struct uevent *, void * trigger_data),
423 my_uev_trigger = uev_trigger;
424 my_trigger_data = trigger_data;
/* Lock all pages to avoid page faults stalling event servicing. */
426 mlockall(MCL_CURRENT | MCL_FUTURE);
431 pthread_mutex_lock(uevq_lockp);
434 * Condition signals are unreliable,
435 * so make sure we only wait if we have to.
437 if (list_empty(&uevq)) {
438 pthread_cond_wait(uev_condp, uevq_lockp);
441 list_splice_init(&uevq, &uevq_tmp);
442 pthread_mutex_unlock(uevq_lockp);
445 merge_uevq(&uevq_tmp);
446 service_uevq(&uevq_tmp);
448 condlog(3, "Terminating uev service queue");
/*
 * Build a struct uevent from a raw netlink message: copy the shared
 * receive buffer into the uevent's private buffer, split "action@devpath"
 * at the '@', then reconstruct envp[] from the NUL-separated payload,
 * extracting SEQNUM and the kernel name (last path component).
 */
struct uevent *uevent_from_buffer(char *buf, ssize_t buflen)
461 uev = alloc_uevent();
463 condlog(1, "lost uevent, oom");
/* NOTE(review): "buf" is a char* parameter, so sizeof(buf) is the size
 * of a pointer, not of the caller's buffer -- this clamp is almost
 * certainly wrong and should use HOTPLUG_BUFFER_SIZE + OBJECT_SIZE;
 * verify against the full source. */
467 if ((size_t)buflen > sizeof(buf)-1)
468 buflen = sizeof(buf)-1;
471 * Copy the shared receive buffer contents to buffer private
472 * to this uevent so we can immediately reuse the shared buffer.
474 memcpy(uev->buffer, buf, HOTPLUG_BUFFER_SIZE + OBJECT_SIZE);
475 buffer = uev->buffer;
476 buffer[buflen] = '\0';
478 /* save start of payload */
479 bufpos = strlen(buffer) + 1;
482 uev->action = buffer;
483 pos = strchr(buffer, '@');
485 condlog(3, "bad action string '%s'", buffer);
492 uev->devpath = &pos[1];
494 /* hotplug events have the environment attached - reconstruct envp[] */
495 for (i = 0; (bufpos < (size_t)buflen) && (i < HOTPLUG_NUM_ENVP-1); i++) {
499 key = &buffer[bufpos];
500 keylen = strlen(key);
502 /* Filter out sequence number */
503 if (strncmp(key, "SEQNUM=", 7) == 0) {
506 uev->seqnum = strtoul(key + 7, &eptr, 10);
510 bufpos += keylen + 1;
514 condlog(3, "uevent %ld '%s' from '%s'", uev->seqnum,
515 uev->action, uev->devpath);
516 uev->kernel = strrchr(uev->devpath, '/');
520 /* print payload environment */
521 for (i = 0; uev->envp[i] != NULL; i++)
522 condlog(5, "%s", uev->envp[i]);
/*
 * Fallback uevent listener used when libudev monitoring is unavailable:
 * first try binding the abstract udev unix socket (single-instance
 * guard), otherwise fall back to a raw NETLINK_KOBJECT_UEVENT socket.
 * Each received message is credential-checked (root only), validated,
 * converted via uevent_from_buffer(), and queued for the service thread.
 */
527 int failback_listen(void)
530 struct sockaddr_nl snl;
531 struct sockaddr_un sun;
534 int rcvbufsz = 128*1024;
536 int rcvszsz = sizeof(rcvsz);
537 unsigned int *prcvszsz = (unsigned int *)&rcvszsz;
538 const int feature_on = 1;
540 * First check whether we have a udev socket
/* Abstract-namespace socket: sun_path[0] stays '\0', name starts at [1]. */
542 memset(&sun, 0x00, sizeof(struct sockaddr_un));
543 sun.sun_family = AF_LOCAL;
544 strcpy(&sun.sun_path[1], "/org/kernel/dm/multipath_event");
545 addrlen = offsetof(struct sockaddr_un, sun_path) + strlen(sun.sun_path+1) + 1;
547 sock = socket(AF_LOCAL, SOCK_DGRAM, 0);
550 condlog(3, "reading events from udev socket.");
552 /* the bind takes care of ensuring only one copy running */
553 retval = bind(sock, (struct sockaddr *) &sun, addrlen);
555 condlog(0, "bind failed, exit");
559 /* enable receiving of the sender credentials */
560 retval = setsockopt(sock, SOL_SOCKET, SO_PASSCRED,
561 &feature_on, sizeof(feature_on));
563 condlog(0, "failed to enable credential passing, exit");
568 /* Fallback to read kernel netlink events */
569 memset(&snl, 0x00, sizeof(struct sockaddr_nl));
570 snl.nl_family = AF_NETLINK;
571 snl.nl_pid = getpid();
572 snl.nl_groups = 0x01;
574 sock = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
576 condlog(0, "error getting socket, exit");
580 condlog(3, "reading events from kernel.");
583 * try to avoid dropping uevents, even so, this is not a guarantee,
584 * but it does help to change the netlink uevent socket's
585 * receive buffer threshold from the default value of 106,496 to
586 * the maximum value of 262,142.
588 retval = setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &rcvbufsz,
592 condlog(0, "error setting receive buffer size for socket, exit");
/* NOTE(review): error text says "setting" for a getsockopt failure. */
595 retval = getsockopt(sock, SOL_SOCKET, SO_RCVBUF, &rcvsz, prcvszsz);
597 condlog(0, "error setting receive buffer size for socket, exit");
600 condlog(3, "receive buffer size for socket is %u.", rcvsz);
602 /* enable receiving of the sender credentials */
603 if (setsockopt(sock, SOL_SOCKET, SO_PASSCRED,
604 &feature_on, sizeof(feature_on)) < 0) {
605 condlog(0, "error on enabling credential passing for socket");
609 retval = bind(sock, (struct sockaddr *) &snl,
610 sizeof(struct sockaddr_nl));
612 condlog(0, "bind failed, exit");
/* Receive loop: static buf is safe because uevent_from_buffer() copies it. */
623 char cred_msg[CMSG_SPACE(sizeof(struct ucred))];
624 struct cmsghdr *cmsg;
626 static char buf[HOTPLUG_BUFFER_SIZE + OBJECT_SIZE];
628 memset(buf, 0x00, sizeof(buf));
630 iov.iov_len = sizeof(buf);
631 memset (&smsg, 0x00, sizeof(struct msghdr));
634 smsg.msg_control = cred_msg;
635 smsg.msg_controllen = sizeof(cred_msg);
637 buflen = recvmsg(sock, &smsg, 0);
640 condlog(0, "error receiving message, errno %d", errno);
/* Reject messages without root sender credentials. */
644 cmsg = CMSG_FIRSTHDR(&smsg);
645 if (cmsg == NULL || cmsg->cmsg_type != SCM_CREDENTIALS) {
646 condlog(3, "no sender credentials received, message ignored");
650 cred = (struct ucred *)CMSG_DATA(cmsg);
651 if (cred->uid != 0) {
652 condlog(3, "sender uid=%d, message ignored", cred->uid);
/* Sanity-check "action@devpath" header length and format. */
657 bufpos = strlen(buf) + 1;
658 if (bufpos < sizeof("a@/d") || bufpos >= sizeof(buf)) {
659 condlog(3, "invalid message length");
663 /* check message header */
664 if (strstr(buf, "@/") == NULL) {
665 condlog(3, "unrecognized message header");
668 if ((size_t)buflen > sizeof(buf)-1) {
669 condlog(2, "buffer overflow for received uevent");
670 buflen = sizeof(buf)-1;
673 uev = uevent_from_buffer(buf, buflen);
677 * Queue uevent and poke service pthread.
679 pthread_mutex_lock(uevq_lockp);
680 list_add_tail(&uev->node, &uevq);
681 pthread_cond_signal(uev_condp);
682 pthread_mutex_unlock(uevq_lockp);
/*
 * Build a struct uevent from a libudev device: serialize each property
 * as "NAME=value" into the uevent's private buffer (bounded by end),
 * record envp[] pointers, and wire devpath/action/kernel to point into
 * that buffer (the +8/+7 offsets skip "DEVPATH="/"ACTION=").
 * On OOM the udev_device reference is dropped before returning.
 */
struct uevent *uevent_from_udev_device(struct udev_device *dev)
695 struct udev_list_entry *list_entry;
697 uev = alloc_uevent();
699 udev_device_unref(dev);
700 condlog(1, "lost uevent, oom");
704 end = pos + HOTPLUG_BUFFER_SIZE + OBJECT_SIZE - 1;
705 udev_list_entry_foreach(list_entry, udev_device_get_properties_list_entry(dev)) {
706 const char *name, *value;
709 name = udev_list_entry_get_name(list_entry);
712 value = udev_list_entry_get_value(list_entry);
715 bytes = snprintf(pos, end - pos, "%s=%s", name, value);
716 if (pos + bytes >= end) {
717 condlog(2, "buffer overflow for uevent");
724 if (strcmp(name, "DEVPATH") == 0)
725 uev->devpath = uev->envp[i] + 8;
726 if (strcmp(name, "ACTION") == 0)
727 uev->action = uev->envp[i] + 7;
/* envp[] has HOTPLUG_NUM_ENVP slots; last one is reserved for NULL. */
729 if (i == HOTPLUG_NUM_ENVP - 1)
735 condlog(3, "uevent '%s' from '%s'", uev->action, uev->devpath);
736 uev->kernel = strrchr(uev->devpath, '/');
740 /* print payload environment */
741 for (i = 0; uev->envp[i] != NULL; i++)
742 condlog(5, "%s", uev->envp[i]);
/*
 * Decide whether we are still inside a uevent burst and should keep
 * accumulating: stop after MAX_ACCUMULATION_COUNT events or
 * MAX_ACCUMULATION_TIME ms; otherwise keep going while the arrival rate
 * exceeds MIN_BURST_SPEED events/s.
 * NOTE(review): the division at "events * 1000 / eclipse_ms" needs a
 * guard for eclipse_ms == 0 -- that guard may be in lines missing from
 * this extraction; confirm against the full source.  ("eclipse" is
 * presumably a typo for "elapsed".)
 */
746 bool uevent_burst(struct timeval *start_time, int events)
748 struct timeval diff_time, end_time;
750 unsigned long eclipse_ms;
752 if(events > MAX_ACCUMULATION_COUNT) {
753 condlog(2, "burst got %u uevents, too much uevents, stopped", events);
757 gettimeofday(&end_time, NULL);
758 timersub(&end_time, start_time, &diff_time);
760 eclipse_ms = diff_time.tv_sec * 1000 + diff_time.tv_usec / 1000;
765 if (eclipse_ms > MAX_ACCUMULATION_TIME) {
766 condlog(2, "burst continued %lu ms, too long time, stopped", eclipse_ms);
770 speed = (events * 1000) / eclipse_ms;
771 if (speed > MIN_BURST_SPEED)
/*
 * Main uevent listener thread body: create a libudev "udev" netlink
 * monitor filtered to the block subsystem, switch its fd to blocking
 * mode, then poll-and-accumulate events into a local list so the
 * listener never blocks on multipathd locks.  Accumulated events are
 * spliced onto the shared queue and the service thread is signalled
 * either when a poll timeout fires or uevent_burst() ends the burst.
 * On monitor setup failure, falls back to failback_listen().
 */
777 int uevent_listen(struct udev *udev)
780 struct udev_monitor *monitor = NULL;
781 int fd, socket_flags, events;
782 struct timeval start_time;
783 int need_failback = 1;
785 LIST_HEAD(uevlisten_tmp);
788 * Queue uevents for service by dedicated thread so that the uevent
789 * listening thread does not block on multipathd locks (vecs->lock)
790 * thereby not getting to empty the socket's receive buffer queue
794 condlog(1, "no udev context");
798 pthread_cleanup_push(uevent_cleanup, udev);
800 monitor = udev_monitor_new_from_netlink(udev, "udev");
802 condlog(2, "failed to create udev monitor");
805 pthread_cleanup_push(monitor_cleanup, monitor);
806 #ifdef LIBUDEV_API_RECVBUF
807 if (udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024))
808 condlog(2, "failed to increase buffer size");
810 fd = udev_monitor_get_fd(monitor);
812 condlog(2, "failed to get monitor fd");
/* Clear O_NONBLOCK: we rely on poll() for readiness, then block in recv. */
815 socket_flags = fcntl(fd, F_GETFL);
816 if (socket_flags < 0) {
817 condlog(2, "failed to get monitor socket flags : %s",
821 if (fcntl(fd, F_SETFL, socket_flags & ~O_NONBLOCK) < 0) {
822 condlog(2, "failed to set monitor socket flags : %s",
826 err = udev_monitor_filter_add_match_subsystem_devtype(monitor, "block",
829 condlog(2, "failed to create filter : %s", strerror(-err));
830 err = udev_monitor_enable_receiving(monitor);
832 condlog(2, "failed to enable receiving : %s", strerror(-err));
837 gettimeofday(&start_time, NULL);
840 struct udev_device *dev;
841 struct pollfd ev_poll;
845 memset(&ev_poll, 0, sizeof(struct pollfd));
847 ev_poll.events = POLLIN;
848 poll_timeout = timeout * 1000;
850 fdcount = poll(&ev_poll, 1, poll_timeout);
851 if (fdcount && ev_poll.revents & POLLIN) {
/* While a burst is ongoing keep a short 1s poll timeout to batch events. */
852 timeout = uevent_burst(&start_time, events + 1) ? 1 : 0;
853 dev = udev_monitor_receive_device(monitor);
855 condlog(0, "failed getting udev device");
858 uev = uevent_from_udev_device(dev);
861 list_add_tail(&uev->node, &uevlisten_tmp);
869 condlog(0, "error receiving "
870 "uevent message: %m");
874 if (!list_empty(&uevlisten_tmp)) {
876 * Queue uevents and poke service pthread.
878 condlog(3, "Forwarding %d uevents", events);
879 pthread_mutex_lock(uevq_lockp);
880 list_splice_tail_init(&uevlisten_tmp, &uevq);
881 pthread_cond_signal(uev_condp);
882 pthread_mutex_unlock(uevq_lockp);
885 gettimeofday(&start_time, NULL);
891 pthread_cleanup_pop(1);
893 err = failback_listen();
894 pthread_cleanup_pop(1);
/* Return the MAJOR device number from the uevent environment (or the
 * error value of uevent_get_env_positive_int). */
898 int uevent_get_major(const struct uevent *uev)
900 return uevent_get_env_positive_int(uev, "MAJOR");
/* Return the MINOR device number from the uevent environment. */
903 int uevent_get_minor(const struct uevent *uev)
905 return uevent_get_env_positive_int(uev, "MINOR");
/* Return the DISK_RO (read-only) flag from the uevent environment. */
908 int uevent_get_disk_ro(const struct uevent *uev)
910 return uevent_get_env_positive_int(uev, "DISK_RO");
/* Look up "attr" in the uevent environment; the full source returns a
 * heap copy (caller frees) or NULL when absent -- the copy/return lines
 * are missing from this extraction. */
static char *uevent_get_dm_str(const struct uevent *uev, char *attr)
915 const char *tmp = uevent_get_env_var(uev, attr);
/* Return a copy of DM_NAME from the uevent environment (caller frees). */
922 char *uevent_get_dm_name(const struct uevent *uev)
924 return uevent_get_dm_str(uev, "DM_NAME");
/* Return a copy of DM_PATH from the uevent environment (caller frees). */
927 char *uevent_get_dm_path(const struct uevent *uev)
929 return uevent_get_dm_str(uev, "DM_PATH");
/* Return a copy of DM_ACTION from the uevent environment (caller frees). */
932 char *uevent_get_dm_action(const struct uevent *uev)
934 return uevent_get_dm_str(uev, "DM_ACTION");
/*
 * True when the uevent's DM_UUID identifies a multipath map: it must
 * start with UUID_PREFIX and have a non-empty remainder after it.
 */
937 bool uevent_is_mpath(const struct uevent *uev)
939 const char *uuid = uevent_get_env_var(uev, "DM_UUID");
943 if (strncmp(uuid, UUID_PREFIX, UUID_PREFIX_LEN))
945 return uuid[UUID_PREFIX_LEN] != '\0';