2 * uevent.c - trigger upon netlink uevents from the kernel
4 * Only kernels from version 2.6.10* on provide the uevent netlink socket.
5 * Until the libc-kernel-headers are updated, you need to compile with:
7 * gcc -I /lib/modules/`uname -r`/build/include -o uevent_listen uevent_listen.c
9 * Copyright (C) 2004 Kay Sievers <kay.sievers@vrfy.org>
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation version 2 of the License.
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public License along
21 * with this program. If not, see <http://www.gnu.org/licenses/>.
34 #include <sys/socket.h>
38 #include <linux/types.h>
39 #include <linux/netlink.h>
54 #include "blacklist.h"
/* Stop accumulating once this many uevents are queued in one burst. */
56 #define MAX_ACCUMULATION_COUNT 2048
/* Maximum accumulation window, in milliseconds (30 s).
 * NOTE(review): expansion is unparenthesized — fine in current uses,
 * fragile if ever embedded in a larger expression. */
57 #define MAX_ACCUMULATION_TIME 30*1000
/* Minimum uevents/second for the stream to still count as a burst. */
58 #define MIN_BURST_SPEED 10
/* Callback type invoked by the dispatcher for each serviced uevent;
 * non-zero return is logged as a trigger error. */
60 typedef int (uev_trigger)(struct uevent *, void * trigger_data);
/* Mutex and condition variable protecting the shared uevent queue
 * (uevq); the listener produces, uevent_dispatch() consumes. */
63 pthread_mutex_t uevq_lock = PTHREAD_MUTEX_INITIALIZER;
64 pthread_mutex_t *uevq_lockp = &uevq_lock;
65 pthread_cond_t uev_cond = PTHREAD_COND_INITIALIZER;
66 pthread_cond_t *uev_condp = &uev_cond;
/* Trigger callback and its opaque context, set by uevent_dispatch(). */
67 uev_trigger *my_uev_trigger;
68 void * my_trigger_data;
/*
 * Return non-zero while uevent processing has pending or in-flight
 * work: the queue is non-empty, or a uevent is currently serviced.
 */
71 int is_uevent_busy(void)
/* Sample queue emptiness under the queue lock only briefly. */
75 pthread_mutex_lock(uevq_lockp);
76 empty = list_empty(&uevq);
77 pthread_mutex_unlock(uevq_lockp);
/* servicing_uev — presumably a file-scope flag raised by the
 * dispatcher while a uevent is processed; declaration not visible in
 * this excerpt. */
78 return (!empty || servicing_uev);
/*
 * Allocate a struct uevent and initialize its two list heads.
 * NOTE(review): no NULL check on the MALLOC result is visible before
 * INIT_LIST_HEAD — the check may live in lines elided from this
 * listing; confirm against the full source.
 */
81 struct uevent * alloc_uevent (void)
83 struct uevent *uev = MALLOC(sizeof(struct uevent));
/* node: membership in uevq; merge_node: uevents merged under this one. */
86 INIT_LIST_HEAD(&uev->node);
87 INIT_LIST_HEAD(&uev->merge_node);
/*
 * Discard every uevent on tmpq: unlink each node and drop its libudev
 * device reference (the FREE of the uevent itself is presumably in
 * lines elided from this listing).
 */
94 uevq_cleanup(struct list_head *tmpq)
96 struct uevent *uev, *tmp;
/* _safe variant: entries are removed while iterating. */
98 list_for_each_entry_safe(uev, tmp, tmpq, node) {
99 list_del_init(&uev->node);
102 udev_device_unref(uev->udev);
/*
 * Resolve uev->wwid by scanning the uevent environment for the
 * "<uid_attribute>=" key configured for this kernel device name.
 * The resulting wwid points into uev->envp[] storage — no allocation.
 */
108 uevent_get_wwid(struct uevent *uev)
112 struct config * conf;
/* uid_attrs must be read while holding the config reference. */
114 conf = get_multipath_config();
115 uid_attribute = parse_uid_attribute_by_attrs(conf->uid_attrs, uev->kernel);
116 put_multipath_config(conf);
/* Exact "KEY=" match: prefix, length, and the '=' right after the key. */
121 for (i = 0; uev->envp[i] != NULL; i++) {
122 if (!strncmp(uev->envp[i], uid_attribute, strlen(uid_attribute)) &&
123 strlen(uev->envp[i]) > strlen(uid_attribute) &&
124 uev->envp[i][strlen(uid_attribute)] == '=') {
/* Value begins just past "KEY=". */
125 uev->wwid = uev->envp[i] + strlen(uid_attribute) + 1;
133 uevent_need_merge(void)
135 struct config * conf;
136 bool need_merge = false;
138 conf = get_multipath_config();
141 put_multipath_config(conf);
/*
 * Return whether this uevent may be dropped outright: non-dm devices
 * whose devnode matches the configured blacklist (minus exceptions).
 */
147 uevent_can_discard(struct uevent *uev)
149 struct config * conf;
152 * do not filter dm devices by devnode
/* dm-* events are never discarded here. */
154 if (!strncmp(uev->kernel, "dm-", 3))
157 * filter paths devices by devnode
159 conf = get_multipath_config();
160 if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
/* Early-return path: release the config ref before returning. */
162 put_multipath_config(conf);
165 put_multipath_config(conf);
/*
 * Decide whether 'earlier' is obsoleted by 'later' on the same kernel
 * device: any event before a "remove", and a "change" before an "add".
 * dm-* events are exempt (strncmp != 0 keeps them).
 */
171 uevent_can_filter(struct uevent *earlier, struct uevent *later)
175 * filter earlier uvents if path has removed later. Eg:
176 * "add path1 |chang path1 |add path2 |remove path1"
178 * "add path2 |remove path1"
179 * uevents "add path1" and "chang path1" are filtered out
181 if (!strcmp(earlier->kernel, later->kernel) &&
182 !strcmp(later->action, "remove") &&
183 strncmp(later->kernel, "dm-", 3)) {
188 * filter change uvents if add uevents exist. Eg:
189 * "change path1| add path1 |add path2"
191 * "add path1 |add path2"
192 * uevent "chang path1" is filtered out
194 if (!strcmp(earlier->kernel, later->kernel) &&
195 !strcmp(earlier->action, "change") &&
196 !strcmp(later->action, "add") &&
197 strncmp(later->kernel, "dm-", 3)) {
/*
 * Decide whether the merge scan must stop at 'earlier': dm events
 * never merge; a missing wwid on either side makes the comparison
 * unsafe; and an opposite (non-"change") action on the same LUN would
 * reorder adds/removes if merged across it.
 */
205 merge_need_stop(struct uevent *earlier, struct uevent *later)
208 * dm uevent do not try to merge with left uevents
210 if (!strncmp(later->kernel, "dm-", 3))
214 * we can not make a jugement without wwid,
215 * so it is sensible to stop merging
217 if (!earlier->wwid || !later->wwid)
220 * uevents merging stoped
221 * when we meet an opposite action uevent from the same LUN to AVOID
222 * "add path1 |remove path1 |add path2 |remove path2 |add path3"
223 * to merge as "remove path1, path2" and "add path1, path2, path3"
225 * "remove path1 |add path1 |remove path2 |add path2 |remove path3"
226 * to merge as "add path1, path2" and "remove path1, path2, path3"
228 * when we meet a non-change uevent from the same LUN
229 * with the same wwid and different action
230 * it would be better to stop merging.
232 if (!strcmp(earlier->wwid, later->wwid) &&
233 strcmp(earlier->action, later->action) &&
234 strcmp(earlier->action, "change") &&
235 strcmp(later->action, "change"))
/*
 * Return whether 'earlier' can be folded into 'later': same wwid,
 * same action, action is not "change", and the device is not dm-*.
 */
242 uevent_can_merge(struct uevent *earlier, struct uevent *later)
244 /* merge paths uevents
245 * whose wwids exsit and are same
246 * and actions are same,
247 * and actions are addition or deletion
/* NOTE(review): strncmp(action, "change", 6) also rejects any action
 * merely *prefixed* with "change"; harmless for known kernel actions,
 * but strcmp would state the intent more precisely. */
249 if (earlier->wwid && later->wwid &&
250 !strcmp(earlier->wwid, later->wwid) &&
251 !strcmp(earlier->action, later->action) &&
252 strncmp(earlier->action, "change", 6) &&
253 strncmp(earlier->kernel, "dm-", 3)) {
/*
 * First pass over the temporary queue (newest to oldest): discard
 * blacklisted uevents, then compute wwids for the remaining non-dm
 * path uevents so the merge pass can compare them.
 */
261 uevent_prepare(struct list_head *tmpq)
263 struct uevent *uev, *tmp;
265 list_for_each_entry_reverse_safe(uev, tmp, tmpq, node) {
266 if (uevent_can_discard(uev)) {
267 list_del_init(&uev->node);
/* Drop the udev reference taken when this uevent was created. */
269 udev_device_unref(uev->udev);
/* wwid is only needed (and only derivable) for path devices. */
274 if (strncmp(uev->kernel, "dm-", 3) &&
276 uevent_get_wwid(uev);
/*
 * Walk the queue backwards from 'later' and delete every earlier
 * uevent that 'later' makes redundant (see uevent_can_filter()).
 */
281 uevent_filter(struct uevent *later, struct list_head *tmpq)
283 struct uevent *earlier, *tmp;
/* Iterate only the portion of tmpq strictly before later->node. */
285 list_for_some_entry_reverse_safe(earlier, tmp, &later->node, tmpq, node) {
287 * filter unnessary earlier uevents
288 * by the later uevent
290 if (uevent_can_filter(earlier, later)) {
291 condlog(2, "uevent: %s-%s has filtered by uevent: %s-%s",
292 earlier->kernel, earlier->action,
293 later->kernel, later->action);
/* Unlink and release the filtered-out uevent. */
295 list_del_init(&earlier->node);
297 udev_device_unref(earlier->udev);
/*
 * Walk the queue backwards from 'later' and move mergeable earlier
 * uevents (same wwid/action, see uevent_can_merge()) onto
 * later->merge_node, stopping at the first merge barrier.
 */
304 uevent_merge(struct uevent *later, struct list_head *tmpq)
306 struct uevent *earlier, *tmp;
308 list_for_some_entry_reverse_safe(earlier, tmp, &later->node, tmpq, node) {
/* Ordering barrier (opposite action on same LUN, etc.) — stop here. */
309 if (merge_need_stop(earlier, later))
312 * merge earlier uevents to the later uevent
314 if (uevent_can_merge(earlier, later)) {
315 condlog(2, "merged uevent: %s-%s-%s with uevent: %s-%s-%s",
316 earlier->action, earlier->kernel, earlier->wwid,
317 later->action, later->kernel, later->wwid);
/* Re-home the earlier uevent under later's merge list. */
319 list_move(&earlier->node, &later->merge_node);
/*
 * Full batch pass over the stolen queue: discard/annotate via
 * uevent_prepare(), then filter and (if configured) merge, scanning
 * newest-to-oldest so later events dominate earlier ones.
 */
325 merge_uevq(struct list_head *tmpq)
327 struct uevent *later;
329 uevent_prepare(tmpq);
330 list_for_each_entry_reverse(later, tmpq, node) {
331 uevent_filter(later, tmpq);
332 if(uevent_need_merge())
333 uevent_merge(later, tmpq);
/*
 * Deliver each queued uevent to the registered trigger callback, then
 * release it along with every uevent merged beneath it.
 */
338 service_uevq(struct list_head *tmpq)
340 struct uevent *uev, *tmp;
342 list_for_each_entry_safe(uev, tmp, tmpq, node) {
343 list_del_init(&uev->node);
/* Non-zero trigger return is logged but does not abort the batch. */
345 if (my_uev_trigger && my_uev_trigger(uev, my_trigger_data))
346 condlog(0, "uevent trigger error");
/* Free merged children first, then this uevent's udev reference. */
348 uevq_cleanup(&uev->merge_node);
351 udev_device_unref(uev->udev);
/*
 * pthread_cleanup handler for uevent_listen(): releases the udev
 * context passed as the cleanup argument.
 */
356 static void uevent_cleanup(void *arg)
358 struct udev *udev = arg;
360 condlog(3, "Releasing uevent_listen() resources");
365 * Service the uevent queue.
/*
 * Consumer loop: waits on uev_cond, steals the whole uevq under the
 * lock, then merges and services the batch outside the lock via the
 * supplied trigger callback.
 */
367 int uevent_dispatch(int (*uev_trigger)(struct uevent *, void * trigger_data),
/* Record the trigger and its opaque context for service_uevq(). */
370 my_uev_trigger = uev_trigger;
371 my_trigger_data = trigger_data;
/* Best-effort: avoid paging stalls while holding uevq_lock; return
 * value is deliberately ignored (may fail without CAP_IPC_LOCK). */
373 mlockall(MCL_CURRENT | MCL_FUTURE);
378 pthread_mutex_lock(uevq_lockp);
381 * Condition signals are unreliable,
382 * so make sure we only wait if we have to.
384 if (list_empty(&uevq)) {
385 pthread_cond_wait(uev_condp, uevq_lockp);
/* Steal the entire queue, then drop the lock before processing. */
388 list_splice_init(&uevq, &uevq_tmp);
389 pthread_mutex_unlock(uevq_lockp);
392 merge_uevq(&uevq_tmp);
393 service_uevq(&uevq_tmp);
395 condlog(3, "Terminating uev service queue");
/*
 * Build a struct uevent from a raw kobject-uevent netlink datagram of
 * the form "ACTION@DEVPATH\0KEY=VAL\0KEY=VAL\0...". Returns NULL on
 * allocation failure or malformed header (paths partly elided here).
 */
400 struct uevent *uevent_from_buffer(char *buf, ssize_t buflen)
408 uev = alloc_uevent();
410 condlog(1, "lost uevent, oom");
/* NOTE(review): 'buf' is a char* parameter, so sizeof(buf) is the size
 * of a POINTER, not of the receive buffer — this clamp would truncate
 * buflen to 7 on 64-bit targets. The caller already clamps against its
 * real (static, HOTPLUG_BUFFER_SIZE + OBJECT_SIZE) buffer; this check
 * looks like a genuine bug and should use that size or be removed. */
414 if ((size_t)buflen > sizeof(buf)-1)
415 buflen = sizeof(buf)-1;
418 * Copy the shared receive buffer contents to buffer private
419 * to this uevent so we can immediately reuse the shared buffer.
/* Copies the full buffer size regardless of buflen — assumes the
 * caller's buffer really is HOTPLUG_BUFFER_SIZE + OBJECT_SIZE bytes. */
421 memcpy(uev->buffer, buf, HOTPLUG_BUFFER_SIZE + OBJECT_SIZE);
422 buffer = uev->buffer;
423 buffer[buflen] = '\0';
425 /* save start of payload */
426 bufpos = strlen(buffer) + 1;
/* Header must contain '@' separating ACTION from DEVPATH. */
429 uev->action = buffer;
430 pos = strchr(buffer, '@');
432 condlog(3, "bad action string '%s'", buffer);
439 uev->devpath = &pos[1];
441 /* hotplug events have the environment attached - reconstruct envp[] */
442 for (i = 0; (bufpos < (size_t)buflen) && (i < HOTPLUG_NUM_ENVP-1); i++) {
446 key = &buffer[bufpos];
447 keylen = strlen(key);
449 /* Filter out sequence number */
450 if (strncmp(key, "SEQNUM=", 7) == 0) {
453 uev->seqnum = strtoul(key + 7, &eptr, 10);
/* Advance past this NUL-terminated KEY=VAL entry. */
457 bufpos += keylen + 1;
461 condlog(3, "uevent %ld '%s' from '%s'", uev->seqnum,
462 uev->action, uev->devpath);
/* Kernel device name = last path component of DEVPATH. */
463 uev->kernel = strrchr(uev->devpath, '/');
467 /* print payload environment */
468 for (i = 0; uev->envp[i] != NULL; i++)
469 condlog(5, "%s", uev->envp[i]);
/*
 * Fallback uevent listener used when the libudev monitor cannot be set
 * up: refuses to run if a udev socket owner already exists, otherwise
 * binds a NETLINK_KOBJECT_UEVENT socket, validates sender credentials
 * (root only), parses each datagram and queues the resulting uevent.
 */
474 int failback_listen(void)
477 struct sockaddr_nl snl;
478 struct sockaddr_un sun;
/* Requested kernel receive buffer for the netlink socket (128 KiB). */
481 int rcvbufsz = 128*1024;
483 int rcvszsz = sizeof(rcvsz);
484 unsigned int *prcvszsz = (unsigned int *)&rcvszsz;
485 const int feature_on = 1;
487 * First check whether we have a udev socket
/* Abstract-namespace AF_LOCAL socket: sun_path[0] stays '\0'. */
489 memset(&sun, 0x00, sizeof(struct sockaddr_un));
490 sun.sun_family = AF_LOCAL;
491 strcpy(&sun.sun_path[1], "/org/kernel/dm/multipath_event");
492 addrlen = offsetof(struct sockaddr_un, sun_path) + strlen(sun.sun_path+1) + 1;
494 sock = socket(AF_LOCAL, SOCK_DGRAM, 0);
497 condlog(3, "reading events from udev socket.");
499 /* the bind takes care of ensuring only one copy running */
500 retval = bind(sock, (struct sockaddr *) &sun, addrlen);
502 condlog(0, "bind failed, exit");
506 /* enable receiving of the sender credentials */
507 retval = setsockopt(sock, SOL_SOCKET, SO_PASSCRED,
508 &feature_on, sizeof(feature_on));
510 condlog(0, "failed to enable credential passing, exit");
515 /* Fallback to read kernel netlink events */
516 memset(&snl, 0x00, sizeof(struct sockaddr_nl));
517 snl.nl_family = AF_NETLINK;
518 snl.nl_pid = getpid();
/* Group 1: kernel broadcast group for kobject uevents. */
519 snl.nl_groups = 0x01;
521 sock = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
523 condlog(0, "error getting socket, exit");
527 condlog(3, "reading events from kernel.");
530 * try to avoid dropping uevents, even so, this is not a guarantee,
531 * but it does help to change the netlink uevent socket's
532 * receive buffer threshold from the default value of 106,496 to
533 * the maximum value of 262,142.
535 retval = setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &rcvbufsz,
539 condlog(0, "error setting receive buffer size for socket, exit");
/* Read back the effective size (kernel may clamp/double it). */
542 retval = getsockopt(sock, SOL_SOCKET, SO_RCVBUF, &rcvsz, prcvszsz);
/* NOTE(review): message says "setting" after a getsockopt — looks like
 * a copy-paste; should read "getting". */
544 condlog(0, "error setting receive buffer size for socket, exit");
547 condlog(3, "receive buffer size for socket is %u.", rcvsz);
549 /* enable receiving of the sender credentials */
550 if (setsockopt(sock, SOL_SOCKET, SO_PASSCRED,
551 &feature_on, sizeof(feature_on)) < 0) {
552 condlog(0, "error on enabling credential passing for socket");
556 retval = bind(sock, (struct sockaddr *) &snl,
557 sizeof(struct sockaddr_nl));
559 condlog(0, "bind failed, exit");
/* Ancillary buffer sized for one SCM_CREDENTIALS message. */
570 char cred_msg[CMSG_SPACE(sizeof(struct ucred))];
571 struct cmsghdr *cmsg;
/* Shared receive buffer; contents are copied per-uevent by
 * uevent_from_buffer() so this can be reused immediately. */
573 static char buf[HOTPLUG_BUFFER_SIZE + OBJECT_SIZE];
575 memset(buf, 0x00, sizeof(buf));
577 iov.iov_len = sizeof(buf);
578 memset (&smsg, 0x00, sizeof(struct msghdr));
581 smsg.msg_control = cred_msg;
582 smsg.msg_controllen = sizeof(cred_msg);
584 buflen = recvmsg(sock, &smsg, 0);
587 condlog(0, "error receiving message, errno %d", errno);
/* Ignore datagrams without root sender credentials. */
591 cmsg = CMSG_FIRSTHDR(&smsg);
592 if (cmsg == NULL || cmsg->cmsg_type != SCM_CREDENTIALS) {
593 condlog(3, "no sender credentials received, message ignored");
597 cred = (struct ucred *)CMSG_DATA(cmsg);
598 if (cred->uid != 0) {
599 condlog(3, "sender uid=%d, message ignored", cred->uid);
/* Sanity-check the "ACTION@DEVPATH" header before parsing. */
604 bufpos = strlen(buf) + 1;
605 if (bufpos < sizeof("a@/d") || bufpos >= sizeof(buf)) {
606 condlog(3, "invalid message length");
610 /* check message header */
611 if (strstr(buf, "@/") == NULL) {
612 condlog(3, "unrecognized message header");
/* Here sizeof(buf) is correct: buf is a real array in this scope. */
615 if ((size_t)buflen > sizeof(buf)-1) {
616 condlog(2, "buffer overflow for received uevent");
617 buflen = sizeof(buf)-1;
620 uev = uevent_from_buffer(buf, buflen);
624 * Queue uevent and poke service pthread.
626 pthread_mutex_lock(uevq_lockp);
627 list_add_tail(&uev->node, &uevq);
628 pthread_cond_signal(uev_condp);
629 pthread_mutex_unlock(uevq_lockp);
/*
 * Build a struct uevent from a libudev device: serialize each
 * property into the uevent's private buffer as "NAME=VALUE" strings
 * and point envp[]/devpath/action/kernel into that storage.
 * Takes ownership of 'dev' (unrefs it on the OOM path).
 */
637 struct uevent *uevent_from_udev_device(struct udev_device *dev)
642 struct udev_list_entry *list_entry;
644 uev = alloc_uevent();
/* OOM: drop the device reference we were handed, then bail. */
646 udev_device_unref(dev);
647 condlog(1, "lost uevent, oom");
/* 'end' marks one-past-last usable byte of uev->buffer. */
651 end = pos + HOTPLUG_BUFFER_SIZE + OBJECT_SIZE - 1;
652 udev_list_entry_foreach(list_entry, udev_device_get_properties_list_entry(dev)) {
653 const char *name, *value;
656 name = udev_list_entry_get_name(list_entry);
659 value = udev_list_entry_get_value(list_entry);
662 bytes = snprintf(pos, end - pos, "%s=%s", name, value);
/* snprintf truncated: stop rather than store a partial entry. */
663 if (pos + bytes >= end) {
664 condlog(2, "buffer overflow for uevent");
/* Skip the literal prefixes "DEVPATH=" (8) and "ACTION=" (7). */
671 if (strcmp(name, "DEVPATH") == 0)
672 uev->devpath = uev->envp[i] + 8;
673 if (strcmp(name, "ACTION") == 0)
674 uev->action = uev->envp[i] + 7;
/* Leave room for the NULL terminator of envp[]. */
676 if (i == HOTPLUG_NUM_ENVP - 1)
682 condlog(3, "uevent '%s' from '%s'", uev->action, uev->devpath);
683 uev->kernel = strrchr(uev->devpath, '/');
687 /* print payload environment */
688 for (i = 0; uev->envp[i] != NULL; i++)
689 condlog(5, "%s", uev->envp[i]);
/*
 * Decide whether the listener is still inside a uevent burst (keep
 * accumulating) or should hand the batch off: too many events or too
 * long an accumulation window ends the burst; a sustained rate above
 * MIN_BURST_SPEED events/sec keeps it going.
 */
693 bool uevent_burst(struct timeval *start_time, int events)
695 struct timeval diff_time, end_time;
/* 'eclipse_ms' — presumably meant "elapsed_ms"; kept as-is here. */
697 unsigned long eclipse_ms;
/* NOTE(review): %u with a signed int argument — works in practice but
 * %d would match the type. */
699 if(events > MAX_ACCUMULATION_COUNT) {
700 condlog(2, "burst got %u uevents, too much uevents, stopped", events);
704 gettimeofday(&end_time, NULL);
705 timersub(&end_time, start_time, &diff_time);
707 eclipse_ms = diff_time.tv_sec * 1000 + diff_time.tv_usec / 1000;
712 if (eclipse_ms > MAX_ACCUMULATION_TIME) {
713 condlog(2, "burst continued %lu ms, too long time, stopped", eclipse_ms);
/* NOTE(review): division assumes eclipse_ms != 0 — a zero guard
 * presumably sits in lines elided from this listing; confirm. */
717 speed = (events * 1000) / eclipse_ms;
718 if (speed > MIN_BURST_SPEED)
/*
 * Main uevent listener thread body: sets up a libudev monitor on the
 * "udev" netlink source (blocking, block-subsystem filter), polls it,
 * accumulates bursts locally in uevlisten_tmp, then splices them into
 * the shared uevq for the dispatcher. Falls back to the raw netlink
 * listener (failback_listen) when monitor setup fails.
 */
724 int uevent_listen(struct udev *udev)
727 struct udev_monitor *monitor = NULL;
728 int fd, socket_flags, events;
729 struct timeval start_time;
730 int need_failback = 1;
/* Local staging list so uevq_lock is taken once per burst. */
732 LIST_HEAD(uevlisten_tmp);
735 * Queue uevents for service by dedicated thread so that the uevent
736 * listening thread does not block on multipathd locks (vecs->lock)
737 * thereby not getting to empty the socket's receive buffer queue
741 condlog(1, "no udev context");
/* Ensure the udev context is released however this thread exits. */
745 pthread_cleanup_push(uevent_cleanup, udev);
747 monitor = udev_monitor_new_from_netlink(udev, "udev");
749 condlog(2, "failed to create udev monitor");
752 #ifdef LIBUDEV_API_RECVBUF
753 if (udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024))
754 condlog(2, "failed to increase buffer size");
756 fd = udev_monitor_get_fd(monitor);
758 condlog(2, "failed to get monitor fd");
/* Switch the monitor fd to blocking mode; poll() provides timing. */
761 socket_flags = fcntl(fd, F_GETFL);
762 if (socket_flags < 0) {
763 condlog(2, "failed to get monitor socket flags : %s",
767 if (fcntl(fd, F_SETFL, socket_flags & ~O_NONBLOCK) < 0) {
768 condlog(2, "failed to set monitor socket flags : %s",
772 err = udev_monitor_filter_add_match_subsystem_devtype(monitor, "block",
775 condlog(2, "failed to create filter : %s", strerror(-err));
776 err = udev_monitor_enable_receiving(monitor);
778 condlog(2, "failed to enable receiving : %s", strerror(-err));
783 gettimeofday(&start_time, NULL);
786 struct udev_device *dev;
787 struct pollfd ev_poll;
791 memset(&ev_poll, 0, sizeof(struct pollfd));
793 ev_poll.events = POLLIN;
/* 'timeout' (seconds) — initialization not visible in this excerpt. */
794 poll_timeout = timeout * 1000;
796 fdcount = poll(&ev_poll, 1, poll_timeout);
797 if (fdcount && ev_poll.revents & POLLIN) {
/* Inside a burst: keep a short 1 s poll timeout to keep draining. */
798 timeout = uevent_burst(&start_time, events + 1) ? 1 : 0;
799 dev = udev_monitor_receive_device(monitor);
801 condlog(0, "failed getting udev device");
804 uev = uevent_from_udev_device(dev);
807 list_add_tail(&uev->node, &uevlisten_tmp);
815 condlog(0, "error receiving "
816 "uevent message: %m");
/* Burst ended (poll timeout / error): hand the batch to the queue. */
820 if (!list_empty(&uevlisten_tmp)) {
822 * Queue uevents and poke service pthread.
824 condlog(3, "Forwarding %d uevents", events);
825 pthread_mutex_lock(uevq_lockp);
826 list_splice_tail_init(&uevlisten_tmp, &uevq);
827 pthread_cond_signal(uev_condp);
828 pthread_mutex_unlock(uevq_lockp);
/* Restart the burst clock for the next accumulation window. */
831 gettimeofday(&start_time, NULL);
837 udev_monitor_unref(monitor);
839 err = failback_listen();
/* Pop and run uevent_cleanup (releases the udev context). */
840 pthread_cleanup_pop(1);
/*
 * Parse the MAJOR= entry from the uevent environment into an int
 * (return paths elided from this listing).
 * NOTE(review): matches any key *prefixed* "MAJOR" and takes the value
 * at offset 6 ("MAJOR=" is 6 chars) without verifying the '=' at
 * index 5 — confirm keys like "MAJORX" cannot occur.
 */
844 int uevent_get_major(struct uevent *uev)
849 for (i = 0; uev->envp[i] != NULL; i++) {
850 if (!strncmp(uev->envp[i], "MAJOR", 5) && strlen(uev->envp[i]) > 6) {
851 p = uev->envp[i] + 6;
852 major = strtoul(p, &q, 10);
/* strtoul endptr check (elided) rejects non-numeric values. */
854 condlog(2, "invalid major '%s'", p);
/*
 * Parse the MINOR= entry from the uevent environment into an int
 * (return paths elided from this listing).
 * NOTE(review): same prefix-only match as uevent_get_major() — the '='
 * at index 5 is not verified.
 */
863 int uevent_get_minor(struct uevent *uev)
868 for (i = 0; uev->envp[i] != NULL; i++) {
869 if (!strncmp(uev->envp[i], "MINOR", 5) && strlen(uev->envp[i]) > 6) {
870 p = uev->envp[i] + 6;
871 minor = strtoul(p, &q, 10);
/* strtoul endptr check (elided) rejects non-numeric values. */
873 condlog(2, "invalid minor '%s'", p);
/*
 * Parse the DISK_RO= entry from the uevent environment.
 * NOTE(review): strncmp length 6 compares only "DISK_R", although the
 * key is "DISK_RO" (7 chars) and the value offset is 8 ("DISK_RO=") —
 * the length should presumably be 7 to match the offset and the
 * strlen > 7 guard; confirm against upstream.
 */
882 int uevent_get_disk_ro(struct uevent *uev)
887 for (i = 0; uev->envp[i] != NULL; i++) {
888 if (!strncmp(uev->envp[i], "DISK_RO", 6) && strlen(uev->envp[i]) > 7) {
889 p = uev->envp[i] + 8;
890 ro = strtoul(p, &q, 10);
892 condlog(2, "invalid read_only setting '%s'", p);
901 char *uevent_get_dm_name(struct uevent *uev)
906 for (i = 0; uev->envp[i] != NULL; i++) {
907 if (!strncmp(uev->envp[i], "DM_NAME", 6) &&
908 strlen(uev->envp[i]) > 7) {
909 p = MALLOC(strlen(uev->envp[i] + 8) + 1);
910 strcpy(p, uev->envp[i] + 8);