/*
 * uevent.c - trigger upon netlink uevents from the kernel
 *
 *      Only kernels from version 2.6.10* on provide the uevent netlink socket.
 *      Until the libc-kernel-headers are updated, you need to compile with:
 *
 *        gcc -I /lib/modules/`uname -r`/build/include -o uevent_listen uevent_listen.c
 *
 * Copyright (C) 2004 Kay Sievers <kay.sievers@vrfy.org>
 *
 *      This program is free software; you can redistribute it and/or modify it
 *      under the terms of the GNU General Public License as published by the
 *      Free Software Foundation version 2 of the License.
 *
 *      This program is distributed in the hope that it will be useful, but
 *      WITHOUT ANY WARRANTY; without even the implied warranty of
 *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *      General Public License for more details.
 *
 *      You should have received a copy of the GNU General Public License along
 *      with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include <unistd.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <fcntl.h>
#include <time.h>
#include <sys/socket.h>
#include <sys/user.h>
#include <sys/un.h>
#include <poll.h>
#include <linux/types.h>
#include <linux/netlink.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <libudev.h>

#include "memory.h"
#include "debug.h"
#include "list.h"
#include "uevent.h"
#include "vector.h"
#include "structs.h"
#include "util.h"
#include "config.h"
#include "blacklist.h"
#include "devmapper.h"

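/*
 * Thresholds used by uevent_burst() below: stop treating incoming uevents
 * as one burst once more than MAX_ACCUMULATION_COUNT events have queued up,
 * once the burst has lasted longer than MAX_ACCUMULATION_TIME milliseconds,
 * or once the arrival rate drops below MIN_BURST_SPEED events per second.
 */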
#define MAX_ACCUMULATION_COUNT 2048
#define MAX_ACCUMULATION_TIME 30*1000
#define MIN_BURST_SPEED 10

typedef int (uev_trigger)(struct uevent *, void * trigger_data);

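/*
 * Global uevent queue: uevent_listen() appends batches of events to "uevq"
 * under "uevq_lock" and signals "uev_cond"; uevent_dispatch() waits on the
 * condition, splices the queue into a private list, and hands each event to
 * the trigger callback registered below.
 */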
LIST_HEAD(uevq);
pthread_mutex_t uevq_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t *uevq_lockp = &uevq_lock;
pthread_cond_t uev_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t *uev_condp = &uev_cond;
uev_trigger *my_uev_trigger;
void * my_trigger_data;
int servicing_uev;

int is_uevent_busy(void)
{
        int empty;

        pthread_mutex_lock(uevq_lockp);
        empty = list_empty(&uevq);
        pthread_mutex_unlock(uevq_lockp);
        return (!empty || servicing_uev);
}

struct uevent * alloc_uevent (void)
{
        struct uevent *uev = MALLOC(sizeof(struct uevent));

        if (uev) {
                INIT_LIST_HEAD(&uev->node);
                INIT_LIST_HEAD(&uev->merge_node);
        }

        return uev;
}

void
uevq_cleanup(struct list_head *tmpq)
{
        struct uevent *uev, *tmp;

        list_for_each_entry_safe(uev, tmp, tmpq, node) {
                list_del_init(&uev->node);

                if (uev->udev)
                        udev_device_unref(uev->udev);
                FREE(uev);
        }
}

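/*
 * Look up a variable in the uevent's environment.  The envp[] entries have
 * the form "NAME=value"; return a pointer to the value of "attr", or NULL
 * if it is not present.
 */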
static const char* uevent_get_env_var(const struct uevent *uev,
                                      const char *attr)
{
        int i, len;
        const char *p = NULL;

        if (attr == NULL)
                goto invalid;

        len = strlen(attr);
        if (len == 0)
                goto invalid;

        for (i = 0; uev->envp[i] != NULL; i++) {
                const char *var = uev->envp[i];

                if (strlen(var) > len &&
                    !memcmp(var, attr, len) && var[len] == '=') {
                        p = var + len + 1;
                        break;
                }
        }

        condlog(4, "%s: %s -> '%s'", __func__, attr, p);
        return p;

invalid:
        condlog(2, "%s: empty variable name", __func__);
        return NULL;
}

static int uevent_get_env_positive_int(const struct uevent *uev,
                                       const char *attr)
{
        const char *p = uevent_get_env_var(uev, attr);
        char *q;
        int ret;

        if (p == NULL || *p == '\0')
                return -1;

        ret = strtoul(p, &q, 10);
        if (*q != '\0' || ret < 0) {
                condlog(2, "%s: invalid %s: '%s'", __func__, attr, p);
                return -1;
        }
        return ret;
}

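/*
 * Fetch the WWID for a path uevent: look up which udev attribute is
 * configured as uid_attribute for this device type and, if the uevent
 * environment carries it, remember its value for merging decisions.
 */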
void
uevent_get_wwid(struct uevent *uev)
{
        char *uid_attribute;
        const char *val;
        struct config * conf;

        conf = get_multipath_config();
        pthread_cleanup_push(put_multipath_config, conf);
        uid_attribute = get_uid_attribute_by_attrs(conf, uev->kernel);
        pthread_cleanup_pop(1);

        val = uevent_get_env_var(uev, uid_attribute);
        if (val)
                uev->wwid = val;
}

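/*
 * Uevent merging is only attempted when uid_attrs is configured, i.e. when
 * WWIDs can be taken from the uevent environment itself.
 */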
bool
uevent_need_merge(void)
{
        struct config * conf;
        bool need_merge = false;

        conf = get_multipath_config();
        if (VECTOR_SIZE(&conf->uid_attrs) > 0)
                need_merge = true;
        put_multipath_config(conf);

        return need_merge;
}

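/*
 * Discard uevents for path devices whose devnode is excluded by the
 * blacklist configuration.  dm-* devices are never discarded here.
 */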
bool
uevent_can_discard(struct uevent *uev)
{
        int invalid = 0;
        struct config * conf;

        /*
         * do not filter dm devices by devnode
         */
        if (!strncmp(uev->kernel, "dm-", 3))
                return false;
        /*
         * filter path devices by devnode
         */
        conf = get_multipath_config();
        pthread_cleanup_push(put_multipath_config, conf);
        if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
                           uev->kernel) > 0)
                invalid = 1;
        pthread_cleanup_pop(1);

        if (invalid)
                return true;
        return false;
}

bool
uevent_can_filter(struct uevent *earlier, struct uevent *later)
{

        /*
         * Filter out earlier uevents if the path is removed later. E.g.:
         * "add path1 |change path1 |add path2 |remove path1"
         * can be filtered to:
         * "add path2 |remove path1"
         * uevents "add path1" and "change path1" are filtered out
         */
        if (!strcmp(earlier->kernel, later->kernel) &&
                !strcmp(later->action, "remove") &&
                strncmp(later->kernel, "dm-", 3)) {
                return true;
        }

        /*
         * Filter out change uevents if a later add uevent exists. E.g.:
         * "change path1 |add path1 |add path2"
         * can be filtered to:
         * "add path1 |add path2"
         * uevent "change path1" is filtered out
         */
        if (!strcmp(earlier->kernel, later->kernel) &&
                !strcmp(earlier->action, "change") &&
                !strcmp(later->action, "add") &&
                strncmp(later->kernel, "dm-", 3)) {
                return true;
        }

        return false;
}

bool
merge_need_stop(struct uevent *earlier, struct uevent *later)
{
        /*
         * dm uevents: do not try to merge with the remaining uevents
         */
        if (!strncmp(later->kernel, "dm-", 3))
                return true;

        /*
         * We cannot make a judgement without a wwid,
         * so it is sensible to stop merging.
         */
        if (!earlier->wwid || !later->wwid)
                return true;
        /*
         * Merging stops
         * when we meet an opposite-action uevent from the same LUN, to AVOID
         * "add path1 |remove path1 |add path2 |remove path2 |add path3"
         * being merged into "remove path1, path2" and "add path1, path2, path3"
         * OR
         * "remove path1 |add path1 |remove path2 |add path2 |remove path3"
         * being merged into "add path1, path2" and "remove path1, path2, path3"
         * SO
         * when we meet a non-change uevent from the same LUN
         * with the same wwid and a different action
         * it is better to stop merging.
         */
        if (!strcmp(earlier->wwid, later->wwid) &&
            strcmp(earlier->action, later->action) &&
            strcmp(earlier->action, "change") &&
            strcmp(later->action, "change"))
                return true;

        return false;
}

bool
uevent_can_merge(struct uevent *earlier, struct uevent *later)
{
        /* Merge path uevents
         * whose wwids exist and are the same,
         * whose actions are the same,
         * and whose actions are addition or deletion.
         */
        if (earlier->wwid && later->wwid &&
            !strcmp(earlier->wwid, later->wwid) &&
            !strcmp(earlier->action, later->action) &&
            strncmp(earlier->action, "change", 6) &&
            strncmp(earlier->kernel, "dm-", 3)) {
                return true;
        }

        return false;
}

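/*
 * First pass over a freshly received batch: drop uevents that can be
 * discarded outright and, if merging is enabled, look up the WWID of every
 * path uevent so that the filter/merge passes below can compare them.
 */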
void
uevent_prepare(struct list_head *tmpq)
{
        struct uevent *uev, *tmp;

        list_for_each_entry_reverse_safe(uev, tmp, tmpq, node) {
                if (uevent_can_discard(uev)) {
                        list_del_init(&uev->node);
                        if (uev->udev)
                                udev_device_unref(uev->udev);
                        FREE(uev);
                        continue;
                }

                if (strncmp(uev->kernel, "dm-", 3) &&
                    uevent_need_merge())
                        uevent_get_wwid(uev);
        }
}

void
uevent_filter(struct uevent *later, struct list_head *tmpq)
{
        struct uevent *earlier, *tmp;

        list_for_some_entry_reverse_safe(earlier, tmp, &later->node, tmpq, node) {
                /*
                 * filter out unnecessary earlier uevents
                 * by the later uevent
                 */
                if (uevent_can_filter(earlier, later)) {
                        condlog(3, "uevent: %s-%s has been filtered by uevent: %s-%s",
                                earlier->kernel, earlier->action,
                                later->kernel, later->action);

                        list_del_init(&earlier->node);
                        if (earlier->udev)
                                udev_device_unref(earlier->udev);
                        FREE(earlier);
                }
        }
}

void
uevent_merge(struct uevent *later, struct list_head *tmpq)
{
        struct uevent *earlier, *tmp;

        list_for_some_entry_reverse_safe(earlier, tmp, &later->node, tmpq, node) {
                if (merge_need_stop(earlier, later))
                        break;
                /*
                 * merge earlier uevents into the later uevent
                 */
                if (uevent_can_merge(earlier, later)) {
                        condlog(3, "merged uevent: %s-%s-%s with uevent: %s-%s-%s",
                                earlier->action, earlier->kernel, earlier->wwid,
                                later->action, later->kernel, later->wwid);

                        list_move(&earlier->node, &later->merge_node);
                }
        }
}

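/*
 * Illustrative example (device names are hypothetical, assuming uid_attrs
 * is configured so merging is enabled): a batch of
 * "add sda |change sda |add sdb |remove sda" is first reduced by
 * uevent_filter() to "add sdb |remove sda"; uevent_merge() would then move
 * same-action, same-WWID path uevents onto the later event's merge_node list.
 */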
void
merge_uevq(struct list_head *tmpq)
{
        struct uevent *later;

        uevent_prepare(tmpq);
        list_for_each_entry_reverse(later, tmpq, node) {
                uevent_filter(later, tmpq);
                if (uevent_need_merge())
                        uevent_merge(later, tmpq);
        }
}

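/*
 * Hand each queued uevent, together with the uevents merged onto its
 * merge_node list, to the registered trigger callback, then free it.
 */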
void
service_uevq(struct list_head *tmpq)
{
        struct uevent *uev, *tmp;

        list_for_each_entry_safe(uev, tmp, tmpq, node) {
                list_del_init(&uev->node);

                if (my_uev_trigger && my_uev_trigger(uev, my_trigger_data))
                        condlog(0, "uevent trigger error");

                uevq_cleanup(&uev->merge_node);

                if (uev->udev)
                        udev_device_unref(uev->udev);
                FREE(uev);
        }
}

static void uevent_cleanup(void *arg)
{
        struct udev *udev = arg;

        condlog(3, "Releasing uevent_listen() resources");
        udev_unref(udev);
}

static void monitor_cleanup(void *arg)
{
        struct udev_monitor *monitor = arg;

        condlog(3, "Releasing uevent_monitor() resources");
        udev_monitor_unref(monitor);
}

/*
 * Service the uevent queue.
 */
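/*
 * Meant to run on a dedicated thread, with uevent_listen() filling uevq and
 * signalling uev_cond from another thread.  A minimal wiring sketch (not
 * taken from this file; the thread wrapper functions are hypothetical):
 *
 *      static int my_trigger(struct uevent *uev, void *data) { ... }
 *
 *      pthread_create(&listener, NULL, listen_thread, udev);
 *              // wrapper calling uevent_listen(udev)
 *      pthread_create(&dispatcher, NULL, dispatch_thread, NULL);
 *              // wrapper calling uevent_dispatch(my_trigger, my_data)
 */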
int uevent_dispatch(int (*uev_trigger)(struct uevent *, void * trigger_data),
                    void * trigger_data)
{
        my_uev_trigger = uev_trigger;
        my_trigger_data = trigger_data;

        mlockall(MCL_CURRENT | MCL_FUTURE);

        while (1) {
                LIST_HEAD(uevq_tmp);

                pthread_mutex_lock(uevq_lockp);
                servicing_uev = 0;
                /*
                 * Condition signals are unreliable,
                 * so make sure we only wait if we have to.
                 */
                if (list_empty(&uevq)) {
                        pthread_cond_wait(uev_condp, uevq_lockp);
                }
                servicing_uev = 1;
                list_splice_init(&uevq, &uevq_tmp);
                pthread_mutex_unlock(uevq_lockp);
                if (!my_uev_trigger)
                        break;
                merge_uevq(&uevq_tmp);
                service_uevq(&uevq_tmp);
        }
        condlog(3, "Terminating uev service queue");
        uevq_cleanup(&uevq);
        return 0;
}

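/*
 * Parse a raw kernel uevent message of the form
 * "ACTION@DEVPATH\0KEY=value\0KEY=value\0..." (as received in
 * failback_listen()) into a freshly allocated struct uevent.
 */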
struct uevent *uevent_from_buffer(char *buf, ssize_t buflen)
{
        struct uevent *uev;
        char *buffer;
        size_t bufpos;
        int i;
        char *pos;

        uev = alloc_uevent();
        if (!uev) {
                condlog(1, "lost uevent, oom");
                return NULL;
        }

        /* clamp to the size of uev->buffer, not to sizeof() of the "buf" pointer */
        if ((size_t)buflen > HOTPLUG_BUFFER_SIZE + OBJECT_SIZE - 1)
                buflen = HOTPLUG_BUFFER_SIZE + OBJECT_SIZE - 1;

        /*
         * Copy the shared receive buffer contents to a buffer private
         * to this uevent so we can immediately reuse the shared buffer.
         */
        memcpy(uev->buffer, buf, HOTPLUG_BUFFER_SIZE + OBJECT_SIZE);
        buffer = uev->buffer;
        buffer[buflen] = '\0';

        /* save start of payload */
        bufpos = strlen(buffer) + 1;

        /* action string */
        uev->action = buffer;
        pos = strchr(buffer, '@');
        if (!pos) {
                condlog(3, "bad action string '%s'", buffer);
                FREE(uev);
                return NULL;
        }
        pos[0] = '\0';

        /* sysfs path */
        uev->devpath = &pos[1];

        /* hotplug events have the environment attached - reconstruct envp[] */
        for (i = 0; (bufpos < (size_t)buflen) && (i < HOTPLUG_NUM_ENVP-1); i++) {
                int keylen;
                char *key;

                key = &buffer[bufpos];
                keylen = strlen(key);
                uev->envp[i] = key;
                /* Filter out sequence number */
                if (strncmp(key, "SEQNUM=", 7) == 0) {
                        char *eptr;

                        uev->seqnum = strtoul(key + 7, &eptr, 10);
                        if (eptr == key + 7)
                                uev->seqnum = -1;
                }
                bufpos += keylen + 1;
        }
        uev->envp[i] = NULL;

        condlog(3, "uevent %ld '%s' from '%s'", uev->seqnum,
                uev->action, uev->devpath);
        uev->kernel = strrchr(uev->devpath, '/');
        if (uev->kernel)
                uev->kernel++;

        /* print payload environment */
        for (i = 0; uev->envp[i] != NULL; i++)
                condlog(5, "%s", uev->envp[i]);

        return uev;
}

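/*
 * Fallback event source, used only when the udev monitor in uevent_listen()
 * could not be set up: read uevents either from the udev socket or, failing
 * that, directly from the kernel's NETLINK_KOBJECT_UEVENT socket.
 */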
int failback_listen(void)
{
        int sock;
        struct sockaddr_nl snl;
        struct sockaddr_un sun;
        socklen_t addrlen;
        int retval;
        int rcvbufsz = 128*1024;
        int rcvsz = 0;
        int rcvszsz = sizeof(rcvsz);
        unsigned int *prcvszsz = (unsigned int *)&rcvszsz;
        const int feature_on = 1;
        /*
         * First check whether we have a udev socket
         */
        memset(&sun, 0x00, sizeof(struct sockaddr_un));
        sun.sun_family = AF_LOCAL;
        strcpy(&sun.sun_path[1], "/org/kernel/dm/multipath_event");
        addrlen = offsetof(struct sockaddr_un, sun_path) + strlen(sun.sun_path+1) + 1;

        sock = socket(AF_LOCAL, SOCK_DGRAM, 0);
        if (sock >= 0) {

                condlog(3, "reading events from udev socket.");

                /* the bind takes care of ensuring only one copy running */
                retval = bind(sock, (struct sockaddr *) &sun, addrlen);
                if (retval < 0) {
                        condlog(0, "bind failed, exit");
                        goto exit;
                }

                /* enable receiving of the sender credentials */
                retval = setsockopt(sock, SOL_SOCKET, SO_PASSCRED,
                                    &feature_on, sizeof(feature_on));
                if (retval < 0) {
                        condlog(0, "failed to enable credential passing, exit");
                        goto exit;
                }

        } else {
                /* Fallback to read kernel netlink events */
                memset(&snl, 0x00, sizeof(struct sockaddr_nl));
                snl.nl_family = AF_NETLINK;
                snl.nl_pid = getpid();
                snl.nl_groups = 0x01;

                sock = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
                if (sock == -1) {
                        condlog(0, "error getting socket, exit");
                        return 1;
                }

                condlog(3, "reading events from kernel.");

                /*
                 * Try to avoid dropping uevents. This is no guarantee,
                 * but it does help to raise the netlink uevent socket's
                 * receive buffer threshold from the default value of
                 * 106,496 to the maximum value of 262,142.
                 */
                retval = setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &rcvbufsz,
                                    sizeof(rcvbufsz));

                if (retval < 0) {
                        condlog(0, "error setting receive buffer size for socket, exit");
                        exit(1);
                }
                retval = getsockopt(sock, SOL_SOCKET, SO_RCVBUF, &rcvsz, prcvszsz);
                if (retval < 0) {
                        condlog(0, "error getting receive buffer size for socket, exit");
                        exit(1);
                }
                condlog(3, "receive buffer size for socket is %u.", rcvsz);

                /* enable receiving of the sender credentials */
                if (setsockopt(sock, SOL_SOCKET, SO_PASSCRED,
                               &feature_on, sizeof(feature_on)) < 0) {
                        condlog(0, "error on enabling credential passing for socket");
                        exit(1);
                }

                retval = bind(sock, (struct sockaddr *) &snl,
                              sizeof(struct sockaddr_nl));
                if (retval < 0) {
                        condlog(0, "bind failed, exit");
                        goto exit;
                }
        }

        while (1) {
                size_t bufpos;
                ssize_t buflen;
                struct uevent *uev;
                struct msghdr smsg;
                struct iovec iov;
                char cred_msg[CMSG_SPACE(sizeof(struct ucred))];
                struct cmsghdr *cmsg;
                struct ucred *cred;
                static char buf[HOTPLUG_BUFFER_SIZE + OBJECT_SIZE];

                memset(buf, 0x00, sizeof(buf));
                iov.iov_base = &buf;
                iov.iov_len = sizeof(buf);
                memset(&smsg, 0x00, sizeof(struct msghdr));
                smsg.msg_iov = &iov;
                smsg.msg_iovlen = 1;
                smsg.msg_control = cred_msg;
                smsg.msg_controllen = sizeof(cred_msg);

                buflen = recvmsg(sock, &smsg, 0);
                if (buflen < 0) {
                        if (errno != EINTR)
                                condlog(0, "error receiving message, errno %d", errno);
                        continue;
                }

                cmsg = CMSG_FIRSTHDR(&smsg);
                if (cmsg == NULL || cmsg->cmsg_type != SCM_CREDENTIALS) {
                        condlog(3, "no sender credentials received, message ignored");
                        continue;
                }

                cred = (struct ucred *)CMSG_DATA(cmsg);
                if (cred->uid != 0) {
                        condlog(3, "sender uid=%d, message ignored", cred->uid);
                        continue;
                }

                /* skip header */
                bufpos = strlen(buf) + 1;
                if (bufpos < sizeof("a@/d") || bufpos >= sizeof(buf)) {
                        condlog(3, "invalid message length");
                        continue;
                }

                /* check message header */
                if (strstr(buf, "@/") == NULL) {
                        condlog(3, "unrecognized message header");
                        continue;
                }
                if ((size_t)buflen > sizeof(buf)-1) {
                        condlog(2, "buffer overflow for received uevent");
                        buflen = sizeof(buf)-1;
                }

                uev = uevent_from_buffer(buf, buflen);
                if (!uev)
                        continue;
                /*
                 * Queue uevent and poke service pthread.
                 */
                pthread_mutex_lock(uevq_lockp);
                list_add_tail(&uev->node, &uevq);
                pthread_cond_signal(uev_condp);
                pthread_mutex_unlock(uevq_lockp);
        }

exit:
        close(sock);
        return 1;
}

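/*
 * Build a struct uevent from a libudev device: flatten its property list
 * into uev->buffer as consecutive "NAME=value" strings referenced by
 * envp[], and pick out DEVPATH and ACTION along the way.
 */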
struct uevent *uevent_from_udev_device(struct udev_device *dev)
{
        struct uevent *uev;
        int i = 0;
        char *pos, *end;
        struct udev_list_entry *list_entry;

        uev = alloc_uevent();
        if (!uev) {
                udev_device_unref(dev);
                condlog(1, "lost uevent, oom");
                return NULL;
        }
        pos = uev->buffer;
        end = pos + HOTPLUG_BUFFER_SIZE + OBJECT_SIZE - 1;
        udev_list_entry_foreach(list_entry, udev_device_get_properties_list_entry(dev)) {
                const char *name, *value;
                int bytes;

                name = udev_list_entry_get_name(list_entry);
                if (!name)
                        name = "(null)";
                value = udev_list_entry_get_value(list_entry);
                if (!value)
                        value = "(null)";
                bytes = snprintf(pos, end - pos, "%s=%s", name, value);
                if (pos + bytes >= end) {
                        condlog(2, "buffer overflow for uevent");
                        break;
                }
                uev->envp[i] = pos;
                pos += bytes;
                *pos = '\0';
                pos++;
                if (strcmp(name, "DEVPATH") == 0)
                        uev->devpath = uev->envp[i] + 8;
                if (strcmp(name, "ACTION") == 0)
                        uev->action = uev->envp[i] + 7;
                i++;
                if (i == HOTPLUG_NUM_ENVP - 1)
                        break;
        }
        if (!uev->devpath || !uev->action) {
                udev_device_unref(dev);
                condlog(1, "uevent missing necessary fields");
                FREE(uev);
                return NULL;
        }
        uev->udev = dev;
        uev->envp[i] = NULL;

        condlog(3, "uevent '%s' from '%s'", uev->action, uev->devpath);
        uev->kernel = strrchr(uev->devpath, '/');
        if (uev->kernel)
                uev->kernel++;

        /* print payload environment */
        for (i = 0; uev->envp[i] != NULL; i++)
                condlog(5, "%s", uev->envp[i]);
        return uev;
}

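/*
 * Return true while a burst of uevents still seems to be in progress:
 * fewer than MAX_ACCUMULATION_COUNT events so far, less than
 * MAX_ACCUMULATION_TIME ms since the burst started, and an arrival rate
 * above MIN_BURST_SPEED events per second.  uevent_listen() keeps
 * accumulating events with a short poll timeout while this returns true.
 */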
bool uevent_burst(struct timeval *start_time, int events)
{
        struct timeval diff_time, end_time;
        unsigned long speed;
        unsigned long eclipse_ms;

        if (events > MAX_ACCUMULATION_COUNT) {
                condlog(2, "burst got %d uevents, too many uevents, stopped", events);
                return false;
        }

        gettimeofday(&end_time, NULL);
        timersub(&end_time, start_time, &diff_time);

        eclipse_ms = diff_time.tv_sec * 1000 + diff_time.tv_usec / 1000;

        if (eclipse_ms == 0)
                return true;

        if (eclipse_ms > MAX_ACCUMULATION_TIME) {
                condlog(2, "burst lasted %lu ms, too long, stopped", eclipse_ms);
                return false;
        }

        speed = (events * 1000) / eclipse_ms;
        if (speed > MIN_BURST_SPEED)
                return true;

        return false;
}

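/*
 * Listener thread: receive uevents from the udev monitor, accumulate them
 * in uevlisten_tmp while a burst is ongoing (see uevent_burst()), and
 * forward the whole batch to uevq in one locked operation.  If the monitor
 * cannot be set up, fall back to failback_listen().
 */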
int uevent_listen(struct udev *udev)
{
        int err = 2;
        struct udev_monitor *monitor = NULL;
        int fd, socket_flags, events;
        struct timeval start_time;
        int need_failback = 1;
        int timeout = 30;
        LIST_HEAD(uevlisten_tmp);

        /*
         * Queue uevents for service by dedicated thread so that the uevent
         * listening thread does not block on multipathd locks (vecs->lock)
         * thereby not getting to empty the socket's receive buffer queue
         * often enough.
         */
        if (!udev) {
                condlog(1, "no udev context");
                return 1;
        }
        udev_ref(udev);
        pthread_cleanup_push(uevent_cleanup, udev);

        monitor = udev_monitor_new_from_netlink(udev, "udev");
        if (!monitor) {
                condlog(2, "failed to create udev monitor");
                goto failback;
        }
        pthread_cleanup_push(monitor_cleanup, monitor);
#ifdef LIBUDEV_API_RECVBUF
        if (udev_monitor_set_receive_buffer_size(monitor, 128 * 1024 * 1024))
                condlog(2, "failed to increase buffer size");
#endif
        fd = udev_monitor_get_fd(monitor);
        if (fd < 0) {
                condlog(2, "failed to get monitor fd");
                goto out;
        }
        socket_flags = fcntl(fd, F_GETFL);
        if (socket_flags < 0) {
                condlog(2, "failed to get monitor socket flags : %s",
                        strerror(errno));
                goto out;
        }
        if (fcntl(fd, F_SETFL, socket_flags & ~O_NONBLOCK) < 0) {
                condlog(2, "failed to set monitor socket flags : %s",
                        strerror(errno));
                goto out;
        }
        err = udev_monitor_filter_add_match_subsystem_devtype(monitor, "block",
                                                              "disk");
        if (err)
                condlog(2, "failed to create filter : %s", strerror(-err));
        err = udev_monitor_enable_receiving(monitor);
        if (err) {
                condlog(2, "failed to enable receiving : %s", strerror(-err));
                goto out;
        }

        events = 0;
        gettimeofday(&start_time, NULL);
        while (1) {
                struct uevent *uev;
                struct udev_device *dev;
                struct pollfd ev_poll;
                int poll_timeout;
                int fdcount;

                memset(&ev_poll, 0, sizeof(struct pollfd));
                ev_poll.fd = fd;
                ev_poll.events = POLLIN;
                poll_timeout = timeout * 1000;
                errno = 0;
                fdcount = poll(&ev_poll, 1, poll_timeout);
                if (fdcount && ev_poll.revents & POLLIN) {
                        timeout = uevent_burst(&start_time, events + 1) ? 1 : 0;
                        dev = udev_monitor_receive_device(monitor);
                        if (!dev) {
                                condlog(0, "failed getting udev device");
                                continue;
                        }
                        uev = uevent_from_udev_device(dev);
                        if (!uev)
                                continue;
                        list_add_tail(&uev->node, &uevlisten_tmp);
                        events++;
                        continue;
                }
                if (fdcount < 0) {
                        if (errno == EINTR)
                                continue;

                        condlog(0, "error receiving "
                                "uevent message: %m");
                        err = -errno;
                        break;
                }
                if (!list_empty(&uevlisten_tmp)) {
                        /*
                         * Queue uevents and poke service pthread.
                         */
                        condlog(3, "Forwarding %d uevents", events);
                        pthread_mutex_lock(uevq_lockp);
                        list_splice_tail_init(&uevlisten_tmp, &uevq);
                        pthread_cond_signal(uev_condp);
                        pthread_mutex_unlock(uevq_lockp);
                        events = 0;
                }
                gettimeofday(&start_time, NULL);
                timeout = 30;
        }
        need_failback = 0;
out:
        pthread_cleanup_pop(1);
failback:
        if (need_failback)
                err = failback_listen();
        pthread_cleanup_pop(1);
        return err;
}

int uevent_get_major(const struct uevent *uev)
{
        return uevent_get_env_positive_int(uev, "MAJOR");
}

int uevent_get_minor(const struct uevent *uev)
{
        return uevent_get_env_positive_int(uev, "MINOR");
}

int uevent_get_disk_ro(const struct uevent *uev)
{
        return uevent_get_env_positive_int(uev, "DISK_RO");
}

static char *uevent_get_dm_str(const struct uevent *uev, char *attr)
{
        const char *tmp = uevent_get_env_var(uev, attr);

        if (tmp == NULL)
                return NULL;
        return strdup(tmp);
}

char *uevent_get_dm_name(const struct uevent *uev)
{
        return uevent_get_dm_str(uev, "DM_NAME");
}

char *uevent_get_dm_path(const struct uevent *uev)
{
        return uevent_get_dm_str(uev, "DM_PATH");
}

char *uevent_get_dm_action(const struct uevent *uev)
{
        return uevent_get_dm_str(uev, "DM_ACTION");
}

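/*
 * A device-mapper uevent belongs to multipath if its DM_UUID starts with
 * UUID_PREFIX and carries a non-empty remainder after that prefix.
 */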
bool uevent_is_mpath(const struct uevent *uev)
{
        const char *uuid = uevent_get_env_var(uev, "DM_UUID");

        if (uuid == NULL)
                return false;
        if (strncmp(uuid, UUID_PREFIX, UUID_PREFIX_LEN))
                return false;
        return uuid[UUID_PREFIX_LEN] != '\0';
}