5 #include <linux/unistd.h>
10 #include <libdevmapper.h>
15 #include <sys/mount.h>
21 #include <sysfs/libsysfs.h>
22 #include <sysfs/dlist.h>
28 #include <path_state.h>
39 #include <blacklist.h>
44 #include <devmapper.h>
46 #include <discovery.h>
50 #include <switchgroup.h>
54 #include "clone_platform.h"
/* NOTE(review): this listing is an elided excerpt — intermediate source
 * lines are absent throughout; comments below describe only what is
 * visible here. */
59 #define FILE_NAME_SIZE 256
61 #define MAX_REPLY_LEN 1000
/* ramfs mount point for callout binaries (see prepare_namespace below) */
63 #define CALLOUT_DIR "/var/cache/multipathd"
/* Log a checker message once, then clear the buffer so the same message
 * is not re-logged on the next check. */
65 #define LOG_MSG(a,b) \
67 condlog(1, "%s: %s", b, a); \
68 memset(a, 0, MAX_CHECKER_MSG_SIZE); \
/* Debug variants of lock/unlock that trace file:function(line) — the
 * surrounding #if/#else selecting them is not visible in this excerpt. */
73 fprintf(stderr, "%s:%s(%i) lock %p\n", __FILE__, __FUNCTION__, __LINE__, a); \
76 fprintf(stderr, "%s:%s(%i) unlock %p\n", __FILE__, __FUNCTION__, __LINE__, a); \
77 pthread_mutex_unlock(a)
/* non-debug lock helpers: thin wrappers over pthreads */
79 #define lock(a) pthread_mutex_lock(a)
80 #define unlock(a) pthread_mutex_unlock(a)
/* struct paths fragment: mutex guarding pathvec/mpvec (full struct not
 * visible here) */
87 pthread_mutex_t *lock;
/* struct event_thread fragment: per-map waiter state */
95 char mapname[WWID_SIZE];
96 struct paths *allpaths;
/* alloc_waiter fragment: allocate an event_thread and its pthread_t.
 * MALLOC is presumably a zeroing allocator wrapper — TODO confirm. */
103 struct event_thread * wp;
105 wp = MALLOC(sizeof(struct event_thread));
110 wp->thread = MALLOC(sizeof(pthread_t));
/* error path: allocation failed */
119 condlog(0, "failed to alloc waiter");
/* set_paths_owner fragment: for every path whose WWID matches the map's
 * WWID, record the map as the path's owner (assignment line not visible). */
124 set_paths_owner (struct paths * allpaths, struct multipath * mpp)
132 vector_foreach_slot (allpaths->pathvec, pp, i) {
133 if (!strncmp(mpp->wwid, pp->wwid, WWID_SIZE)) {
134 condlog(4, "%s ownership set", pp->dev_t);
/* unset_paths_owner fragment: orphan every path currently owned by mpp
 * (the pp->mpp = NULL line is presumably in the elided part — confirm). */
141 unset_paths_owner (struct paths * allpaths, struct multipath * mpp)
146 vector_foreach_slot (allpaths->pathvec, pp, i) {
147 if (pp->mpp == mpp) {
148 condlog(4, "%s is orphaned", pp->dev_t);
/* update_multipath_table fragment: refresh mpp->params/size from the DM
 * table and re-parse it against pathvec. */
155 update_multipath_table (struct multipath *mpp, vector pathvec)
160 if (dm_get_map(mpp->alias, &mpp->size, mpp->params))
163 if(disassemble_map(pathvec, mpp->params, mpp))
/* update_multipath_status fragment: refresh and re-parse the DM status. */
170 update_multipath_status (struct multipath *mpp)
175 if(dm_get_status(mpp->alias, mpp->status))
178 if (disassemble_status(mpp->status, mpp))
/* update_multipath_strings fragment: refresh both table and status. */
185 update_multipath_strings (struct multipath *mpp, vector pathvec)
187 if (update_multipath_table(mpp, pathvec))
190 if (update_multipath_status(mpp))
/* setup_multipath fragment: derive the map's WWID (from the multipaths
 * config section if present, else from its alias), refresh its table and
 * status strings, and bind owning paths.  On error (elided branch), the
 * map is freed — callers must not reuse mpp after failure. */
197 setup_multipath (struct paths * allpaths, struct multipath * mpp)
201 wwid = get_mpe_wwid(mpp->alias);
/* config provided a WWID for this alias */
204 strncpy(mpp->wwid, wwid, WWID_SIZE);
/* fallback: alias is assumed to be the WWID itself */
207 strncpy(mpp->wwid, mpp->alias, WWID_SIZE);
209 condlog(4, "discovered map %s", mpp->alias);
211 if (update_multipath_strings(mpp, allpaths->pathvec))
214 set_paths_owner(allpaths, mpp);
215 mpp->mpe = find_mpe(mpp->wwid);
216 select_pgfailback(mpp);
/* error path: paths are kept (owned by pathvec), map struct is freed */
220 free_multipath(mpp, KEEP_PATHS);
221 condlog(0, "failed to setup multipath");
/* switch_to_pathgroup fragment: parse "mapname groupnumber" out of a
 * client command string and issue the DM switch-group ioctl. */
226 switch_to_pathgroup (char * str)
233 p += get_word(p, &mapname);
238 p += get_word(p, &buff);
/* NOTE(review): atoi() gives no error detection — strtol would be safer;
 * cannot verify input validation in the elided lines. */
243 dm_switchgroup(mapname, atoi(buff));
/* switch_pathgroup fragment: refresh path priorities, pick the best
 * path group, and switch the DM map to it if it is not already active.
 * No-op for maps configured with manual failback. */
251 switch_pathgroup (struct multipath * mpp)
253 struct pathgroup * pgp;
257 if (!mpp || mpp->pgfailback == FAILBACK_MANUAL)
260 * Refresh path priority values
262 vector_foreach_slot (mpp->pg, pgp, i)
263 vector_foreach_slot (pgp->paths, pp, j)
264 pathinfo(pp, conf->hwtable, DI_PRIO);
266 select_path_group(mpp); /* sets mpp->nextpg */
/* nextpg is 1-based, hence the -1 for the vector index */
267 pgp = VECTOR_SLOT(mpp->pg, mpp->nextpg - 1);
269 if (pgp && pgp->status != PGSTATE_ACTIVE) {
270 dm_switchgroup(mpp->alias, mpp->nextpg);
271 condlog(2, "%s: switch to path group #%i",
272 mpp->alias, mpp->nextpg);
/* update_multipath fragment: called on a DM event for mapname.  Under
 * allpaths->lock, rebuild the map's pathgroup view and propagate DM
 * "failed" states back into the checker state so the checker reacts
 * sooner (tick clamped to checkint). */
277 update_multipath (struct paths *allpaths, char *mapname)
279 struct multipath *mpp;
280 struct pathgroup *pgp;
285 lock(allpaths->lock);
286 mpp = find_mp(allpaths->mpvec, mapname);
291 free_pgvec(mpp->pg, KEEP_PATHS);
294 setup_multipath(allpaths, mpp);
297 * compare checkers states with DM states
299 vector_foreach_slot (mpp->pg, pgp, i) {
300 vector_foreach_slot (pgp->paths, pp, j) {
301 if (pp->dmstate != PSTATE_FAILED)
304 if (pp->state != PATH_DOWN) {
305 condlog(2, "%s: mark as failed", pp->dev_t);
306 pp->state = PATH_DOWN;
310 * schedule the next check earlier
312 if (pp->tick > conf->checkint)
313 pp->tick = conf->checkint;
319 unlock(allpaths->lock);
322 condlog(0, "failed to update multipath");
/* waiteventloop fragment: block on a DM_DEVICE_WAITEVENT ioctl for this
 * waiter's map, then refresh daemon state via update_multipath(). */
328 * returns the reschedule delay
329 * negative means *stop*
332 waiteventloop (struct event_thread * waiter)
336 int r = 1; /* upon problem reschedule 1s later */
/* first call: seed the event counter from the kernel */
338 if (!waiter->event_nr)
339 waiter->event_nr = dm_geteventnr(waiter->mapname);
341 if (!(dmt = dm_task_create(DM_DEVICE_WAITEVENT)))
344 if (!dm_task_set_name(dmt, waiter->mapname))
347 if (waiter->event_nr && !dm_task_set_event_nr(dmt, waiter->event_nr))
350 dm_task_no_open_count(dmt);
360 condlog(2, "devmap event (%i) on %s",
361 waiter->event_nr, waiter->mapname);
/* event classification (map reload / path fail / map removal / path
 * reinstate / group switch) — only removal stops the thread */
366 * 1) a table reload, which means our mpp structure is
367 * obsolete : refresh it through update_multipath()
368 * 2) a path failed by DM : mark as such through
370 * 3) map has gone away : stop the thread.
371 * 4) a path reinstate : nothing to do
372 * 5) a switch group : nothing to do
374 if (update_multipath(waiter->allpaths, waiter->mapname)) {
375 r = -1; /* stop the thread */
/* coalesce: skip the wakeup if the event counter did not advance */
378 event_nr = dm_geteventnr(waiter->mapname);
380 if (waiter->event_nr == event_nr)
383 waiter->event_nr = event_nr;
387 dm_task_destroy(dmt);
/* waitevent fragment: per-map waiter thread entry point; loops on
 * waiteventloop() until it returns negative. */
392 waitevent (void * et)
395 struct event_thread *waiter;
397 mlockall(MCL_CURRENT | MCL_FUTURE);
399 waiter = (struct event_thread *)et;
/* async cancel so stop_waiter_thread() can kill us even while blocked
 * in the WAITEVENT ioctl */
400 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
403 r = waiteventloop(waiter);
411 pthread_exit(waiter->thread);
/* free_waiter: body not visible in this excerpt */
417 free_waiter (struct event_thread * wp)
/* stop_waiter_thread fragment: cancel the per-map event waiter thread
 * attached to mpp (cleanup of wp is presumably in elided lines). */
424 stop_waiter_thread (struct multipath * mpp, struct paths * allpaths)
426 struct event_thread * wp;
431 wp = (struct event_thread *)mpp->waiter;
436 condlog(2, "reap event checker : %s", wp->mapname);
437 pthread_cancel(*wp->thread);
/* start_waiter_thread fragment: allocate an event_thread, point it at
 * this map, and spawn waitevent() on a small 32k stack. */
444 start_waiter_thread (struct multipath * mpp, struct paths * allpaths)
447 struct event_thread * wp;
452 if (pthread_attr_init(&attr))
455 pthread_attr_setstacksize(&attr, 32 * 1024);
461 mpp->waiter = (void *)wp;
/* NOTE(review): mapname is copied with WWID_SIZE as the bound —
 * presumably mapname[] is WWID_SIZE bytes, but strncpy does not
 * guarantee NUL-termination at max length; confirm in full source. */
462 strncpy(wp->mapname, mpp->alias, WWID_SIZE);
463 wp->allpaths = allpaths;
465 if (pthread_create(wp->thread, &attr, waitevent, wp)) {
466 condlog(0, "%s: cannot create event checker", wp->mapname);
469 condlog(2, "%s: event checker started", wp->mapname);
475 condlog(0, "failed to start waiter thread");
/* remove_map fragment: stop the waiter, pull the map out of mpvec, free
 * it (keeping paths), and orphan its paths. */
480 remove_map (struct multipath * mpp, struct paths * allpaths)
484 stop_waiter_thread(mpp, allpaths);
485 i = find_slot(allpaths->mpvec, (void *)mpp);
486 vector_del_slot(allpaths->mpvec, i);
487 free_multipath(mpp, KEEP_PATHS);
488 unset_paths_owner(allpaths, mpp);
/* uev_add_map fragment: handle a "dm-*" add uevent.  Resolve devname to
 * major:minor via sysfs, look up the DM map name, and (re)register the
 * map: a stale entry is removed and re-added because DM remove uevents
 * are unreliable (per the original comment below). */
492 uev_add_map (char * devname, struct paths * allpaths)
495 char dev_t[BLK_DEV_SIZE];
497 struct multipath * mpp;
499 if (sysfs_get_dev(sysfs_path, devname, dev_t, BLK_DEV_SIZE))
502 if (sscanf(dev_t, "%d:%d", &major, &minor) != 2)
505 buff = dm_mapname(major, minor, "multipath");
510 mpp = find_mp(allpaths->mpvec, buff);
514 * devmap already in mpvec
515 * but remove DM uevent are somewhet unreliable
516 * so for now consider safer to remove and re-add the map
518 condlog(2, "%s: remove dead config", mpp->alias);
519 remove_map(mpp, allpaths);
523 mpp = alloc_multipath();
529 mpp->alias = MALLOC(strlen(buff) + 1);
/* NOTE(review): strncat onto a freshly MALLOCed buffer assumes MALLOC
 * zero-fills (so alias[0] == '\0') — confirm MALLOC semantics. */
534 strncat(mpp->alias, buff, strlen(buff));
536 dm_get_map(mpp->alias, &mpp->size, mpp->params);
537 dm_get_status(mpp->alias, mpp->status);
539 if (setup_multipath(allpaths, mpp))
540 return 1; /* mpp freed in setup_multipath */
542 if (!vector_alloc_slot(allpaths->mpvec))
545 vector_set_slot(allpaths->mpvec, mpp);
546 set_paths_owner(allpaths, mpp);
548 if (start_waiter_thread(mpp, allpaths))
553 free_multipath(mpp, KEEP_PATHS);
/* uev_remove_map fragment: "dm-N" remove uevent; devname + 3 skips the
 * "dm-" prefix to get the minor number. */
558 uev_remove_map (char * devname, struct paths * allpaths)
561 struct multipath * mpp;
563 minor = atoi(devname + 3);
564 mpp = find_mp_by_minor(allpaths->mpvec, minor);
567 remove_map(mpp, allpaths);
/* uev_add_path fragment: handle a block-device "add" uevent for a path.
 * Stores path info in allpaths->pathvec and links the path to its owning
 * multipath map by WWID, or logs it as orphaned.  (Elided excerpt —
 * intermediate lines are absent.) */
573 uev_add_path (char * devname, struct paths * allpaths)
577 pp = find_path_by_dev(allpaths->pathvec, devname);
/* FIX: the "%s" conversion had no matching argument (undefined
 * behavior / garbage in the log) — pass devname. */
580 condlog(3, "%s: already in pathvec", devname);
583 condlog(2, "add %s path checker", devname);
584 pp = store_pathinfo(allpaths->pathvec, conf->hwtable,
585 devname, DI_SYSFS | DI_WWID);
/* bind the new path to its map, if one exists for its WWID */
590 pp->mpp = find_mp_by_wwid(allpaths->mpvec, pp->wwid);
593 condlog(4, "%s: ownership set to %s",
594 pp->dev_t, pp->mpp->alias);
596 condlog(4, "%s: orphaned", pp->dev_t);
/* uev_remove_path fragment: handle a block-device "remove" uevent by
 * deleting the matching path from pathvec.  (Elided excerpt —
 * intermediate lines are absent.) */
602 uev_remove_path (char * devname, struct paths * allpaths)
607 pp = find_path_by_dev(allpaths->pathvec, devname);
/* FIX: the "%s" conversion had no matching argument (undefined
 * behavior / garbage in the log) — pass devname. */
610 condlog(3, "%s: not in pathvec", devname);
613 condlog(2, "remove %s path checker", devname);
614 i = find_slot(allpaths->pathvec, (void *)pp);
615 vector_del_slot(allpaths->pathvec, i);
/* show_paths fragment: build a textual listing of all paths into a
 * MALLOCed reply buffer (caller presumably frees it — confirm).
 * NOTE(review): the sprintf chain writes into a fixed MAX_REPLY_LEN
 * buffer with no visible bounds checking; verify overflow handling in
 * the elided lines. */
622 show_paths (struct paths * allpaths)
629 reply = MALLOC(MAX_REPLY_LEN);
635 c += sprintf(c, "\n");
637 vector_foreach_slot(allpaths->pathvec, pp, i) {
638 c += sprintf(c, "%10s: ", pp->dev);
641 c += sprintf(c, "[orphan]\n");
645 c += sprintf(c, "state %i, ", pp->state);
/* render a countdown gauge: ticks until next check vs. check interval */
648 k = pp->checkint - pp->tick;
649 c += sprintf(c, "%3i/%3i ", j, pp->checkint);
652 c += sprintf(c, "X");
656 c += sprintf(c, ".");
658 c += sprintf(c, "\n");
/* show_maps fragment: same pattern for multipath maps, showing the
 * scheduled-failback countdown per map. */
665 show_maps (struct paths * allpaths)
668 struct multipath * mpp;
672 reply = MALLOC(MAX_REPLY_LEN);
678 c += sprintf(c, "\n");
680 vector_foreach_slot(allpaths->mpvec, mpp, i) {
681 c += sprintf(c, "%20s: ", mpp->alias);
683 if (!mpp->failback_tick) {
684 c += sprintf(c, "[no scheduled failback]\n");
688 j = mpp->failback_tick;
689 k = mpp->pgfailback - mpp->failback_tick;
690 c += sprintf(c, "%3i/%3i ", j, mpp->pgfailback);
693 c += sprintf(c, "X");
697 c += sprintf(c, ".");
699 c += sprintf(c, "\n");
/* uxsock_trigger fragment: dispatch a CLI command received on the unix
 * socket.  Two-letter prefixes select the action: lp/lm list paths/maps,
 * ap/rp add/remove a path, am/rm add/remove a map, sg switch pathgroup.
 * Runs under allpaths->lock. */
706 uxsock_trigger (char * str, void * trigger_data)
708 struct paths * allpaths;
711 allpaths = (struct paths *)trigger_data;
713 lock(allpaths->lock);
715 if (*str == 'l' && *(str + 1) == 'p')
716 reply = show_paths(allpaths);
718 else if (*str == 'l' && *(str + 1) == 'm')
719 reply = show_maps(allpaths);
721 else if (*str == 'r' && *(str + 1) == 'p')
722 uev_remove_path(str + 3, allpaths);
724 else if (*str == 'a' && *(str + 1) == 'p')
725 uev_add_path(str + 3, allpaths);
727 else if (*str == 'r' && *(str + 1) == 'm')
728 uev_remove_map(str + 3, allpaths);
730 else if (*str == 'a' && *(str + 1) == 'm')
731 uev_add_map(str + 3, allpaths);
733 else if (*str == 's' && *(str + 1) == 'g')
734 switch_to_pathgroup(str + 3);
/* NOTE(review): asprintf return value is not checked here — reply may
 * be left undefined on allocation failure. */
737 asprintf(&reply, "ok\n");
739 unlock(allpaths->lock);
/* uev_trigger fragment: dispatch a kernel uevent.  "dm-*" device names
 * are devmap add/remove events; anything else is a path event, subject
 * to the configured blacklist.  Runs under allpaths->lock. */
745 uev_trigger (struct uevent * uev, void * trigger_data)
749 struct paths * allpaths;
751 allpaths = (struct paths *)trigger_data;
752 lock(allpaths->lock);
/* only block-subsystem events are of interest */
754 if (strncmp(uev->devpath, "/block", 6))
757 basename(uev->devpath, devname);
760 * device map add/remove event
762 if (!strncmp(devname, "dm-", 3)) {
763 condlog(2, "%s %s devmap", uev->action, devname);
765 if (!strncmp(uev->action, "add", 3)) {
766 r = uev_add_map(devname, allpaths);
769 if (!strncmp(uev->action, "remove", 6)) {
770 r = uev_remove_map(devname, allpaths);
777 * path add/remove event
779 if (blacklist(conf->blist, devname))
782 if (!strncmp(uev->action, "add", 3)) {
783 r = uev_add_path(devname, allpaths);
786 if (!strncmp(uev->action, "remove", 6)) {
787 r = uev_remove_path(devname, allpaths);
793 unlock(allpaths->lock);
/* thread entry points: run the uevent and unix-socket listeners,
 * feeding the trigger callbacks above */
798 ueventloop (void * ap)
800 uevent_listen(&uev_trigger, ap);
806 uxlsnrloop (void * ap)
808 uxsock_listen(&uxsock_trigger, ap);
/* strvec_free fragment: free each string in a vector (vector itself
 * presumably freed in elided lines — confirm). */
814 strvec_free (vector vec)
819 vector_foreach_slot (vec, str, i)
/* exit_daemon fragment: tear down — unmount the callout ramfs, remove
 * the pidfile, log shutdown. */
827 exit_daemon (int status)
830 fprintf(stderr, "bad exit status. see daemon.log\n");
832 condlog(3, "umount ramfs");
835 condlog(3, "unlink pidfile");
836 unlink(DEFAULT_PIDFILE);
838 condlog(2, "--------shut down-------");
847 * caller must have locked the path list before calling that function
850 get_dm_mpvec (struct paths * allpaths)
853 struct multipath * mpp;
/* enumerate existing "multipath"-target DM maps at startup */
855 if (dm_get_maps(allpaths->mpvec, "multipath"))
858 vector_foreach_slot (allpaths->mpvec, mpp, i) {
859 setup_multipath(allpaths, mpp);
860 mpp->minor = dm_get_minor(mpp->alias);
861 start_waiter_thread(mpp, allpaths);
/* fail_path fragment: proactively fail a path in the DM map when the
 * checker sees it go down. */
868 fail_path (struct path * pp)
873 condlog(2, "checker failed path %s in map %s",
874 pp->dev_t, pp->mpp->alias);
876 dm_fail_path(pp->mpp->alias, pp->dev_t);
880 * caller must have locked the path list before calling that function
883 reinstate_path (struct path * pp)
886 if (dm_reinstate(pp->mpp->alias, pp->dev_t))
887 condlog(0, "%s: reinstate failed", pp->dev_t);
889 condlog(2, "%s: reinstated", pp->dev_t);
/* enable_group fragment: re-enable a disabled path group once one of
 * its paths comes back up (pgindex is 1-based). */
894 enable_group(struct path * pp)
896 struct pathgroup * pgp;
898 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
900 if (pgp->status == PGSTATE_DISABLED) {
901 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
902 dm_enablegroup(pp->mpp->alias, pp->pgindex);
/* checkerloop fragment: the main path-checker thread.  Under the global
 * lock, walks every path, runs its checker function, and reacts to
 * state transitions: fail/reinstate in DM, schedule or cancel deferred
 * failback, re-enable groups, and back off the per-path check interval
 * (doubling up to conf->max_checkint) while a path stays stable. */
907 checkerloop (void *ap)
909 struct paths *allpaths;
913 char checker_msg[MAX_CHECKER_MSG_SIZE];
915 mlockall(MCL_CURRENT | MCL_FUTURE);
917 memset(checker_msg, 0, MAX_CHECKER_MSG_SIZE);
918 allpaths = (struct paths *)ap;
920 condlog(2, "path checkers start up");
923 lock(allpaths->lock);
926 vector_foreach_slot (allpaths->pathvec, pp, i) {
932 * don't check this path yet
939 * provision a next check soonest,
940 * in case we exit abnormaly from here
942 pp->tick = conf->checkint;
945 pathinfo(pp, conf->hwtable, DI_SYSFS);
950 condlog(0, "%s: checkfn is void", pp->dev);
/* run the per-path checker callback */
953 newstate = pp->checkfn(pp->fd, checker_msg,
954 &pp->checker_context);
956 if (newstate != pp->state) {
957 pp->state = newstate;
958 LOG_MSG(checker_msg, pp->dev_t);
961 * upon state change, reset the checkint
962 * to the shortest delay
964 pp->checkint = conf->checkint;
966 if (newstate == PATH_DOWN ||
967 newstate == PATH_SHAKY) {
969 * proactively fail path in the DM
974 * cancel scheduled failback
976 pp->mpp->failback_tick = 0;
982 * reinstate this path
987 * need to switch group ?
989 update_multipath_strings(pp->mpp,
993 * schedule defered failback
995 if (pp->mpp->pgfailback > 0)
996 pp->mpp->failback_tick =
999 if (pp->mpp->pgfailback == FAILBACK_IMMEDIATE)
1000 switch_pathgroup(pp->mpp);
1003 * if at least one path is up in a group, and
1004 * the group is disabled, re-enable it
1006 if (newstate == PATH_UP)
/* no state change: handle the stable-path bookkeeping */
1009 else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1011 * PATH_UP for last two checks
1012 * defered failback getting sooner
1014 if (pp->mpp->pgfailback > 0) {
1015 if (pp->mpp->failback_tick > 0) {
1016 pp->mpp->failback_tick--;
1018 if (!pp->mpp->failback_tick)
1019 switch_pathgroup(pp->mpp);
1024 * and double the next check delay.
1025 * max at conf->max_checkint
1027 if (pp->checkint < (conf->max_checkint / 2))
1028 pp->checkint = 2 * pp->checkint;
1030 pp->checkint = conf->max_checkint;
1032 pp->tick = pp->checkint;
1033 condlog(4, "%s: delay next check %is",
1034 pp->dev_t, pp->tick);
1037 pp->state = newstate;
1039 unlock(allpaths->lock);
/* init_paths fragment: allocate the global paths container (mutex,
 * pathvec, mpvec) with goto-style cleanup on partial failure. */
static struct paths *
1048 struct paths *allpaths;
1050 allpaths = MALLOC(sizeof(struct paths));
1056 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1058 if (!allpaths->lock)
1061 allpaths->pathvec = vector_alloc();
1063 if (!allpaths->pathvec)
1066 allpaths->mpvec = vector_alloc();
1068 if (!allpaths->mpvec)
1071 pthread_mutex_init(allpaths->lock, NULL);
/* error unwinding: free whatever was allocated before the failure */
1076 vector_free(allpaths->pathvec);
1078 FREE(allpaths->lock);
1081 condlog(0, "failed to init paths");
1086 * this logic is all about keeping callouts working in case of
1087 * system disk outage (think system over SAN)
1088 * this needs the clone syscall, so don't bother if not present
/* prepare_namespace fragment: in the daemon's private mount namespace,
 * create a ramfs sized to hold the configured callout binaries, copy
 * them in, and bind-mount it over /sbin, /bin and /tmp so callouts keep
 * working if the system disk goes away. */
1093 prepare_namespace(void)
1095 mode_t mode = S_IRWXU;
1097 char ramfs_args[64];
1102 struct stat statbuf;
1104 buf = MALLOC(sizeof(struct stat));
1107 * create a temp mount point for ramfs
1109 if (stat(CALLOUT_DIR, buf) < 0) {
1110 if (mkdir(CALLOUT_DIR, mode) < 0) {
1111 condlog(0, "cannot create " CALLOUT_DIR);
1114 condlog(4, "created " CALLOUT_DIR);
1118 * compute the optimal ramdisk size
1120 vector_foreach_slot (conf->binvec, bin,i) {
1121 if ((fd = open(bin, O_RDONLY)) < 0) {
1122 condlog(0, "cannot open %s", bin);
1125 if (fstat(fd, &statbuf) < 0) {
1126 condlog(0, "cannot stat %s", bin);
/* NOTE(review): fd does not appear to be closed in the visible lines —
 * check the elided code for a descriptor leak per iteration. */
1129 size += statbuf.st_size;
1132 condlog(3, "ramfs maxsize is %u", (unsigned int) size);
1137 if (safe_sprintf(ramfs_args, "maxsize=%u", (unsigned int) size)) {
1138 fprintf(stderr, "ramfs_args too small\n");
1141 if (mount(NULL, CALLOUT_DIR, "ramfs", MS_SYNCHRONOUS, ramfs_args) < 0) {
1142 condlog(0, "cannot mount ramfs on " CALLOUT_DIR);
1145 condlog(4, "mount ramfs on " CALLOUT_DIR);
1148 * populate the ramfs with callout binaries
1150 vector_foreach_slot (conf->binvec, bin,i) {
1151 if (copytodir(bin, CALLOUT_DIR) < 0) {
1152 condlog(0, "cannot copy %s in ramfs", bin);
1155 condlog(4, "cp %s in ramfs", bin);
1157 strvec_free(conf->binvec);
1160 * bind the ramfs to :
1161 * /sbin : default home of multipath ...
1162 * /bin : default home of scsi_id ...
1163 * /tmp : home of scsi_id temp files
1165 if (mount(CALLOUT_DIR, "/sbin", NULL, MS_BIND, NULL) < 0) {
1166 condlog(0, "cannot bind ramfs on /sbin");
1169 condlog(4, "bind ramfs on /sbin");
1170 if (mount(CALLOUT_DIR, "/bin", NULL, MS_BIND, NULL) < 0) {
1171 condlog(0, "cannot bind ramfs on /bin");
1174 condlog(4, "bind ramfs on /bin");
1175 if (mount(CALLOUT_DIR, "/tmp", NULL, MS_BIND, NULL) < 0) {
1176 condlog(0, "cannot bind ramfs on /tmp");
1179 condlog(4, "bind ramfs on /tmp");
/* signal_set fragment: sigaction-based signal() replacement returning
 * the previous handler. */
1186 signal_set(int signo, void (*func) (int))
1189 struct sigaction sig;
1190 struct sigaction osig;
1192 sig.sa_handler = func;
1193 sigemptyset(&sig.sa_mask);
1196 r = sigaction(signo, &sig, &osig);
1201 return (osig.sa_handler);
/* sighup fragment: SIGHUP handler — logs, and (in a debug build path)
 * dumps allocation state. */
1207 condlog(2, "SIGHUP received");
1210 dbg_free_final(NULL);
/* signal setup fragment */
1223 signal_set(SIGHUP, sighup);
1224 signal_set(SIGINT, sigend);
1225 signal_set(SIGTERM, sigend);
/* NOTE(review): SIGKILL cannot be caught or ignored — this sigaction
 * call will fail with EINVAL; the line should be dropped. */
1226 signal_set(SIGKILL, sigend);
/* scheduler setup fragment: request SCHED_RR realtime scheduling */
1233 static struct sched_param sched_param = {
1237 res = sched_setscheduler (0, SCHED_RR, &sched_param);
1240 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
/* set_oom_adj fragment: lower the daemon's OOM-killer priority via
 * /proc/self/oom_adj (legacy interface; oom_score_adj on newer kernels) */
1245 set_oom_adj (int val)
1249 fp = fopen("/proc/self/oom_adj", "w");
1254 fprintf(fp, "%i", val);
/* child fragment: daemon body, run in a cloned mount namespace (see
 * main).  Loads config, fills defaults, writes the pidfile, prepares
 * the callout ramfs, discovers paths and maps, then runs the checker,
 * uevent and unix-socket listener threads until they exit. */
1259 child (void * param)
1261 pthread_t check_thr, uevent_thr, uxlsnr_thr;
1262 pthread_attr_t attr;
1263 struct paths * allpaths;
1265 mlockall(MCL_CURRENT | MCL_FUTURE);
1270 condlog(2, "--------start up--------");
1271 condlog(2, "read " DEFAULT_CONFIGFILE);
1273 if (load_config(DEFAULT_CONFIGFILE))
1276 setlogmask(LOG_UPTO(conf->verbosity + 3));
1279 * fill the voids left in the config file
1281 if (!conf->binvec) {
1282 conf->binvec = vector_alloc();
1283 push_callout("/sbin/scsi_id");
1285 if (!conf->multipath) {
1286 conf->multipath = MULTIPATH;
1287 push_callout(conf->multipath);
1289 if (!conf->checkint) {
1290 conf->checkint = CHECKINT;
1291 conf->max_checkint = MAX_CHECKINT;
1294 if (pidfile_create(DEFAULT_PIDFILE, getpid())) {
1303 allpaths = init_paths();
1308 if (sysfs_get_mnt_path(sysfs_path, FILE_NAME_SIZE)) {
1309 condlog(0, "can not find sysfs mount point");
1314 if (prepare_namespace() < 0) {
1315 condlog(0, "cannot prepare namespace");
1321 * fetch paths and multipaths lists
1322 * no paths and/or no multipaths are valid scenarii
1323 * vectors maintenance will be driven by events
1325 path_discovery(allpaths->pathvec, conf, DI_SYSFS | DI_WWID);
1326 get_dm_mpvec(allpaths);
/* worker threads share a 64k stack attribute; join blocks forever in
 * normal operation */
1331 pthread_attr_init(&attr);
1332 pthread_attr_setstacksize(&attr, 64 * 1024);
1334 pthread_create(&check_thr, &attr, checkerloop, allpaths);
1335 pthread_create(&uevent_thr, &attr, ueventloop, allpaths);
1336 pthread_create(&uxlsnr_thr, &attr, uxlsnrloop, allpaths);
1337 pthread_join(check_thr, NULL);
1338 pthread_join(uevent_thr, NULL);
1339 pthread_join(uxlsnr_thr, NULL);
/* main fragment: require root, parse -d/-v/-k options, allocate the
 * clone stack and config, then start child() in a new mount namespace
 * via clone()/clone2() where available (arch-specific stack handling),
 * falling back to fork() on systems without CLONE_NEWNS. */
1345 main (int argc, char *argv[])
1347 extern char *optarg;
1355 if (getuid() != 0) {
1356 fprintf(stderr, "need to be root\n");
1360 /* make sure we don't lock any path */
1362 umask(umask(077) | 022);
1364 child_stack = (void *)malloc(CHILD_STACK_SIZE);
1369 conf = alloc_config();
1374 while ((arg = getopt(argc, argv, ":dv:k::")) != EOF ) {
/* NOTE(review): sizeof(optarg) is the pointer size, not the string
 * length — this guard likely does not do what was intended; confirm. */
1380 if (sizeof(optarg) > sizeof(char *) ||
1381 !isdigit(optarg[0]))
1384 conf->verbosity = atoi(optarg);
1394 #ifdef CLONE_NEWNS /* recent systems have clone() */
/* hppa/ppc64 grow the stack differently, hence the base-vs-top choice */
1396 # if defined(__hppa__) || defined(__powerpc64__)
1397 err = clone(child, child_stack, CLONE_NEWNS, NULL);
1398 # elif defined(__ia64__)
1399 err = clone2(child, child_stack,
1400 CHILD_STACK_SIZE, CLONE_NEWNS, NULL,
1403 err = clone(child, child_stack + CHILD_STACK_SIZE, CLONE_NEWNS, NULL);
1409 #else /* older system fallback to fork() */
1415 return (child(child_stack));