specify e.g. \fI/dev/sda1\fR or
\fI/dev/disk/by-path/pci-0000:00:1f.1-scsi-0:0:1:0-part1\fR
+.SH LVM
+.TP
+.B rd_NO_LVM
+disable LVM detection
+.TP
+.B rd_LVM_VG=<volume group name>
+only activate the volume groups with the given name
+
+.SH crypto LUKS
+.TP
+.B rd_NO_LUKS
+disable crypto LUKS detection
+.TP
+.B rd_LUKS_UUID=<luks uuid>
+only activate the LUKS partitions with the given UUID
+
+.SH MD
+.TP
+.B rd_NO_MD
+disable MD RAID detection
+.TP
+.B rd_MD_UUID=<md uuid>
+only activate the raid sets with the given UUID
+
+.SH DMRAID
+.TP
+.B rd_NO_DM
+disable DM RAID detection
+.TP
+.B rd_DM_UUID=<dmraid uuid>
+only activate the raid sets with the given UUID
+
.SH DHCP
.TP
.B root=dhcp
# we already asked for this device
[ -f /tmp/cryptroot-asked-$2 ] && exit 0
-# flock against other interactive activities
-{ flock -s 9;
-/bin/plymouth ask-for-password --prompt "$1 is password protected" --command="/sbin/cryptsetup luksOpen -T1 $1 $2"
-} 9>/.console.lock
+. /lib/dracut-lib.sh
+LUKS=$(getargs rd_LUKS_UUID=)
+ask=1
+
+if [ -n "$LUKS" ]; then
+ ask=0
+ for luks in $LUKS; do
+ if [ "${2##$luks}" != "$2" ]; then
+ ask=1
+ fi
+ done
+fi
+
+if [ $ask -gt 0 ]; then
+ # flock against other interactive activities
+ { flock -s 9;
+ /bin/plymouth ask-for-password \
+ --prompt "$1 is password protected" \
+ --command="/sbin/cryptsetup luksOpen -T1 $1 $2"
+ } 9>/.console.lock
+fi
# mark device as asked
>> /tmp/cryptroot-asked-$2
+unset LUKS
+unset ask
+unset luks
exit 0
# we already asked for this device
[ -f /tmp/cryptroot-asked-$2 ] && exit 0
-# flock against other interactive activities
-{ flock -s 9;
- echo -n "$1 is password protected "
- /sbin/cryptsetup luksOpen -T1 $1 $2
-} 9>/.console.lock
+. /lib/dracut-lib.sh
+LUKS=$(getargs rd_LUKS_UUID=)
+ask=1
+
+if [ -n "$LUKS" ]; then
+ ask=0
+ for luks in $LUKS; do
+ if [ "${2##$luks}" != "$2" ]; then
+ ask=1
+ fi
+ done
+fi
+
+if [ $ask -gt 0 ]; then
+ # flock against other interactive activities
+ { flock -s 9;
+ echo -n "$1 is password protected "
+ /sbin/cryptsetup luksOpen -T1 $1 $2
+ } 9>/.console.lock
+fi
# mark device as asked
>> /tmp/cryptroot-asked-$2
instmods dm_crypt cbc aes sha256 xts
inst_rules "$moddir/70-luks.rules"
inst "$moddir"/cryptroot-ask.sh /sbin/cryptroot-ask
+inst_hook cmdline 30 "$moddir/parse-crypt.sh"
--- /dev/null
+initrdargs="$initrdargs rd_NO_LUKS rd_LUKS_UUID"
+
+if getarg rd_NO_LUKS; then
+ rm -f /etc/udev/rules.d/70-luks.rules
+fi
+
if $UDEV_QUEUE_EMPTY >/dev/null 2>&1; then
[ -h "$job" ] && rm -f "$job"
+ DM_RAIDS=$(getargs rd_DM_UUID=)
# run dmraid if udev has settled
- dmraid -ay
+ info "Scanning for dmraid devices $DM_RAIDS"
+ SETS=$(dmraid -c -s)
+ info "Found dmraid sets:"
+ echo $SETS|vinfo
+    if [ -z "$DM_RAIDS" ]; then
+        # no rd_DM_UUID= given: keep the old behaviour and activate all sets
+        dmraid -ay | vinfo
+    fi
+    for r in $DM_RAIDS; do
+        for s in $SETS; do
+            if [ "${s##$r}" != "$s" ]; then
+                info "Activating $s"
+                dmraid -ay $s | vinfo
+            fi
+        done
+    done
fi
--- /dev/null
+initrdargs="$initrdargs rd_DM_UUID rd_NO_DM"
+
+if getarg rd_NO_DM; then
+    rm -f /etc/udev/rules.d/61-dmraid*.rules
+fi
\ No newline at end of file
fi
inst "$moddir/lvm_scan.sh" /sbin/lvm_scan
+inst_hook cmdline 30 "$moddir/parse-lvm.sh"
[ -h "$job" ] && rm -f "$job"
# run lvm scan if udev has settled
+ VGS=$(getargs rd_LVM_VG=)
+
[ -d /etc/lvm ] || mkdir -p /etc/lvm
# build a list of devices to scan
lvmdevs=$(
for f in /tmp/.lvm_scan-*; do
[ -e "$f" ] || continue
- echo ${f##/tmp/.lvm_scan-}
+ echo -n "${f##/tmp/.lvm_scan-} "
done
)
{
printf '"a|^/dev/%s$|", ' $dev;
done;
echo '"r/.*/" ]';
+ echo 'types = [ "blkext", 1024 ]'
echo '}';
} > /etc/lvm/lvm.conf
- lvm vgscan
- lvm vgchange -ay
+ info "Scanning devices $lvmdevs for LVM volume groups $VGS"
+ lvm vgscan 2>&1 | vinfo
+ lvm vgchange -ay $VGS 2>&1 | vinfo
fi
--- /dev/null
+initrdargs="$initrdargs rd_NO_LVM rd_LVM_VG"
+
+if getarg rd_NO_LVM; then
+ rm -f /etc/udev/rules.d/64-lvm*.rules
+fi
+
-# This file causes block devices with Linux RAID (mdadm) signatures to
-# automatically cause mdadm to be run.
-# See udev(8) for syntax
-
-SUBSYSTEM!="block", GOTO="raid_end"
-ACTION!="add|change", GOTO="raid_end"
-KERNEL=="md/*", GOTO="raid_end"
-
-KERNEL=="md*", IMPORT{program}="vol_id --export $tempnode"
-ENV{ID_FS_TYPE}=="linux_raid_member", \
- TEST!="/tmp/.mdraid_start-%k", \
- RUN+="/sbin/mdadm -I $env{DEVNAME}", \
- RUN+="/bin/sh -c '>/tmp/.mdraid_start-%k; /bin/ln -s /sbin/mdraid_start /initqueue/mdraid_start.sh'"
-
-ENV{ID_FS_TYPE}=="linux_raid_member", \
- TEST!="/tmp/.mdraid_start-%k", \
- ATTR{partition}!="?*", \
- RUN+="/sbin/partx -d $env{DEVNAME}"
-
-LABEL="raid_end"
+SUBSYSTEM=="block", ACTION=="add|change", KERNEL=="md[0-9]*|md_d[0-9]*|md/*", IMPORT{program}="vol_id --export $tempnode"
# automatically cause mdadm to be run.
# See udev(8) for syntax
-SUBSYSTEM=="block", ACTION=="add", \
- ENV{ID_FS_TYPE}=="linux_raid_member|isw_raid_member", \
- TEST!="/tmp/.mdraid_start-%k", \
- IMPORT{program}="/sbin/mdadm --examine --export $tempnode", \
- RUN+="/sbin/mdadm -I --no-degraded $env{DEVNAME}", \
- RUN+="/bin/sh -c '>/tmp/.mdraid_start-%k; /bin/ln -s /sbin/mdraid_start /initqueue/mdraid_start.sh'"
-
-SUBSYSTEM=="block", ACTION=="add", \
- ENV{ID_FS_TYPE}=="linux_raid_member|isw_raid_member", \
- TEST!="/tmp/.mdraid_start-%k", \
- ATTR{partition}!="?*", \
- RUN+="/sbin/partx -d $env{DEVNAME}"
+ACTION!="add", GOTO="md_inc_end"
+SUBSYSTEM!="block", GOTO="md_inc_end"
+ENV{ID_FS_TYPE}!="linux_raid_member|isw_raid_member", GOTO="md_inc_end"
+
+TEST=="/tmp/.mdraid_start-%k", GOTO="md_inc_end"
+
+IMPORT{program}="/sbin/mdadm --examine --export $tempnode"
+
+# UUID CHECK
+
+LABEL="do_md_inc"
+
+RUN+="/sbin/mdadm -I --no-degraded $env{DEVNAME}", RUN+="/bin/sh -c '>/tmp/.mdraid_start-%k; /bin/ln -s /sbin/mdraid_start /initqueue/mdraid_start.sh'"
+
+ATTR{partition}!="?*", RUN+="/sbin/partx -d $env{DEVNAME}"
+
+LABEL="md_inc_end"
# automatically cause mdadm to be run.
# See udev(8) for syntax
-SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="linux_raid_member", \
- TEST!="/tmp/.mdraid_start-%k", \
- IMPORT{program}="/sbin/mdadm --examine --export $tempnode", \
- RUN+="/sbin/mdadm -I $env{DEVNAME}", \
- RUN+="/bin/sh -c '>/tmp/.mdraid_start-%k; /bin/ln -s /sbin/mdraid_start /initqueue/mdraid_start.sh'"
-
-SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="linux_raid_member", \
- TEST!="/tmp/.mdraid_start-%k", \
- ATTR{partition}!="?*", \
- RUN+="/sbin/partx -d $env{DEVNAME}"
+ACTION!="add", GOTO="md_inc_end"
+SUBSYSTEM!="block", GOTO="md_inc_end"
+ENV{ID_FS_TYPE}!="linux_raid_member", GOTO="md_inc_end"
+
+TEST=="/tmp/.mdraid_start-%k", GOTO="md_inc_end"
+
+IMPORT{program}="/sbin/mdadm --examine --export $tempnode"
+
+# UUID CHECK
+
+LABEL="do_md_inc"
+
+RUN+="/sbin/mdadm -I --no-degraded $env{DEVNAME}"
+
+RUN+="/bin/sh -c '>/tmp/.mdraid_start-%k; /bin/ln -s /sbin/mdraid_start /initqueue/mdraid_start.sh'"
+
+ATTR{partition}!="?*", RUN+="/sbin/partx -d $env{DEVNAME}"
+
+LABEL="md_inc_end"
if [ -x /lib/udev/vol_id ]; then
inst_rules "$moddir/61-mdadm.rules"
else
- if mdadm -Q -e imsm /dev/null &> /dev/null; then
- inst_rules "$moddir/65-md-incremental-imsm.rules"
- else
- inst_rules "$moddir/65-md-incremental.rules"
- fi
inst_rules 64-md-raid.rules
fi
+if mdadm -Q -e imsm /dev/null &> /dev/null; then
+ inst_rules "$moddir/65-md-incremental-imsm.rules"
+else
+ inst_rules "$moddir/65-md-incremental.rules"
+fi
+
+
[ -f /etc/mdadm/mdadm.conf ] && inst /etc/mdadm/mdadm.conf /etc/mdadm.conf
[ -f /etc/mdadm.conf ] && inst /etc/mdadm.conf
if [ -x /sbin/mdmon ] ; then
fi
inst "$moddir/mdraid_start.sh" /sbin/mdraid_start
-inst grep
+inst_hook cmdline 30 "$moddir/parse-md.sh"
if $UDEV_QUEUE_EMPTY >/dev/null 2>&1; then
[ -h "$job" ] && rm -f "$job"
# run mdadm if udev has settled
- mdadm -IRs
+ info "Assembling MD RAID arrays"
+
# and activate any containers
for md in /dev/md?*; do
case $md in
- /dev/md*p*) ;;
- *)
- if mdadm --export --detail $md | grep -q container; then
- mdadm -IR $md
- fi
+ /dev/md*p*) ;;
+ *)
+ info "Starting MD RAID array $md"
+ mdadm -R $md 2>&1 | vinfo
+ mdadm -IR $md 2>&1 | vinfo
esac
done
fi
--- /dev/null
+initrdargs="$initrdargs rd_MD_UUID rd_NO_MD"
+
+if getarg rd_NO_MD; then
+    rm -f /etc/udev/rules.d/65-md-incremental*.rules
+else
+ MD_UUID=$(getargs rd_MD_UUID=)
+
+ # rewrite the md rules to only process the specified raid array
+ if [ -n "$MD_UUID" ]; then
+ for f in /etc/udev/rules.d/65-md-incremental*.rules; do
+ [ -e "$f" ] || continue
+ mv $f ${f}.bak
+ while read line; do
+ if [ "${line/UUID CHECK//}" != "$line" ]; then
+ for uuid in $MD_UUID; do
+ printf 'ENV{MD_UUID}=="%s", GOTO="do_md_inc"\n' $uuid
+ done;
+ printf 'GOTO="md_inc_end"\n';
+ else
+ echo $line;
+ fi
+ done < ${f}.bak > $f
+ rm ${f}.bak
+ done
+ fi
+fi
+