- with mdadm 3.x and udev, mdadm.conf is no longer needed to initialize arrays
authorMarcin Krol <hawk@tld-linux.org>
Tue, 22 Jul 2014 16:11:01 +0000 (16:11 +0000)
committerMarcin Krol <hawk@tld-linux.org>
Tue, 22 Jul 2014 16:11:01 +0000 (16:11 +0000)
- try starting RAID before LVM, since LVM on RAID is a common setup

rc-scripts-raid.patch [new file with mode: 0644]

diff --git a/rc-scripts-raid.patch b/rc-scripts-raid.patch
new file mode 100644 (file)
index 0000000..c2d6d76
--- /dev/null
@@ -0,0 +1,90 @@
+diff -ur rc-scripts-0.4.12.orig/rc.d/rc.sysinit rc-scripts-0.4.12/rc.d/rc.sysinit
+--- rc-scripts-0.4.12.orig/rc.d/rc.sysinit     2014-06-19 20:04:44.000000000 +0000
++++ rc-scripts-0.4.12/rc.d/rc.sysinit  2014-07-22 16:10:23.162000000 +0000
+@@ -305,61 +305,16 @@
+ # mdadm - manage MD devices aka Linux Software RAID
+ init_mdadm() {
+-      if [ ! -x /sbin/mdadm -o ! -f /etc/mdadm.conf ]; then
++      if [ ! -x /sbin/mdadm ]; then
+               return
+       fi
+       modprobe -s md
+-      local rc=0 golvm=0 have_md=0
+       if [ ! -f /proc/mdstat ]; then
+               return
+       fi
+-      # assume we have md if at least one ARRAY line is present
+-      if grep -qE "^([[:blank:]]|)ARRAY[[:blank:]]" /etc/mdadm.conf; then
+-              have_md=1
+-      fi
+-      # configured to do auto scanning
+-      if [ $have_md = 0 ] && grep -qE "^([[:blank:]]|)DEVICE[[:blank:]]partitions" /etc/mdadm.conf; then
+-              have_md=1
+-      fi
+-
+-      # none found
+-      if [ $have_md = 0 ]; then
+-              return
+-      fi
+-
+-      show "Starting up RAID devices"; busy
+-      /sbin/mdadm --assemble --scan --auto=yes
+-      rc=$?
+-      if [ "$rc" -eq 0 -o "$rc" -eq 2 ]; then
+-              # rc is used later too, so set sane value
+-              rc=0
+-              deltext; ok
+-              golvm=1
+-      else
+-              deltext; fail
+-      fi
+-
+-      # A non-zero return means there were problems
+-      if [ $rc -gt 0 ]; then
+-              [ -e /proc/splash ] && echo "verbose" > /proc/splash
+-              show "Starting up RAID devices"; fail
+-
+-              PS1="$(nls '(RAID Repair)# ')"; export PS1
+-              repair_shell "$(nls '*** An error occurred during the RAID startup.')"
+-      fi
+-
+-      # LVM on RAID (keep in sync with LVM init)
+-      if [ "$golvm" -eq "1" ]; then
+-              if [ -x /sbin/vgscan -a -x /sbin/vgchange ]; then
+-                      run_cmd "Scanning for LVM volume groups (on RAID)" /sbin/vgscan $lvmignorelocking
+-                      run_cmd "Activating LVM volume groups (on RAID)" /sbin/vgchange -a y $lvmsysinit
+-                      [ "$lvmversion" = "2" ] && /sbin/vgmknodes
+-              fi
+-      fi
+-      show "Starting up RAID devices"; ok
+-      return $rc
++      run_cmd "Starting up RAID devices" /sbin/mdadm --assemble --scan
+ }
+ # Init LVM
+@@ -832,7 +787,10 @@
+               fi
+       fi
+-      # Init LVM
++      if ! is_no "$MDADM"; then
++              init_mdadm
++      fi
++
+       if ! is_no "$LVM2"; then
+               init_lvm
+       fi
+@@ -844,10 +802,6 @@
+               [ $delay_cryptsetup = 0 ] && ok || fail
+       fi
+-      if ! is_no "$MDADM"; then
+-              init_mdadm
+-      fi
+-
+       _RUN_QUOTACHECK=0
+       # Check filesystems
+       if [ -z "$fastboot" ] && [ -z "$nofsck" ]; then