lvm.rc-r1

#!/sbin/openrc-run
# Copyright 1999-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2

_get_lvm_path() {
    local lvm_path=
    for lvm_path in /bin/lvm /sbin/lvm ; do
        [ -x "${lvm_path}" ] && break
        # Reset so callers' -z / -x checks fail when no lvm binary is found.
        lvm_path=
    done
    echo "${lvm_path}"
}
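
# Helpers that query the active LVM configuration ("lvm dumpconfig global")
# to see whether the lvmetad / lvmlockd daemons are enabled; each succeeds
# only when the corresponding use_*=1 setting is present.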
_use_lvmetad() {
    local lvm_path="$(_get_lvm_path)"
    [ ! -x "${lvm_path}" ] && return 1
    ${lvm_path} dumpconfig global 2>/dev/null | grep -q 'use_lvmetad=1'
}

_use_lvmlockd() {
    local lvm_path="$(_get_lvm_path)"
    [ ! -x "${lvm_path}" ] && return 1
    ${lvm_path} dumpconfig global 2>/dev/null | grep -q 'use_lvmlockd=1'
}

depend() {
    before checkfs fsck
    after modules device-mapper

    # We may want lvmetad based on the configuration. If we added lvmetad
    # support while lvm2 is running, then we aren't dependent on it. For the
    # more common case, if it's disabled in the config we aren't dependent
    # on it.
    config /etc/lvm/lvm.conf

    local _want=
    if service_started ; then
        _want=$(service_get_value want)
    else
        if _use_lvmetad ; then
            _want="${_want} lvmetad"
        fi
        if _use_lvmlockd ; then
            _want="${_want} lvmlockd"
        fi
    fi

    # Make sure you review /etc/conf.d/lvm as well!
    # Depending on your system, it might also introduce udev & mdraid
    need sysfs
    if [ -n "${_want}" ] ; then
        want ${_want}
    fi
}
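
# Passed to every lvm invocation below via --config, forcing the locking
# directory to /run/lock/lvm regardless of the setting in lvm.conf.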
config='global { locking_dir = "/run/lock/lvm" }'
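
# Returns success only when device-mapper shows up in both /proc/devices and
# /proc/misc; each failed grep adds its non-zero exit status to retval.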
dm_in_proc() {
    local retval=0
    for x in devices misc ; do
        grep -qs 'device-mapper' /proc/${x}
        retval=$((${retval} + $?))
    done
    return ${retval}
}

start() {
    # LVM support for /usr, /home, /opt ....
    # This should be done *before* checking local
    # volumes, or they never get checked.
    # NOTE: Add needed modules for LVM or RAID, etc.
    # to /etc/modules.autoload if needed
    lvm_path="$(_get_lvm_path)"
    if [ -z "${lvm_path}" ] ; then
        eerror "Failed to find lvm binary in /bin or /sbin!"
        return 1
    fi

    if [ -z "${CDBOOT}" ] ; then
        if [ -e /proc/modules ] && ! dm_in_proc ; then
            ebegin "Trying to load dm-mod module"
            modprobe dm-mod 2>/dev/null
            eend $?
        fi

        if [ -d /proc/lvm ] || dm_in_proc ; then
            local has_errors=0 verbose_command
            yesno "${rc_verbose}" && verbose_command=" -v"

            ebegin "Starting the Logical Volume Manager"

            if _use_lvmetad ; then
                # Extra PV find pass, because some devices might not have
                # been available until very recently.
                ${lvm_path} pvscan${verbose_command} --config "${config}" --cache
                [ $? -ne 0 ] && has_errors=1
            fi

            # Now make the nodes
            ${lvm_path} vgscan${verbose_command} --config "${config}" --mknodes
            [ $? -ne 0 ] && has_errors=1

            # Enable all VGs
            ${lvm_path} vgchange${verbose_command} --config "${config}" --sysinit --activate y
            [ $? -ne 0 ] && has_errors=1

            if _use_lvmlockd ; then
                # Start lockd VGs as required
                ${lvm_path} vgchange${verbose_command} --config "${config}" --lock-start --lock-opt auto
                [ $? -ne 0 ] && has_errors=1
            fi

            eend ${has_errors} "Failed to start the Logical Volume Manager"
        fi
    fi
}
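
# Record which daemons this start decided it wants, so that depend() can read
# the value back with service_get_value while the service is started.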
start_post() {
    local _want=
    if _use_lvmetad ; then
        _want="${_want} lvmetad"
    fi
    if _use_lvmlockd ; then
        _want="${_want} lvmlockd"
    fi
    service_set_value want "${_want}"
}
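
# Deactivate all volume groups. During the shutdown runlevel, failures are
# reported with ewend rather than eend, since LVs holding / or /usr may still
# be in use at this point.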
stop() {
    lvm_path="$(_get_lvm_path)"
    if [ -z "${lvm_path}" ] ; then
        eerror "Failed to find lvm binary in /bin or /sbin!"
        return 1
    fi

    # Stop LVM2
    if [ -f /etc/lvmtab -o -d /etc/lvm ] \
        && [ -d /proc/lvm -o "$(grep device-mapper /proc/misc 2>/dev/null)" ]
    then
        local VGS=$(${lvm_path} vgs --config "${config}" -o vg_name --noheadings --nosuffix --rows 2>/dev/null)
        if [ -z "${VGS}" ] ; then
            # nothing to do for us
            return 0
        fi

        local has_errors=0 verbose_command eend_cmd="eend"
        yesno "${rc_verbose}" && verbose_command=" -v"

        local msg="Failed to stop Logical Volume Manager"
        if [ "${RC_RUNLEVEL}" = shutdown ] ; then
            # failures on shutdown are non-fatal
            eend_cmd="ewend"
            msg="${msg} (possibly some LVs still needed for /usr or root)"
        fi

        ebegin "Stopping the Logical Volume Manager"
        ${lvm_path} vgchange${verbose_command} --config "${config}" --sysinit --activate n
        [ $? -ne 0 ] && has_errors=1
        ${eend_cmd} ${has_errors} "${msg}"
    fi

    # At this point make sure we always exit without indicating an error.
    return 0
}

# vim:ts=4