author    Billy Olsen <billy.olsen@gmail.com>  2017-02-06 21:59:51 -0700
committer Billy Olsen <billy.olsen@gmail.com>  2017-03-03 15:24:54 -0700
commit    c421aa742909f78c8b7c9a4548874795b70dad87 (patch)
tree      4d2df3f1aec79a523115b2c186107657072deeb3
parent    30141278c2c40664846b83b37bca60fa4666edaf (diff)
Roll osd ownership changes through node
Change the OSD upgrade path so that the file ownership changes for the
OSD directories are applied one OSD at a time rather than to all of the
OSDs at once.

Partial-Bug: #1662591
Change-Id: I3a1cf05207c070a8699e7ba749a0587b619d4679
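For illustration only, the per-OSD roll described above boils down to the
following standalone sketch. The helper names mirror the ones this patch
adds (stop_osd, update_owner, start_osd), but the bodies here are stand-in
stubs rather than the charm code:

import os
import re

OSD_BASE_DIR = '/var/lib/ceph/osd'


def stop_osd(osd_num):
    # Stand-in: the real helper calls service_stop()/systemctl.
    print('stopping ceph-osd@%s' % osd_num)


def update_owner(path):
    # Stand-in: the real helper shells out to 'chown -R ceph:ceph <path>'.
    print('chowning %s' % path)


def start_osd(osd_num):
    # Stand-in: the real helper calls service_start()/systemctl.
    print('starting ceph-osd@%s' % osd_num)


def roll_osds(base_dir=OSD_BASE_DIR):
    """Update ownership one OSD at a time so only one is down at once."""
    for dirname in sorted(os.listdir(base_dir)):
        match = re.search(r'ceph-(?P<osd_id>\d+)', dirname)
        if not match:
            continue  # not an OSD directory; skip it
        osd_num = match.group('osd_id')
        stop_osd(osd_num)
        update_owner(os.path.join(base_dir, dirname))
        start_osd(osd_num)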
Notes (review):
    Code-Review+1: Felipe Reyes <felipe.reyes@canonical.com>
    Code-Review+2: Chris Holcombe <chris.holcombe@canonical.com>
    Workflow+1: Chris Holcombe <chris.holcombe@canonical.com>
    Verified+2: Jenkins
    Submitted-by: Jenkins
    Submitted-at: Tue, 07 Mar 2017 16:42:47 +0000
    Reviewed-on: https://review.openstack.org/430062
    Project: openstack/charms.ceph
    Branch: refs/heads/master
-rw-r--r--  ceph/__init__.py                     294
-rw-r--r--  unit_tests/test_osd_upgrade_roll.py  122
2 files changed, 366 insertions(+), 50 deletions(-)
diff --git a/ceph/__init__.py b/ceph/__init__.py
index e87aef9..1f6196a 100644
--- a/ceph/__init__.py
+++ b/ceph/__init__.py
@@ -26,19 +26,27 @@ import errno
 import shutil
 import pyudev
 
+from datetime import datetime
+
 from charmhelpers.core import hookenv
 from charmhelpers.core.host import (
-    mkdir,
     chownr,
-    service_restart,
+    cmp_pkgrevno,
     lsb_release,
-    cmp_pkgrevno, service_stop, mounts, service_start)
+    mkdir,
+    mounts,
+    owner,
+    service_restart,
+    service_start,
+    service_stop)
 from charmhelpers.core.hookenv import (
-    log,
-    ERROR,
     cached,
+    config,
+    log,
     status_set,
-    WARNING, DEBUG, config)
+    DEBUG,
+    ERROR,
+    WARNING)
 from charmhelpers.core.services import render_template
 from charmhelpers.fetch import (
     apt_cache,
@@ -55,6 +63,9 @@ from charmhelpers.contrib.storage.linux.utils import (
 from charmhelpers.contrib.openstack.utils import (
     get_os_codename_install_source)
 
+CEPH_BASE_DIR = os.path.join(os.sep, 'var', 'lib', 'ceph')
+OSD_BASE_DIR = os.path.join(CEPH_BASE_DIR, 'osd')
+
 LEADER = 'leader'
 PEON = 'peon'
 QUORUM = [LEADER, PEON]
@@ -556,6 +567,42 @@ def get_osd_tree(service):
         raise
 
 
+def _get_child_dirs(path):
+    """Returns a list of directory names in the specified path.
+
+    :param path: a full path listing of the parent directory to return child
+                 directory names
+    :return: list. A list of child directories under the parent directory
+    :raises: ValueError if the specified path does not exist or is not a
+             directory,
+             OSError if an error occurs reading the directory listing
+    """
+    if not os.path.exists(path):
+        raise ValueError('Specified path "%s" does not exist' % path)
+    if not os.path.isdir(path):
+        raise ValueError('Specified path "%s" is not a directory' % path)
+
+    return filter(os.path.isdir, os.listdir(path))
+
+
+def _get_osd_num_from_dirname(dirname):
+    """Parses the dirname and returns the OSD id.
+
+    Parses a string in the form of 'ceph-{osd#}' and returns the osd number
+    from the directory name.
+
+    :param dirname: the directory name to return the OSD number from
+    :return int: the osd number the directory name corresponds to
+    :raises ValueError: if the osd number cannot be parsed from the provided
+                        directory name.
+    """
+    match = re.search(r'ceph-(?P<osd_id>\d+)', dirname)
+    if not match:
+        raise ValueError("dirname not in correct format: %s" % dirname)
+
+    return match.group('osd_id')
+
+
 def get_local_osd_ids():
     """
     This will list the /var/lib/ceph/osd/* directories and try
@@ -1635,42 +1682,198 @@ def upgrade_osd(new_version):
         add_source(config('source'), config('key'))
         apt_update(fatal=True)
     except subprocess.CalledProcessError as err:
-        log("Adding the ceph source failed with message: {}".format(
+        log("Adding the ceph sources failed with message: {}".format(
             err.message))
         status_set("blocked", "Upgrade to {} failed".format(new_version))
         sys.exit(1)
+
     try:
-        if systemd():
-            for osd_id in get_local_osd_ids():
-                service_stop('ceph-osd@{}'.format(osd_id))
-        else:
-            service_stop('ceph-osd-all')
+        # Upgrade the packages before restarting the daemons.
+        status_set('maintenance', 'Upgrading packages to %s' % new_version)
         apt_install(packages=PACKAGES, fatal=True)
 
-        # Ensure the files and directories under /var/lib/ceph is chowned
-        # properly as part of the move to the Jewel release, which moved the
-        # ceph daemons to running as ceph:ceph instead of root:root. Only do
-        # it when necessary as this is an expensive operation to run.
-        if new_version == 'jewel':
-            owner = ceph_user()
-            status_set('maintenance', 'Updating file ownership for OSDs')
-            chownr(path=os.path.join(os.sep, "var", "lib", "ceph"),
-                   owner=owner,
-                   group=owner,
-                   follow_links=True)
+        # If the upgrade does not need an ownership update of any of the
+        # directories in the osd service directory, then simply restart
+        # all of the OSDs at the same time as this will be the fastest
+        # way to update the code on the node.
+        if not dirs_need_ownership_update('osd'):
+            log('Restarting all OSDs to load new binaries', DEBUG)
+            service_restart('ceph-osd-all')
+            return
 
-        if systemd():
-            for osd_id in get_local_osd_ids():
-                service_start('ceph-osd@{}'.format(osd_id))
-        else:
-            service_start('ceph-osd-all')
-    except subprocess.CalledProcessError as err:
+        # Need to change the ownership of all directories which are not OSD
+        # directories as well.
+        # TODO - this should probably be moved to the general upgrade function
+        # and done before mon/osd.
+        update_owner(CEPH_BASE_DIR, recurse_dirs=False)
+        non_osd_dirs = filter(lambda x: not x == 'osd',
+                              os.listdir(CEPH_BASE_DIR))
+        non_osd_dirs = map(lambda x: os.path.join(CEPH_BASE_DIR, x),
+                           non_osd_dirs)
+        for path in non_osd_dirs:
+            update_owner(path)
+
+        # Fast service restart wasn't an option because each of the OSD
+        # directories need the ownership updated for all the files on
+        # the OSD. Walk through the OSDs one-by-one upgrading the OSD.
+        for osd_dir in _get_child_dirs(OSD_BASE_DIR):
+            try:
+                osd_num = _get_osd_num_from_dirname(osd_dir)
+                _upgrade_single_osd(osd_num, osd_dir)
+            except ValueError as ex:
+                # Directory could not be parsed - junk directory?
+                log('Could not parse osd directory %s: %s' % (osd_dir, ex),
+                    WARNING)
+                continue
+
+    except (subprocess.CalledProcessError, IOError) as err:
         log("Stopping ceph and upgrading packages failed "
             "with message: {}".format(err.message))
         status_set("blocked", "Upgrade to {} failed".format(new_version))
         sys.exit(1)
 
 
+def _upgrade_single_osd(osd_num, osd_dir):
+    """Upgrades the single OSD directory.
+
+    :param osd_num: the num of the OSD
+    :param osd_dir: the directory of the OSD to upgrade
+    :raises CalledProcessError: if an error occurs in a command issued as part
+                                of the upgrade process
+    :raises IOError: if an error occurs reading/writing to a file as part
+                     of the upgrade process
+    """
+    stop_osd(osd_num)
+    disable_osd(osd_num)
+    update_owner(os.path.join(OSD_BASE_DIR, osd_dir))
+    enable_osd(osd_num)
+    start_osd(osd_num)
+
+
+def stop_osd(osd_num):
+    """Stops the specified OSD number.
+
+    :param osd_num: the osd number to stop
+    """
+    if systemd():
+        service_stop('ceph-osd@{}'.format(osd_num))
+    else:
+        service_stop('ceph-osd', id=osd_num)
+
+
+def start_osd(osd_num):
+    """Starts the specified OSD number.
+
+    :param osd_num: the osd number to start.
+    """
+    if systemd():
+        service_start('ceph-osd@{}'.format(osd_num))
+    else:
+        service_start('ceph-osd', id=osd_num)
+
+
+def disable_osd(osd_num):
+    """Disables the specified OSD number.
+
+    Ensures that the specified osd will not be automatically started at the
+    next reboot of the system. Due to differences between init systems,
+    this method cannot make any guarantees that the specified osd cannot be
+    started manually.
+
+    :param osd_num: the osd id which should be disabled.
+    :raises CalledProcessError: if an error occurs invoking the systemd cmd
+                                to disable the OSD
+    :raises IOError, OSError: if the attempt to read/remove the ready file in
+                              an upstart enabled system fails
+    """
+    if systemd():
+        # When running under systemd, the individual ceph-osd daemons run as
+        # templated units and can be directly addressed by referring to the
+        # templated service name ceph-osd@<osd_num>. Additionally, systemd
+        # allows one to disable a specific templated unit by running the
+        # 'systemctl disable ceph-osd@<osd_num>' command. When disabled, the
+        # OSD should remain disabled until re-enabled via systemd.
+        # Note: disabling an already disabled service in systemd returns 0, so
+        # no need to check whether it is enabled or not.
+        cmd = ['systemctl', 'disable', 'ceph-osd@{}'.format(osd_num)]
+        subprocess.check_call(cmd)
+    else:
+        # Neither upstart nor the ceph-osd upstart script provides for
+        # disabling the starting of an OSD automatically. The specific OSD
+        # cannot be prevented from running manually, however it can be
+        # prevented from running automatically on reboot by removing the
+        # 'ready' file in the OSD's root directory. This is due to the
+        # ceph-osd-all upstart script checking for the presence of this file
+        # before starting the OSD.
+        ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num),
+                                  'ready')
+        if os.path.exists(ready_file):
+            os.unlink(ready_file)
+
+
+def enable_osd(osd_num):
+    """Enables the specified OSD number.
+
+    Ensures that the specified osd_num will be enabled and ready to start
+    automatically in the event of a reboot.
+
+    :param osd_num: the osd id which should be enabled.
+    :raises CalledProcessError: if the call to the systemd command issued
+                                fails when enabling the service
+    :raises IOError: if the attempt to write the ready file in an upstart
+                     enabled system fails
+    """
+    if systemd():
+        cmd = ['systemctl', 'enable', 'ceph-osd@{}'.format(osd_num)]
+        subprocess.check_call(cmd)
+    else:
+        # When running on upstart, the OSDs are started via the ceph-osd-all
+        # upstart script which will only start the osd if it has a 'ready'
+        # file. Make sure that file exists.
+        ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num),
+                                  'ready')
+        with open(ready_file, 'w') as f:
+            f.write('ready')
+
+        # Make sure the correct user owns the file. It shouldn't be necessary
+        # as the upstart script should run with root privileges, but it's
+        # better to have all the files matching ownership.
+        update_owner(ready_file)
+
+
+def update_owner(path, recurse_dirs=True):
+    """Changes the ownership of the specified path.
+
+    Changes the ownership of the specified path to the new ceph daemon user
+    using the system's native chown functionality. This may take awhile,
+    so this method will issue a set_status for any changes of ownership which
+    recurses into directory structures.
+
+    :param path: the path to recursively change ownership for
+    :param recurse_dirs: boolean indicating whether to recursively change the
+                         ownership of all the files in a path's subtree or to
+                         simply change the ownership of the path.
+    :raises CalledProcessError: if an error occurs issuing the chown system
+                                command
+    """
+    user = ceph_user()
+    user_group = '{ceph_user}:{ceph_user}'.format(ceph_user=user)
+    cmd = ['chown', user_group, path]
+    if os.path.isdir(path) and recurse_dirs:
+        status_set('maintenance', ('Updating ownership of %s to %s' %
+                                   (path.split('/')[-1], user)))
+        cmd.insert(1, '-R')
+
+    log('Changing ownership of {path} to {user}'.format(
+        path=path, user=user_group), DEBUG)
+    start = datetime.now()
+    subprocess.check_call(cmd)
+    elapsed_time = (datetime.now() - start)
+
+    log('Took {secs} seconds to change the ownership of path: {path}'.format(
+        secs=elapsed_time.total_seconds(), path=path), DEBUG)
+
+
 def list_pools(service):
     """
     This will list the current pools that Ceph has
@@ -1689,6 +1892,39 @@ def list_pools(service):
         log("rados lspools failed with error: {}".format(err.output))
         raise
 
+
+def dirs_need_ownership_update(service):
+    """Determines if directories still need change of ownership.
+
+    Examines the set of directories under the /var/lib/ceph/{service}
+    directory and determines if they have the correct ownership or not. This
+    is necessary due to the upgrade from Hammer to Jewel where the daemon
+    user changes from root: to ceph:.
+
+    :param service: the name of the service folder to check (e.g. osd, mon)
+    :return: boolean. True if the directories need a change of ownership,
+             False otherwise.
+    :raises IOError: if an error occurs reading the file stats from one of
+                     the child directories.
+    :raises OSError: if the specified path does not exist or some other error
+    """
+    expected_owner = expected_group = ceph_user()
+    path = os.path.join(CEPH_BASE_DIR, service)
+    for child in _get_child_dirs(path):
+        child_path = os.path.join(path, child)
+        curr_owner, curr_group = owner(child_path)
+
+        if (curr_owner == expected_owner) and (curr_group == expected_group):
+            continue
+
+        log('Directory "%s" needs its ownership updated' % child_path,
+            DEBUG)
+        return True
+
+    # All child directories had the expected ownership
+    return False
+
+
 # A dict of valid ceph upgrade paths. Mapping is old -> new
 UPGRADE_PATHS = {
     'firefly': 'hammer',
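Note on the helpers above: dirs_need_ownership_update() relies on
charmhelpers' owner() to report the owning user and group of each child
directory. A rough standalone equivalent, assuming owner() wraps os.stat
with the pwd/grp modules (an assumption; the charmhelpers implementation
may differ):

import grp
import os
import pwd


def file_owner(path):
    """Return the (user, group) names owning the given path."""
    st = os.stat(path)  # follows symlinks, like chown without -h
    user = pwd.getpwuid(st.st_uid).pw_name
    group = grp.getgrgid(st.st_gid).gr_name
    return user, group


# Before the Jewel upgrade an OSD directory typically reports
# ('root', 'root'); afterwards it should report ('ceph', 'ceph'),
# which is the condition dirs_need_ownership_update() checks.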
diff --git a/unit_tests/test_osd_upgrade_roll.py b/unit_tests/test_osd_upgrade_roll.py
index 4a878ab..2e58c88 100644
--- a/unit_tests/test_osd_upgrade_roll.py
+++ b/unit_tests/test_osd_upgrade_roll.py
@@ -13,9 +13,11 @@
 # limitations under the License.
 
 import time
+import os
 import unittest
+import sys
 
-from mock import patch, call
+from mock import patch, call, mock_open
 
 import ceph
 from ceph import CrushLocation
@@ -61,10 +63,10 @@ def monitor_key_side_effect(*args):
 
 
 class UpgradeRollingTestCase(unittest.TestCase):
+    @patch('ceph.dirs_need_ownership_update')
     @patch('ceph.apt_install')
     @patch('ceph.chownr')
-    @patch('ceph.service_stop')
-    @patch('ceph.service_start')
+    @patch('ceph.service_restart')
     @patch('ceph.log')
     @patch('ceph.status_set')
     @patch('ceph.apt_update')
@@ -75,16 +77,16 @@ class UpgradeRollingTestCase(unittest.TestCase):
     @patch('ceph.config')
     def test_upgrade_osd_hammer(self, config, get_version, systemd, local_osds,
                                 add_source, apt_update, status_set, log,
-                                service_start, service_stop, chownr,
-                                apt_install):
+                                service_restart, chownr, apt_install,
+                                dirs_need_ownership_update):
         config.side_effect = config_side_effect
         get_version.side_effect = [0.80, 0.94]
         systemd.return_value = False
         local_osds.return_value = [0, 1, 2]
+        dirs_need_ownership_update.return_value = False
 
         ceph.upgrade_osd('hammer')
-        service_stop.assert_called_with('ceph-osd-all')
-        service_start.assert_called_with('ceph-osd-all')
+        service_restart.assert_called_with('ceph-osd-all')
         status_set.assert_has_calls([
             call('maintenance', 'Upgrading osd'),
         ])
@@ -97,10 +99,12 @@ class UpgradeRollingTestCase(unittest.TestCase):
         # Make sure on an Upgrade to Hammer that chownr was NOT called.
         assert not chownr.called
 
+    @patch('ceph._upgrade_single_osd')
+    @patch('ceph.update_owner')
+    @patch('os.listdir')
+    @patch('ceph._get_child_dirs')
+    @patch('ceph.dirs_need_ownership_update')
     @patch('ceph.apt_install')
-    @patch('ceph.chownr')
-    @patch('ceph.service_stop')
-    @patch('ceph.service_start')
     @patch('ceph.log')
     @patch('ceph.status_set')
     @patch('ceph.apt_update')
@@ -111,19 +115,31 @@ class UpgradeRollingTestCase(unittest.TestCase):
     @patch('ceph.config')
     def test_upgrade_osd_jewel(self, config, get_version, systemd,
                                local_osds, add_source, apt_update, status_set,
-                               log, service_start, service_stop, chownr,
-                               apt_install):
+                               log, apt_install, dirs_need_ownership_update,
+                               _get_child_dirs, listdir, update_owner,
+                               _upgrade_single_osd):
         config.side_effect = config_side_effect
         get_version.side_effect = [0.94, 10.1]
         systemd.return_value = False
         local_osds.return_value = [0, 1, 2]
+        listdir.return_value = ['osd', 'mon', 'fs']
+        _get_child_dirs.return_value = ['ceph-0', 'ceph-1', 'ceph-2']
+        dirs_need_ownership_update.return_value = True
 
         ceph.upgrade_osd('jewel')
-        service_stop.assert_called_with('ceph-osd-all')
-        service_start.assert_called_with('ceph-osd-all')
+        update_owner.assert_has_calls([
+            call(ceph.CEPH_BASE_DIR, recurse_dirs=False),
+            call(os.path.join(ceph.CEPH_BASE_DIR, 'mon')),
+            call(os.path.join(ceph.CEPH_BASE_DIR, 'fs')),
+        ])
+        _upgrade_single_osd.assert_has_calls([
+            call('0', 'ceph-0'),
+            call('1', 'ceph-1'),
+            call('2', 'ceph-2'),
+        ])
         status_set.assert_has_calls([
             call('maintenance', 'Upgrading osd'),
-            call('maintenance', 'Updating file ownership for OSDs')
+            call('maintenance', 'Upgrading packages to jewel')
         ])
         log.assert_has_calls(
             [
@@ -131,12 +147,76 @@ class UpgradeRollingTestCase(unittest.TestCase):
             call('Upgrading to: jewel')
             ]
         )
-        chownr.assert_has_calls(
-            [
-                call(group='ceph', owner='ceph', path='/var/lib/ceph',
-                     follow_links=True)
-            ]
-        )
+
+    @patch.object(ceph, 'stop_osd')
+    @patch.object(ceph, 'disable_osd')
+    @patch.object(ceph, 'update_owner')
+    @patch.object(ceph, 'enable_osd')
+    @patch.object(ceph, 'start_osd')
+    def test_upgrade_single_osd(self, start_osd, enable_osd, update_owner,
+                                disable_osd, stop_osd):
+        ceph._upgrade_single_osd(1, 'ceph-1')
+        stop_osd.assert_called_with(1)
+        disable_osd.assert_called_with(1)
+        update_owner.assert_called_with('/var/lib/ceph/osd/ceph-1')
+        enable_osd.assert_called_with(1)
+        start_osd.assert_called_with(1)
+
+    @patch.object(ceph, 'systemd')
+    @patch.object(ceph, 'service_stop')
+    def test_stop_osd(self, service_stop, systemd):
+        systemd.return_value = False
+        ceph.stop_osd(1)
+        service_stop.assert_called_with('ceph-osd', id=1)
+
+        systemd.return_value = True
+        ceph.stop_osd(2)
+        service_stop.assert_called_with('ceph-osd@2')
+
+    @patch.object(ceph, 'systemd')
+    @patch.object(ceph, 'service_start')
+    def test_start_osd(self, service_start, systemd):
+        systemd.return_value = False
+        ceph.start_osd(1)
+        service_start.assert_called_with('ceph-osd', id=1)
+
+        systemd.return_value = True
+        ceph.start_osd(2)
+        service_start.assert_called_with('ceph-osd@2')
+
+    @patch('subprocess.check_call')
+    @patch('os.path.exists')
+    @patch('os.unlink')
+    @patch('ceph.systemd')
+    def test_disable_osd(self, systemd, unlink, exists, check_call):
+        systemd.return_value = True
+        ceph.disable_osd(4)
+        check_call.assert_called_with(['systemctl', 'disable', 'ceph-osd@4'])
+
+        exists.return_value = True
+        systemd.return_value = False
+        ceph.disable_osd(3)
+        unlink.assert_called_with('/var/lib/ceph/osd/ceph-3/ready')
+
+    @patch('subprocess.check_call')
+    @patch('ceph.update_owner')
+    @patch('ceph.systemd')
+    def test_enable_osd(self, systemd, update_owner, check_call):
+        systemd.return_value = True
+        ceph.enable_osd(5)
+        check_call.assert_called_with(['systemctl', 'enable', 'ceph-osd@5'])
+
+        systemd.return_value = False
+        mo = mock_open()
+        # Detect which builtin open version we need to mock based on
+        # the python version.
+        bs = 'builtins' if sys.version_info > (3, 0) else '__builtin__'
+        with patch('%s.open' % bs, mo):
+            ceph.enable_osd(6)
+            mo.assert_called_once_with('/var/lib/ceph/osd/ceph-6/ready', 'w')
+            handle = mo()
+            handle.write.assert_called_with('ready')
+            update_owner.assert_called_with('/var/lib/ceph/osd/ceph-6/ready')
 
     @patch('ceph.socket')
     @patch('ceph.get_osd_tree')
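The builtins-open patching in test_enable_osd above is easy to misread, so
here is the same mock_open() pattern in isolation as a self-contained
sketch; the patch target differs between Python 2 and 3, which is why the
test consults sys.version_info:

import sys

from mock import mock_open, patch


def write_ready(path):
    # Same shape as the upstart branch of enable_osd().
    with open(path, 'w') as f:
        f.write('ready')


mo = mock_open()
# 'builtins' on Python 3, '__builtin__' on Python 2.
bs = 'builtins' if sys.version_info >= (3, 0) else '__builtin__'
with patch('%s.open' % bs, mo):
    write_ready('/var/lib/ceph/osd/ceph-6/ready')

mo.assert_called_once_with('/var/lib/ceph/osd/ceph-6/ready', 'w')
mo().write.assert_called_with('ready')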