Remove instances of "infinite" capacity from Manila

As discussed at the Liberty Mid-Cycle meetup,
instances of "infinite" capacity should be replaced
with "unknown", and this capacity should be sorted
to the bottom by the capacity weigher.

Change-Id: I9b37a5ff9cceb35a964b2a0d243688eb73e5cadc
Closes-Bug: #1487478
Igor Malinovskiy 2015-08-31 19:05:38 +03:00
parent 6c63dee78f
commit 762efd1ed7
12 changed files with 116 additions and 71 deletions
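
The core of the change can be illustrated with a small standalone sketch
(hypothetical helper names, not the Manila code itself): a host reporting
"unknown" free capacity is mapped to an extreme value whose sign follows
capacity_weight_multiplier, so such a host always sorts to the bottom of
the weighed host list.

# Minimal sketch, assuming only stock Python; sort_key is a hypothetical
# helper, not part of Manila.
def sort_key(free_capacity_gb, capacity_weight_multiplier=1.0):
    """Return a sortable stand-in for a host's free capacity."""
    if free_capacity_gb == 'unknown':
        # Positive multiplier: bigger is better, so 'unknown' becomes -inf.
        # Negative multiplier: smaller is better, so 'unknown' becomes +inf.
        return float('-inf') if capacity_weight_multiplier > 0 else float('inf')
    return float(free_capacity_gb)


hosts = {'host1': 200, 'host2': 'unknown', 'host3': 512}
ranked = sorted(hosts, key=lambda h: sort_key(hosts[h]), reverse=True)
print(ranked)  # ['host3', 'host1', 'host2'] -> the 'unknown' host is last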

View File

@@ -184,6 +184,10 @@ class InvalidGlobalAPIVersion(Invalid):
"is %(min_ver)s and maximum is %(max_ver)s.")
class InvalidCapacity(Invalid):
message = _("Invalid capacity: %(name)s = %(value)s.")
class NotFound(ManilaException):
message = _("Resource could not be found.")
code = 404

View File

@@ -44,16 +44,16 @@ class CapacityFilter(filters.BaseHostFilter):
free_space = host_state.free_capacity_gb
total_space = host_state.total_capacity_gb
reserved = float(host_state.reserved_percentage) / 100
if free_space in ('infinite', 'unknown'):
if free_space == 'unknown':
# NOTE(zhiteng) For back-ends that cannot report actual
# available capacity, we assume they are able to serve the
# request. Even if they are not, the retry mechanism is
# able to handle the failure by rescheduling.
return True
elif total_space in ('infinite', 'unknown'):
# NOTE(xyang): If total_space is 'infinite' or 'unknown' and
elif total_space == 'unknown':
# NOTE(xyang): If total_space is 'unknown' and
# reserved is 0, we assume the back-ends can serve the request.
# If total_space is 'infinite' or 'unknown' and reserved
# If total_space is 'unknown' and reserved
# is not 0, we cannot calculate the reserved space.
# float(total_space) will throw an exception. total*reserved
# also won't work. So the back-ends cannot serve the request.
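
As a rough illustration of the rule described in the NOTE comments above
(standalone sketch with assumed names and simplified math, not Manila's
CapacityFilter): "unknown" free space passes unconditionally, while
"unknown" total space passes only when nothing is reserved, because the
reserved amount cannot be derived from a non-numeric total.

# Standalone sketch; host_passes and its simplified arithmetic are
# assumptions for illustration, not the real filter implementation.
def host_passes(free_space, total_space, reserved_percentage, requested_gb):
    reserved = float(reserved_percentage) / 100
    if free_space == 'unknown':
        # Back-end cannot report free space; assume it can serve the request
        # and let the retry/reschedule mechanism handle any failure.
        return True
    if total_space == 'unknown':
        # total * reserved cannot be computed, so pass only if nothing
        # is reserved.
        return reserved == 0
    return free_space - total_space * reserved >= requested_gb


print(host_passes('unknown', 'unknown', 0, 100))  # True
print(host_passes(500, 'unknown', 10, 100))       # False: reserved space unknown
print(host_passes(500, 1000, 10, 100))            # True: 400 GB usable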

View File

@@ -290,15 +290,16 @@ class HostState(object):
def consume_from_share(self, share):
"""Incrementally update host state from an share."""
share_gb = share['size']
if self.free_capacity_gb == 'infinite':
# There's virtually infinite space on back-end
pass
elif self.free_capacity_gb == 'unknown':
# Unable to determine the actual free space on back-end
pass
else:
self.free_capacity_gb -= share_gb
if (isinstance(self.free_capacity_gb, six.string_types)
and self.free_capacity_gb != 'unknown'):
raise exception.InvalidCapacity(
name='free_capacity_gb',
value=six.text_type(self.free_capacity_gb)
)
if self.free_capacity_gb != 'unknown':
self.free_capacity_gb -= share['size']
self.updated = timeutils.utcnow()
def __repr__(self):
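
The new consume logic can be summarized with a small self-contained sketch;
it uses plain str instead of six.string_types, and a local InvalidCapacity
class stands in for manila.exception.InvalidCapacity.

# Self-contained sketch of the consume behaviour shown in the hunk above.
class InvalidCapacity(Exception):
    pass


def consume(free_capacity_gb, share_size_gb):
    # Any string other than 'unknown' (e.g. the old 'infinite') is rejected.
    if isinstance(free_capacity_gb, str) and free_capacity_gb != 'unknown':
        raise InvalidCapacity(
            "Invalid capacity: free_capacity_gb = %s" % free_capacity_gb)
    # 'unknown' is left untouched; numeric capacity is decremented.
    if free_capacity_gb != 'unknown':
        free_capacity_gb -= share_size_gb
    return free_capacity_gb


print(consume(100, 10))        # 90
print(consume('unknown', 10))  # unknown
# consume('infinite', 10)      # would raise InvalidCapacity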

View File

@@ -55,10 +55,12 @@ class CapacityWeigher(weights.BaseHostWeigher):
reserved = float(host_state.reserved_percentage) / 100
free_space = host_state.free_capacity_gb
total_space = host_state.total_capacity_gb
if {'unknown', 'infinite'}.intersection({total_space, free_space}):
# (zhiteng) 'infinite' and 'unknown' are treated the same
# here, for sorting purpose.
free = float('inf')
if 'unknown' in (total_space, free_space):
# NOTE(u_glide): "unknown" capacity always sorts to the bottom
if CONF.capacity_weight_multiplier > 0:
free = float('-inf')
else:
free = float('inf')
else:
total = float(total_space)
if host_state.thin_provisioning:
@@ -73,3 +75,26 @@ class CapacityWeigher(weights.BaseHostWeigher):
# taking into account the reserved space.
free = math.floor(free_space - total * reserved)
return free
def weigh_objects(self, weighed_obj_list, weight_properties):
weights = super(CapacityWeigher, self).weigh_objects(weighed_obj_list,
weight_properties)
# NOTE(u_glide): Replace -inf with (minimum - 1) and
# inf with (maximum + 1) to avoid errors in
# manila.openstack.common.scheduler.base_weight.normalize() method
if self.minval == float('-inf'):
self.minval = self.maxval
for val in weights:
if float('-inf') < val < self.minval:
self.minval = val
self.minval -= 1
return [self.minval if w == float('-inf') else w for w in weights]
elif self.maxval == float('inf'):
self.maxval = self.minval
for val in weights:
if self.maxval < val < float('inf'):
self.maxval = val
self.maxval += 1
return [self.maxval if w == float('inf') else w for w in weights]
else:
return weights
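
The +inf/-inf placeholders are rewritten before normalization because the
normalize() helper divides by (max - min), which does not work with infinite
bounds; "unknown" hosts are therefore moved just outside the finite range.
A rough standalone sketch of that idea (clamp_unknowns is a hypothetical
helper, not Manila's weigh_objects):

# Rough sketch of the clamping idea, under the assumption that only one kind
# of infinity can appear at a time, as in the weigher above.
def clamp_unknowns(weights):
    finite = [w for w in weights if w not in (float('inf'), float('-inf'))]
    if float('-inf') in weights:
        # Place 'unknown' hosts just below the worst finite weight.
        floor = (min(finite) if finite else 0) - 1
        return [floor if w == float('-inf') else w for w in weights]
    if float('inf') in weights:
        # With a negative multiplier, 'unknown' hosts carry +inf instead.
        ceiling = (max(finite) if finite else 0) + 1
        return [ceiling if w == float('inf') else w for w in weights]
    return weights


print(clamp_unknowns([1421.0, 2048.0, float('-inf')]))  # [1421.0, 2048.0, 1420.0]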

View File

@@ -494,8 +494,8 @@ class ShareDriver(object):
vendor_name='Open Source',
driver_version='1.0',
storage_protocol=None,
total_capacity_gb='infinite',
free_capacity_gb='infinite',
total_capacity_gb='unknown',
free_capacity_gb='unknown',
reserved_percentage=0,
QoS_support=False,
pools=self.pools or None,

View File

@@ -909,8 +909,8 @@ class GlusterfsNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver):
# TODO(deepakcs): Change below once glusterfs supports volume
# specific stats via the gluster cli.
data['total_capacity_gb'] = 'infinite'
data['free_capacity_gb'] = 'infinite'
data['total_capacity_gb'] = 'unknown'
data['free_capacity_gb'] = 'unknown'
super(GlusterfsNativeShareDriver, self)._update_share_stats(data)

View File

@@ -67,6 +67,8 @@ SHARE_SERVICES_WITH_POOLS = [
# service on host5 is disabled
dict(id=5, host='host5@EEE', topic='share', disabled=True,
availability_zone='zone4', updated_at=timeutils.utcnow()),
dict(id=5, host='host6@FFF', topic='share', disabled=True,
availability_zone='zone5', updated_at=timeutils.utcnow()),
]
SHARE_SERVICE_STATES_WITH_POOLS = {
@@ -139,6 +141,23 @@ SHARE_SERVICE_STATES_WITH_POOLS = {
provisioned_capacity_gb=100,
max_over_subscription_ratio=1.0,
thin_provisioning=False)]),
'host6@FFF': dict(share_backend_name='FFF',
timestamp=None, reserved_percentage=0,
driver_handles_share_servers=False,
pools=[dict(pool_name='pool6a',
total_capacity_gb='unknown',
free_capacity_gb='unknown',
reserved_percentage=0,
provisioned_capacity_gb=100,
max_over_subscription_ratio=1.0,
thin_provisioning=False),
dict(pool_name='pool6b',
total_capacity_gb='unknown',
free_capacity_gb='unknown',
reserved_percentage=0,
provisioned_capacity_gb=100,
max_over_subscription_ratio=1.0,
thin_provisioning=False)]),
}
@@ -156,8 +175,6 @@ class FakeHostManager(host_manager.HostManager):
'host1': {'total_capacity_gb': 1024,
'free_capacity_gb': 1024,
'allocated_capacity_gb': 0,
'provisioned_capacity_gb': 0,
'max_over_subscription_ratio': 1.0,
'thin_provisioning': False,
'reserved_percentage': 10,
'timestamp': None},
@@ -193,6 +210,12 @@
'thin_provisioning': True,
'reserved_percentage': 5,
'timestamp': None},
'host6': {'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
'allocated_capacity_gb': 1548,
'thin_provisioning': False,
'reserved_percentage': 5,
'timestamp': None},
}
@@ -215,6 +238,8 @@ def mock_host_manager_db_calls(mock_obj, disabled=None):
availability_zone='zone3', updated_at=timeutils.utcnow()),
dict(id=5, host='host5', topic='share', disabled=False,
availability_zone='zone3', updated_at=timeutils.utcnow()),
dict(id=6, host='host6', topic='share', disabled=False,
availability_zone='zone4', updated_at=timeutils.utcnow()),
]
if disabled is None:
mock_obj.return_value = services

View File

@@ -36,13 +36,13 @@ class CapacityWeigherTestCase(test.TestCase):
self.weight_handler = weights.HostWeightHandler(
'manila.scheduler.weights')
def _get_weighed_host(self, hosts, weight_properties=None):
def _get_weighed_host(self, hosts, weight_properties=None, index=0):
if weight_properties is None:
weight_properties = {'size': 1}
return self.weight_handler.get_weighed_objects(
[capacity.CapacityWeigher],
hosts,
weight_properties)[0]
weight_properties)[index]
@mock.patch('manila.db.sqlalchemy.api.service_get_all_by_topic')
def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False):
@@ -87,6 +87,9 @@
# free_capacity_gb = 500
# free = math.floor(2048 * 1.5 - 1548 - 2048 * 0.05) = 1421.0
# weight = 0.65
# host6: thin_provisioning = False
# free = -inf
# weight = 0.0
# so, host2 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
@@ -94,6 +97,14 @@
self.assertEqual(
'host2', utils.extract_host(weighed_host.obj.host))
def test_unknown_is_last(self):
hostinfo_list = self._get_all_hosts()
last_host = self._get_weighed_host(hostinfo_list, index=-1)
self.assertEqual(
'host6', utils.extract_host(last_host.obj.host))
self.assertEqual(0.0, last_host.weight)
def test_capacity_weight_multiplier_negative_1(self):
self.flags(capacity_weight_multiplier=-1.0)
hostinfo_list = self._get_all_hosts()
@@ -126,6 +137,10 @@
# free = math.floor(2048 * 1.5 - 1548 - 2048 * 0.05) = 1421.0
# free * (-1) = -1421.0
# weight = -0.65
# host6: thin_provisioning = False
# free = inf
# free * (-1) = -inf
# weight = 0.0
# so, host4 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
@@ -165,6 +180,9 @@
# free = math.floor(2048 * 1.5 - 1548 - 2048 * 0.05) = 1421.0
# free * 2 = 2842.0
# weight = 1.29
# host6: thin_provisioning = False
# free = -inf
# weight = 0.0
# so, host2 should win:
weighed_host = self._get_weighed_host(hostinfo_list)

View File

@@ -85,8 +85,8 @@ class HostFiltersTestCase(test.TestCase):
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@ddt.data('infinite', 'unknown')
def test_capacity_filter_passes_infinite_unknown(self, free):
def test_capacity_filter_passes_unknown(self):
free = 'unknown'
self._stub_service_is_up(True)
filt_cls = self.class_map['CapacityFilter']()
filter_properties = {'size': 100}
@@ -98,7 +98,6 @@
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@ddt.data(
{'free_capacity': 'infinite', 'total_capacity': 'infinite'},
{'free_capacity': 'unknown', 'total_capacity': 'unknown'})
@ddt.unpack
def test_capacity_filter_passes_total(self, free_capacity,
@@ -115,7 +114,7 @@
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@ddt.data('infinite', 'unknown', 0)
@ddt.data('unknown', 0)
def test_capacity_filter_fails_total(self, total):
self._stub_service_is_up(True)
filt_cls = self.class_map['CapacityFilter']()

View File

@@ -626,27 +626,9 @@ class HostStateTestCase(test.TestCase):
self.assertEqual(10000, fake_host.pools['pool3'].total_capacity_gb)
self.assertEqual(10000, fake_host.pools['pool3'].free_capacity_gb)
def test_update_from_share_infinite_capability(self):
share_capability = {'total_capacity_gb': 'infinite',
'free_capacity_gb': 'infinite',
'reserved_percentage': 0,
'timestamp': None}
fake_host = host_manager.HostState('host1#_pool0')
self.assertIsNone(fake_host.free_capacity_gb)
fake_host.update_from_share_capability(share_capability)
# Backend level stats remain uninitialized
self.assertEqual(fake_host.total_capacity_gb, 0)
self.assertIsNone(fake_host.free_capacity_gb)
# Pool stats have been updated
self.assertEqual(fake_host.pools['_pool0'].total_capacity_gb,
'infinite')
self.assertEqual(fake_host.pools['_pool0'].free_capacity_gb,
'infinite')
def test_update_from_share_unknown_capability(self):
share_capability = {
'total_capacity_gb': 'infinite',
'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
'reserved_percentage': 0,
'timestamp': None
@@ -660,7 +642,7 @@
self.assertIsNone(fake_host.free_capacity_gb)
# Pool stats have been updated
self.assertEqual(fake_host.pools['_pool0'].total_capacity_gb,
'infinite')
'unknown')
self.assertEqual(fake_host.pools['_pool0'].free_capacity_gb,
'unknown')
@@ -681,25 +663,9 @@
self.assertEqual(fake_host.free_capacity_gb,
free_capacity - share_size)
def test_consume_from_share_infinite_capability(self):
share_capability = {
'total_capacity_gb': 'infinite',
'free_capacity_gb': 'infinite',
'reserved_percentage': 0,
'timestamp': None
}
fake_host = host_manager.PoolState('host1', share_capability, '_pool0')
share_size = 1000
fake_share = {'id': 'foo', 'size': share_size}
fake_host.update_from_share_capability(share_capability)
fake_host.consume_from_share(fake_share)
self.assertEqual(fake_host.total_capacity_gb, 'infinite')
self.assertEqual(fake_host.free_capacity_gb, 'infinite')
def test_consume_from_share_unknown_capability(self):
share_capability = {
'total_capacity_gb': 'infinite',
'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
'reserved_percentage': 0,
'timestamp': None
@@ -710,9 +676,16 @@
fake_host.update_from_share_capability(share_capability)
fake_host.consume_from_share(fake_share)
self.assertEqual(fake_host.total_capacity_gb, 'infinite')
self.assertEqual(fake_host.total_capacity_gb, 'unknown')
self.assertEqual(fake_host.free_capacity_gb, 'unknown')
def test_consume_from_share_invalid_capacity(self):
fake_host = host_manager.PoolState('host1', {}, '_pool0')
fake_host.free_capacity_gb = 'invalid_foo_string'
self.assertRaises(exception.InvalidCapacity,
fake_host.consume_from_share, 'fake')
def test_repr(self):
capability = {

View File

@@ -133,8 +133,8 @@ class EMCShareFrameworkTestCase(test.TestCase):
data["vendor_name"] = 'EMC'
data["driver_version"] = '1.0'
data["storage_protocol"] = 'NFS_CIFS'
data['total_capacity_gb'] = 'infinite'
data['free_capacity_gb'] = 'infinite'
data['total_capacity_gb'] = 'unknown'
data['free_capacity_gb'] = 'unknown'
data['reserved_percentage'] = 0
data['QoS_support'] = False
data['pools'] = None

View File

@@ -1313,8 +1313,8 @@ class GlusterfsNativeShareDriverTestCase(test.TestCase):
'storage_protocol': 'glusterfs',
'reserved_percentage': 0,
'QoS_support': False,
'total_capacity_gb': 'infinite',
'free_capacity_gb': 'infinite',
'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
'pools': None,
'snapshot_support': True,
}