Remove provisioned capacity calculation on non-thin-provisioned backends

On those backends, there is no need to calculate provisioned_capacity_gb,
as it is not used during scheduling. The calculation did not scale well in
large environments, since it requires many database queries.

Closes-Bug: #1869712

Change-Id: Icb8947991723a2a1cc334a72276a35cf50fc1b7d
Jose Castro Leon 2020-04-07 11:24:00 +02:00
parent 7c6bd92b66
commit 2c30fed486
3 changed files with 203 additions and 30 deletions
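
In effect, PoolState now consults the database only when a pool is thin
provisioned and the driver did not report provisioned_capacity_gb itself.
A minimal sketch of that gating, where estimate_from_db is a hypothetical
callable standing in for the per-share database scan (the real code calls
self._estimate_provisioned_capacity):

    def resolve_provisioned_capacity(capability, thin_provisioning,
                                     estimate_from_db):
        # Value reported by the driver, if any.
        provisioned = capability.get('provisioned_capacity_gb')
        # Only thin-provisioned pools need the apparent capacity for
        # scheduling, so the costly estimate is gated on both conditions.
        if thin_provisioning and provisioned is None:
            provisioned = estimate_from_db()
        return provisioned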


@@ -422,6 +422,8 @@ class PoolState(HostState):
'allocated_capacity_gb', 0)
self.qos = capability.get('qos', False)
self.reserved_percentage = capability['reserved_percentage']
+            self.thin_provisioning = scheduler_utils.thin_provisioning(
+                capability.get('thin_provisioning', False))
# NOTE(xyang): provisioned_capacity_gb is the apparent total
# capacity of all the shares created on a backend, which is
# greater than or equal to allocated_capacity_gb, which is the
@@ -430,16 +432,19 @@ class PoolState(HostState):
# NOTE(nidhimittalhada): If 'provisioned_capacity_gb' is not set,
# then calculating 'provisioned_capacity_gb' from share sizes
# on host, as per information available in manila database.
+            # NOTE(jose-castro-leon): Only calculate provisioned_capacity_gb
+            # on thin provisioned pools
            self.provisioned_capacity_gb = capability.get(
-                'provisioned_capacity_gb') or (
-                    self._estimate_provisioned_capacity(self.host,
-                                                        context=context))
+                'provisioned_capacity_gb')
+            if self.thin_provisioning and self.provisioned_capacity_gb is None:
+                self.provisioned_capacity_gb = (
+                    self._estimate_provisioned_capacity(self.host,
+                                                        context=context))
self.max_over_subscription_ratio = capability.get(
'max_over_subscription_ratio',
CONF.max_over_subscription_ratio)
-            self.thin_provisioning = capability.get(
-                'thin_provisioning', False)
self.dedupe = capability.get(
'dedupe', False)
self.compression = capability.get(
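
The diff above relies on scheduler_utils.thin_provisioning() to collapse the
thin_provisioning capability, which drivers may report either as a bool or as
a list of supported values, into a single bool. Judging by the test cases
below ([False] behaves as non-thin, [True, False] as thin-capable), the
normalization is equivalent to this sketch, which is an assumption rather
than the verbatim helper:

    def thin_provisioning(thin_capability):
        # A list such as [True, False] advertises both modes; treat the
        # pool as thin-capable if True is among the supported values.
        if isinstance(thin_capability, list):
            return True in thin_capability
        return bool(thin_capability)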


@@ -30,6 +30,7 @@ from manila import db
from manila import exception
from manila.scheduler.filters import base_host
from manila.scheduler import host_manager
+from manila.scheduler import utils as scheduler_utils
from manila import test
from manila.tests.scheduler import fakes
from manila import utils
@@ -867,6 +868,7 @@ class PoolStateTestCase(test.TestCase):
'share_capability':
{'total_capacity_gb': 1024, 'free_capacity_gb': 512,
'reserved_percentage': 0, 'timestamp': None,
+                     'thin_provisioning': True,
'cap1': 'val1', 'cap2': 'val2'},
'instances':
[
@@ -884,6 +886,70 @@ class PoolStateTestCase(test.TestCase):
},
]
},
+            {
+                'share_capability':
+                    {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
+                     'reserved_percentage': 0, 'timestamp': None,
+                     'thin_provisioning': False, 'cap1': 'val1', 'cap2': 'val2'},
+                'instances':
+                    [
+                        {
+                            'id': 1, 'host': 'host1',
+                            'status': 'available',
+                            'share_id': 11, 'size': 1,
+                            'updated_at': timeutils.utcnow()
+                        },
+                        {
+                            'id': 2, 'host': 'host1',
+                            'status': 'available',
+                            'share_id': 12, 'size': None,
+                            'updated_at': timeutils.utcnow()
+                        },
+                    ]
+            },
+            {
+                'share_capability':
+                    {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
+                     'reserved_percentage': 0, 'timestamp': None,
+                     'thin_provisioning': [False], 'cap1': 'val1', 'cap2': 'val2'},
+                'instances':
+                    [
+                        {
+                            'id': 1, 'host': 'host1',
+                            'status': 'available',
+                            'share_id': 11, 'size': 1,
+                            'updated_at': timeutils.utcnow()
+                        },
+                        {
+                            'id': 2, 'host': 'host1',
+                            'status': 'available',
+                            'share_id': 12, 'size': None,
+                            'updated_at': timeutils.utcnow()
+                        },
+                    ]
+            },
+            {
+                'share_capability':
+                    {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
+                     'reserved_percentage': 0, 'timestamp': None,
+                     'thin_provisioning': [True, False], 'cap1': 'val1',
+                     'cap2': 'val2'},
+                'instances':
+                    [
+                        {
+                            'id': 1, 'host': 'host1',
+                            'status': 'available',
+                            'share_id': 11, 'size': 4,
+                            'updated_at': timeutils.utcnow()
+                        },
+                        {
+                            'id': 2, 'host': 'host1',
+                            'status': 'available',
+                            'share_id': 12, 'size': None,
+                            'updated_at': timeutils.utcnow()
+                        },
+                    ]
+            },
{
'share_capability':
{'total_capacity_gb': 1024, 'free_capacity_gb': 512,
@@ -892,6 +958,30 @@ class PoolStateTestCase(test.TestCase):
'ipv6_support': False},
'instances': []
},
+            {
+                'share_capability':
+                    {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
+                     'reserved_percentage': 0, 'timestamp': None,
+                     'thin_provisioning': True, 'cap1': 'val1', 'cap2': 'val2',
+                     'ipv4_support': True, 'ipv6_support': False},
+                'instances': []
+            },
+            {
+                'share_capability':
+                    {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
+                     'reserved_percentage': 0, 'timestamp': None,
+                     'thin_provisioning': [False], 'cap1': 'val1', 'cap2': 'val2',
+                     'ipv4_support': True, 'ipv6_support': False},
+                'instances': []
+            },
+            {
+                'share_capability':
+                    {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
+                     'reserved_percentage': 0, 'timestamp': None,
+                     'thin_provisioning': [True, False], 'cap1': 'val1',
+                     'cap2': 'val2', 'ipv4_support': True, 'ipv6_support': False},
+                'instances': []
+            },
{
'share_capability':
{'total_capacity_gb': 1024, 'free_capacity_gb': 512,
@@ -934,12 +1024,76 @@ class PoolStateTestCase(test.TestCase):
},
]
},
+            {
+                'share_capability':
+                    {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
+                     'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 1,
+                     'thin_provisioning': True, 'reserved_percentage': 0,
+                     'timestamp': None, 'cap1': 'val1', 'cap2': 'val2'},
+                'instances':
+                    [
+                        {
+                            'id': 1, 'host': 'host1',
+                            'status': 'available',
+                            'share_id': 11, 'size': 1,
+                            'updated_at': timeutils.utcnow()
+                        },
+                    ]
+            },
+            {
+                'share_capability':
+                    {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
+                     'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 1,
+                     'thin_provisioning': [False], 'reserved_percentage': 0,
+                     'timestamp': None, 'cap1': 'val1', 'cap2': 'val2'},
+                'instances':
+                    [
+                        {
+                            'id': 1, 'host': 'host1',
+                            'status': 'available',
+                            'share_id': 11, 'size': 1,
+                            'updated_at': timeutils.utcnow()
+                        },
+                    ]
+            },
+            {
+                'share_capability':
+                    {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
+                     'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 1,
+                     'thin_provisioning': [True, False], 'reserved_percentage': 0,
+                     'timestamp': None, 'cap1': 'val1', 'cap2': 'val2'},
+                'instances':
+                    [
+                        {
+                            'id': 1, 'host': 'host1',
+                            'status': 'available',
+                            'share_id': 11, 'size': 1,
+                            'updated_at': timeutils.utcnow()
+                        },
+                    ]
+            },
{
'share_capability':
{'total_capacity_gb': 1024, 'free_capacity_gb': 512,
'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 256,
-                     'reserved_percentage': 0, 'timestamp': None, 'cap1': 'val1',
-                     'cap2': 'val2'},
+                     'thin_provisioning': False, 'reserved_percentage': 0,
+                     'timestamp': None, 'cap1': 'val1', 'cap2': 'val2'},
+                'instances':
+                    [
+                        {
+                            'id': 1, 'host': 'host1',
+                            'status': 'available',
+                            'share_id': 11, 'size': 1,
+                            'updated_at': timeutils.utcnow()
+                        },
+                    ]
+            },
+            {
+                'share_capability':
+                    {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
+                     'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 256,
+                     'thin_provisioning': [False], 'reserved_percentage': 0,
+                     'timestamp': None, 'cap1': 'val1', 'cap2': 'val2'},
'instances':
[
{
@@ -969,35 +1123,42 @@ class PoolStateTestCase(test.TestCase):
self.assertEqual(512, fake_pool.free_capacity_gb)
self.assertDictMatch(share_capability, fake_pool.capabilities)
-        if 'provisioned_capacity_gb' not in share_capability:
-            db.share_instances_get_all_by_host.assert_called_once_with(
-                fake_context, fake_pool.host, with_share_data=True)
+        if 'thin_provisioning' in share_capability:
+            thin_provisioned = scheduler_utils.thin_provisioning(
+                share_capability['thin_provisioning'])
+        else:
+            thin_provisioned = False
-            if len(instances) > 0:
-                self.assertEqual(4, fake_pool.provisioned_capacity_gb)
+        if thin_provisioned:
+            self.assertEqual(thin_provisioned, fake_pool.thin_provisioning)
+            if 'provisioned_capacity_gb' not in share_capability or (
+                    share_capability['provisioned_capacity_gb'] is None):
+                db.share_instances_get_all_by_host.assert_called_once_with(
+                    fake_context, fake_pool.host, with_share_data=True)
+                if len(instances) > 0:
+                    self.assertEqual(4, fake_pool.provisioned_capacity_gb)
+                else:
+                    self.assertEqual(0, fake_pool.provisioned_capacity_gb)
-            else:
-                self.assertEqual(0, fake_pool.provisioned_capacity_gb)
-            if 'allocated_capacity_gb' in share_capability:
-                self.assertEqual(share_capability['allocated_capacity_gb'],
-                                 fake_pool.allocated_capacity_gb)
-            elif 'allocated_capacity_gb' not in share_capability:
-                self.assertEqual(0, fake_pool.allocated_capacity_gb)
-        elif 'provisioned_capacity_gb' in share_capability and (
-                'allocated_capacity_gb' not in share_capability):
-            self.assertFalse(db.share_instances_get_all_by_host.called)
-            self.assertEqual(0, fake_pool.allocated_capacity_gb)
-            self.assertEqual(share_capability['provisioned_capacity_gb'],
-                             fake_pool.provisioned_capacity_gb)
-        elif 'provisioned_capacity_gb' in share_capability and (
-                'allocated_capacity_gb' in share_capability):
-            self.assertFalse(db.share_instances_get_all_by_host.called)
-            self.assertEqual(share_capability['provisioned_capacity_gb'],
-                             fake_pool.provisioned_capacity_gb)
+        else:
+            self.assertFalse(fake_pool.thin_provisioning)
+            self.assertFalse(db.share_instances_get_all_by_host.called)
+            if 'provisioned_capacity_gb' not in share_capability or (
+                    share_capability['provisioned_capacity_gb'] is None):
+                self.assertIsNone(fake_pool.provisioned_capacity_gb)
+            else:
+                self.assertEqual(share_capability['provisioned_capacity_gb'],
+                                 fake_pool.provisioned_capacity_gb)
if 'allocated_capacity_gb' in share_capability:
self.assertEqual(share_capability['allocated_capacity_gb'],
fake_pool.allocated_capacity_gb)
self.assertEqual(share_capability['provisioned_capacity_gb'],
fake_pool.provisioned_capacity_gb)
else:
self.assertEqual(0, fake_pool.allocated_capacity_gb)
if 'ipv4_support' in share_capability:
self.assertEqual(share_capability['ipv4_support'],
fake_pool.ipv4_support)
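
Taken together with the two sketches above, the behavior the updated
assertions encode can be exercised directly; the capability values mirror
the test data:

    caps_thin = {'thin_provisioning': [True, False]}   # no provisioned value
    caps_thick = {'thin_provisioning': False}          # no provisioned value

    thin = thin_provisioning(caps_thin['thin_provisioning'])     # True
    thick = thin_provisioning(caps_thick['thin_provisioning'])   # False

    # The stubbed estimate (lambda: 4) runs only for the thin pool:
    print(resolve_provisioned_capacity(caps_thin, thin, lambda: 4))    # 4
    print(resolve_provisioned_capacity(caps_thick, thick, lambda: 4))  # None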


@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    Reduced scheduling time for non-thin-provisioned backends. On those
+    backends, there is no need to calculate provisioned_capacity_gb, since
+    it is not used during scheduling, and the calculation did not scale
+    well in large environments as it requires many database queries.