Merge "Ceph pool creation should be called only once" into stable/newton
This commit is contained in:
commit
f59d2c55ef
|
@ -89,6 +89,15 @@ class PgCountPipeline(VolumeObjectMethodsMixin, BasePipeline):
|
|||
@classmethod
|
||||
def process_deployment_for_cluster(cls, cluster, cluster_data):
|
||||
"""Added ceph related information to deployment info for cluster."""
|
||||
storage_attrs = cluster_data.setdefault('storage', {})
|
||||
|
||||
if 'pg_num' in storage_attrs and 'per_pool_pg_nums' in storage_attrs:
|
||||
logger.debug("pg_num %s and per_pool_pg_nums %s are already "
|
||||
"calculated for cluster %s. Getting values from "
|
||||
"the cluster attributes", storage_attrs['pg_num'],
|
||||
storage_attrs['per_pool_pg_nums'], cluster.id)
|
||||
return
|
||||
|
||||
all_nodes = {n.uid: n for n in cluster.nodes}
|
||||
osd_num = 0
|
||||
for n in cluster_data['nodes']:
|
||||
|
@ -100,7 +109,6 @@ class PgCountPipeline(VolumeObjectMethodsMixin, BasePipeline):
|
|||
part.get('size', 0) > 0):
|
||||
osd_num += 1
|
||||
|
||||
storage_attrs = cluster_data.setdefault('storage', {})
|
||||
pg_counts = get_pool_pg_count(
|
||||
osd_num=osd_num,
|
||||
pool_sz=int(storage_attrs['osd_pool_size']),
|
||||
|
@ -117,6 +125,20 @@ class PgCountPipeline(VolumeObjectMethodsMixin, BasePipeline):
|
|||
storage_attrs['pg_num'] = pg_counts['default_pg_num']
|
||||
storage_attrs['per_pool_pg_nums'] = pg_counts
|
||||
|
||||
cls._save_storage_attrs(cluster, storage_attrs['pg_num'],
|
||||
storage_attrs['per_pool_pg_nums'])
|
||||
|
||||
@classmethod
def _save_storage_attrs(cls, cluster, pg_num, per_pool_pg_nums):
    """Persist the calculated pg_num values into cluster attributes.

    Both values are written as hidden editable attributes so that a
    later deployment can read them back instead of recalculating
    (the pipeline checks for their presence before computing).
    """
    attrs = objects.Cluster.get_attributes(cluster)
    logger.debug("Saving pg_num and per_pool_pg_nums values to "
                 "cluster attributes")
    storage = attrs['editable']['storage']
    storage['pg_num'] = {'value': pg_num, 'type': 'hidden'}
    storage['per_pool_pg_nums'] = {'value': per_pool_pg_nums,
                                   'type': 'hidden'}
    objects.Cluster.update_attributes(cluster, attrs)
|
||||
|
||||
|
||||
class SetImageCacheMaxSizePipeline(VolumeObjectMethodsMixin, BasePipeline):
|
||||
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
# under the License.
|
||||
|
||||
from copy import deepcopy
|
||||
|
||||
from mock import patch
|
||||
import netaddr
|
||||
|
||||
|
@ -1710,6 +1711,51 @@ class TestHandlers(BaseIntegrationTest):
|
|||
task = self.env.launch_deployment()
|
||||
self.assertNotEqual(task.status, consts.TASK_STATUSES.error)
|
||||
|
||||
@mock_rpc()
def test_ceph_osd_pg_num_calculated_once(self):
    """Verify pg_num is computed by the first deployment only.

    Once pg_num / per_pool_pg_nums have been stored in the cluster
    attributes, a subsequent deployment — even after adding another
    ceph-osd node — must not invoke get_pool_pg_count again.
    """
    cluster = self.env.create(
        nodes_kwargs=[
            {'roles': ['controller', 'ceph-osd'],
             'pending_addition': True}])

    # Enable Ceph-backed volumes so the pg_num pipeline runs.
    storage_settings = {
        'volumes_ceph': {'value': True},
        'osd_pool_size': {'value': '1'},
        'volumes_lvm': {'value': False},
    }
    self.app.patch(
        reverse(
            'ClusterAttributesHandler',
            kwargs={'cluster_id': cluster['id']}),
        params=jsonutils.dumps(
            {'editable': {'storage': storage_settings}}),
        headers=self.default_headers)

    first_task = self.env.launch_deployment()
    self.assertNotEqual(first_task.status, consts.TASK_STATUSES.error)

    # The first deployment must have persisted the calculated values.
    editable = objects.Cluster.get_editable_attributes(cluster)
    self.assertIn('pg_num', editable['storage'])
    self.assertIn('per_pool_pg_nums', editable['storage'])

    objects.Task.delete(first_task)
    self.db.flush()

    # A new ceph-osd node would normally change the OSD count...
    self.env.create_node(roles=['ceph-osd'], cluster_id=cluster.id,
                         pending_addition=True)

    # ...yet the second deployment must reuse the stored values
    # rather than recalculating them.
    with patch('nailgun.extensions.volume_manager.extension.'
               'get_pool_pg_count') as pg_count_mock:
        second_task = self.env.launch_deployment()
        self.assertNotEqual(second_task.status,
                            consts.TASK_STATUSES.error)

        self.assertFalse(pg_count_mock.called)
|
||||
|
||||
@mock_rpc()
|
||||
def test_admin_untagged_intersection(self):
|
||||
meta = self.env.default_metadata()
|
||||
|
|
Loading…
Reference in New Issue