Add Unlock settings tab test
This patch implements tests for the Unlock Settings Tab feature with randomly changed settings. Scenario: 1. Load clusters' configurations from the file 2. Revert snapshot with appropriate nodes count 3. Create a cluster from config 4. Update nodes according to the config 5. Deploy the cluster 6. Get cluster attributes 7. Randomly modify cluster attributes 8. Add ceph nodes if needed 9. Update cluster attributes with the changed ones 10. Redeploy cluster 11. Run OSTF 12. Go to the next config Duration xxx m Snapshot will be made for all failed configurations Change-Id: I3376dc29cf8083ead742384725e3e0a10dae2b34
This commit is contained in:
parent
9c2e008648
commit
8d6186acd3
|
@ -384,6 +384,11 @@ Test task coverage by LCM tests
|
|||
.. automodule:: fuelweb_test.tests.tests_lcm.test_task_coverage
|
||||
:members:
|
||||
|
||||
Test unlock settings tab
|
||||
------------------------
|
||||
.. automodule:: fuelweb_test.tests.test_unlock_settings_tab
|
||||
:members:
|
||||
|
||||
Test for unlock settings tab from different cluster states
|
||||
----------------------------------------------------------
|
||||
.. automodule:: fuelweb_test.tests.test_states_unlock_settings_tab
|
||||
|
|
|
@ -22,8 +22,10 @@ import inspect
|
|||
import json
|
||||
import os
|
||||
import posixpath
|
||||
import random
|
||||
import re
|
||||
import signal
|
||||
import string
|
||||
import time
|
||||
import traceback
|
||||
|
||||
|
@ -1181,3 +1183,305 @@ def install_configdb(master_node_ip):
|
|||
]
|
||||
for cmd in cmds:
|
||||
ssh_manager.execute_on_remote(ip=ip, cmd=cmd)
|
||||
|
||||
|
||||
# pylint: disable=eval-used
class SettingsChanger(object):
    """Randomly change cluster settings (the 'editable' attributes).

    Settings are flattened into ``self.options`` as
    ``{'<section>.<option>': [label, type, value, values, restrictions,
    changeable]}``.  Restriction expressions are translated into Python
    conditions by :meth:`_get_condition` and evaluated with ``eval``
    against ``self.options``.
    """

    # Options that must never be randomized: changing them would break
    # the deployment or requires matching changes elsewhere
    # (node roles, repositories, external services, ...).
    SKIPPED_FIELDS_LIST = [
        'additional_components.mongo', 'storage.osd_pool_size',
        'syslog.syslog_port', 'murano_settings.murano_repo_url',
        'external_mongo.hosts_ip', 'kernel_params.kernel', 'corosync.port',
        'repo_setup.uca_openstack_release', 'repo_setup.uca_repo_url',
        'public_ssl.cert_source', 'public_ssl.hostname',
        'operator_user.homedir', 'access.email', 'common.libvirt_type',
        'storage.images_vcenter', 'additional_components.ironic',
        'additional_components.ceilometer', 'workloads_collector.tenant',
        'access.user', 'workloads_collector.user', 'operator_user.name']

    def __init__(self, attrs=None):
        """:param attrs: cluster attributes dict with an 'editable' key"""
        self._attrs = attrs['editable'] if attrs else None
        # Only these option types are handled by the changer
        self._types = ['checkbox', 'radio', 'text', 'textarea']
        self.options = None

    @staticmethod
    def _gen_random(length=10):
        """Return a random lowercase ASCII string of the given length."""
        # string.ascii_lowercase exists on both Python 2 and 3;
        # string.lowercase was removed in Python 3
        return ''.join(random.choice(string.ascii_lowercase)
                       for _ in range(length))

    @staticmethod
    def _get_condition(restriction):
        """Translate a Fuel restriction into an eval-able expression.

        :param restriction: e.g. "settings:storage.volumes_lvm.value == true"
        :return: tuple of (python condition string, list of option keys
            referenced by the condition)
        """
        # the dot before 'value' is matched literally; option values are
        # either booleans (true/false) or single-quoted strings
        rst = re.findall(
            r"settings:(.+?)\.value ([=!]+) (true|false|'.+')",
            restriction)
        cmd = []
        keys = []
        for r in rst:
            cmd.append("self.options['{opt}'][2] {oper} {sign}".format(
                opt=r[0],
                oper=r[1],
                sign=True
                if r[2] == 'true'
                else False if r[2] == 'false' else r[2]))
            keys.append(r[0])
        # Join the sub-conditions with the same logical operator that
        # the original restriction used
        if ' and ' in restriction:
            expression = ' and '.join(cmd)
        elif ' or ' in restriction:
            expression = ' or '.join(cmd)
        else:
            expression = ' '.join(cmd)
        return expression, keys

    def __change_check_box(self, key, opt, keys=None):
        """Invert a checkbox value; freeze options the restriction uses."""
        opt['value'] = not self.options[key][2]
        self.options[key][2] = opt['value']
        if keys:
            # Options referenced by the restriction must not be touched
            # afterwards, otherwise the condition could be broken again
            for k in keys:
                self.options[k][5] = False
        logger.info('`{0}` was changed to `{1}`'.format(key, opt['value']))

    def __change_radio(self, key, opt, keys=None):
        """Switch a radio option to the first value differing from the
        current one; freeze options the restriction uses."""
        values = self.options[key][3]
        current_val = self.options[key][2]
        for val in values:
            if val['data'] != current_val:
                opt['value'] = val['data']
                self.options[key][2] = opt['value']
                if keys:
                    for k in keys:
                        self.options[k][5] = False
                logger.info(
                    '`{0}` was changed to `{1}`'.format(
                        key, val['data']))
                break
        else:
            logger.debug(
                'Failed to set radio {}'.format(self.options[key][0]))

    def __change_check_box_restr(self, condition, keys, key, opt):
        """Invert a checkbox only when its restriction does not fire and
        the option is still marked changeable (index 5)."""
        try:
            # eval is confined to self.options lookups produced by
            # _get_condition (class-level pylint disable covers this)
            if not eval(condition) and self.options[key][5]:
                self.__change_check_box(key, opt, keys)
            else:
                logger.info(
                    "Value `{}` couldn't be changed to `{}` due "
                    "to restrictions".format(key, not self.options[key][2]))
        except KeyError as e:
            logger.debug('Value was not found {}'.format(e))

    def __change_radio_restr(self, condition, keys, key, opt):
        """Switch a radio option only when its restriction does not fire."""
        try:
            if not eval(condition) and self.options[key][5]:
                self.__change_radio(key, opt, keys)
        except KeyError as e:
            logger.debug('Value was not found {}'.format(e))

    def _make_change_wo_restr(self, key, opt):
        """Change an unrestricted option according to its type."""
        tp = opt.get('type')
        if tp == 'checkbox':
            self.__change_check_box(key, opt)
        elif tp == 'radio':
            self.__change_radio(key, opt)
        elif tp == 'text':
            opt['value'] = self._gen_random()
            self.options[key][2] = opt['value']
            logger.info('`{0}` was changed to `{1}`'.format(key, opt['value']))

    def _make_change_with_restr(self, key, opt):
        """Change a restricted option, honouring each of its restrictions."""
        restrictions = self.options[key][4]
        tp = opt.get('type')
        if not restrictions:
            return
        for rest in restrictions:
            condition, keys = self._get_condition(rest)
            if tp == 'checkbox':
                self.__change_check_box_restr(condition, keys, key, opt)
            elif tp == 'radio':
                self.__change_radio_restr(condition, keys, key, opt)
            elif tp == 'text':
                logger.debug('Did you forget me `{}`?'.format(key))

    def _change_options(self, options, restr=False, ensure=False):
        """Apply the appropriate change function to every parsed option
        that is present both in ``options`` and in the cluster attributes.

        :param options: dict of parsed options to process
        :param restr: take restrictions into account while changing
        :param ensure: only verify/fix options violating restrictions
        """
        if not (restr or ensure):
            change_func = self._make_change_wo_restr
        elif restr and not ensure:
            change_func = self._make_change_with_restr
        else:
            change_func = self._ensure_options_correct

        for attr in self._attrs:
            for option in self._attrs[attr]:
                key = '.'.join([attr, option])
                if key not in options:
                    continue
                # skip some values
                if key in self.SKIPPED_FIELDS_LIST:
                    logger.debug("Skipped option `{}`".format(key))
                    continue
                opt = self._attrs[attr][option]
                change_func(key, opt)

    def _ensure_options_correct(self, key, opt):
        """Flip back checkbox options whose restriction became true after
        the random changes; radio/text violations are only logged."""
        restrictions = self.options[key][4]
        tp = opt.get('type')
        if not restrictions:
            return
        for rest in restrictions:
            condition, _ = self._get_condition(rest)
            if tp == 'checkbox':
                try:
                    if eval(condition) and self.options[key][2]:
                        self.__change_check_box(key, opt)
                except KeyError as e:
                    logger.debug('Value was not found {}'.format(e))
            elif tp == 'radio':
                logger.info('Radio `{0}` has value `{1}` when restriction '
                            'is: {2}'.format(key, opt['value'], condition))
            elif tp == 'text':
                logger.info('Do I rely on anything `{}`?'.format(key))

    @staticmethod
    def _calculate_options(options, randomize=None):
        """Split options into two dicts: without and with restrictions.

        :param options: dict of parsed options
        :param randomize: if set, only a random sample of (at most) that
            many options is considered
        :return: tuple (options without restrictions,
            options with restrictions)
        """
        if randomize:
            count = randomize if randomize < len(options) else len(options) - 1
            # list() is required on Python 3, where dict views are not
            # sequences random.sample() can draw from; the set makes the
            # membership tests below O(1)
            random_keys = set(random.sample(list(options), count))
            options_wo_restr = \
                {opt: options[opt]
                 for opt in options
                 if opt in random_keys and options[opt][4] is None}
            options_with_restr = \
                {opt: options[opt]
                 for opt in options
                 if opt in random_keys and options[opt][4] is not None}
        else:
            options_wo_restr = \
                {opt: options[opt]
                 for opt in options if options[opt][4] is None}
            options_with_restr = \
                {opt: options[opt]
                 for opt in options if options[opt][4] is not None}

        return options_wo_restr, options_with_restr

    def make_changes(self, attrs=None, options=None, randomize=None):
        """
        Function makes changes in cluster settings in parameters
        which are presented in options list

        :param attrs: cluster attributes
        :param options: dict with options provided by parser
        :param randomize: specify if random changing is needed
        :return: changed cluster attributes
        """
        if attrs:
            self.attrs = attrs
        # Create two dicts with options without restrictions and with them
        self.options = options if options else self.parse_settings()
        opt_wo_restr, opt_with_restr = \
            self._calculate_options(self.options, randomize)
        # First of all lets modify values without restrictions
        logger.info("Changing options without restrictions")
        self._change_options(opt_wo_restr, False)
        self.options.update(opt_wo_restr)
        # iterate through options with restrictions
        logger.info("Changing options with restrictions")
        self._change_options(opt_with_restr, True)
        logger.info("Check options for invalid due to restrictions "
                    "and modify it if it's necessary")
        self.options.update(opt_with_restr)
        self._change_options(self.options, True, True)
        return self.attrs

    def load_settings_from_file(self, file_name, file_type='json'):
        """
        Function loads settings from file

        :param file_name: file to load from
        :param file_type: file format `json` or `yaml`
        :return: nothing
        """
        try:
            with open(file_name, 'r') as f:
                if file_type == 'json':
                    self.attrs = json.load(f)
                else:
                    # NOTE(review): yaml.load without an explicit Loader
                    # is unsafe on untrusted input; settings files are
                    # local test fixtures, but consider yaml.safe_load
                    self.attrs = yaml.load(f)
        except ValueError:
            logger.error("Check settings file for consistency")
            raise
        except IOError:
            logger.error("Check settings file existence")
            raise
        else:
            logger.info('Settings were loaded from file {}'.format(file_name))

    def save_settings_to_file(self, file_name, file_type='json'):
        """
        Function saves settings to file

        :param file_name: file to save to
        :param file_type: file format `json` or `yaml`
        :return: nothing
        """
        with open(file_name, 'w') as f:
            if file_type == 'json':
                json.dump(self.attrs, f)
            else:
                yaml.dump(self.attrs, f)
        logger.info('Settings were saved to file {}'.format(file_name))

    # pylint: disable=too-many-nested-blocks
    def parse_settings(self, attrs=None):
        """
        Function parses attributes by its type

        :param attrs: a cluster attributes
        :return: a dict with options
        """
        attrs = attrs['editable'] if attrs else self._attrs
        self.options = {}
        for attr in attrs:
            for option in attrs[attr]:
                key = '.'.join([attr, option])
                opt = attrs[attr][option]
                tp = opt.get('type')
                label = opt.get('label')
                value = opt.get('value')
                values = opt.get('values')
                restrictions = opt.get('restrictions', None)
                if tp not in self._types:
                    continue
                if key in self.options:
                    logger.debug('`{0}` has duplicates'.format(key))
                    continue
                restr = None
                if restrictions:
                    restr = []
                    for rest in restrictions:
                        if isinstance(rest, dict):
                            if rest.get('condition') \
                                    and 'value' in rest['condition']:
                                restr.append(rest['condition'])
                            else:
                                # next(iter(...)) instead of keys()[0]:
                                # dict views are not subscriptable on py3
                                first_key = next(iter(rest))
                                if 'value' in first_key:
                                    restr.append(first_key)
                        else:
                            restr.append(rest)
                self.options[key] = \
                    [label, tp, value, values,
                     restr if restr else None, True]
                logger.debug(
                    'Option {0} has been added with {1}'.format(
                        key, self.options[key]))
        return self.options
    # pylint: enable=too-many-nested-blocks

    @property
    def attrs(self):
        """Full attributes dict in the API shape ({'editable': ...})."""
        dct = dict()
        dct['editable'] = self._attrs
        return dct

    @attrs.setter
    def attrs(self, attrs):
        self._attrs = attrs['editable']
# pylint: enable=eval-used
|
||||
|
|
|
@ -0,0 +1,175 @@
|
|||
---
# Cluster configurations consumed by
# fuelweb_test.tests.test_unlock_settings_tab (UnlockSettingsTab).
# Each entry describes one cluster to deploy: its name, a mapping of
# node names to role lists, the network provider settings, and an
# optional `settings` dict merged into the cluster-creation settings.
-
  name: "1Controller_2Computes_vlan"
  nodes:
    slave-01:
      - "controller"
    slave-02:
      - "compute"
    slave-03:
      - "compute"
  network:
    net_provider: "neutron"
    net_segment_type: "vlan"
-
  name: "1Controller_2Computes_gre"
  nodes:
    slave-01:
      - "controller"
    slave-02:
      - "compute"
    slave-03:
      - "compute"
  network:
    net_provider: "neutron"
    net_segment_type: "gre"
-
  name: "1Controller_2Computes_tun"
  nodes:
    slave-01:
      - "controller"
    slave-02:
      - "compute"
    slave-03:
      - "compute"
  network:
    net_provider: "neutron"
    net_segment_type: "tun"
-
  name: "1Controller_ceph_2Computes_ceph_vlan"
  nodes:
    slave-01:
      - "controller"
      - "ceph-osd"
    slave-02:
      - "compute"
      - "ceph-osd"
    slave-03:
      - "compute"
      - "ceph-osd"
  network:
    net_provider: "neutron"
    net_segment_type: "vlan"
  settings:
    volumes_ceph: true
    images_ceph: true
    volumes_lvm: false
-
  name: "1Controller_1Compute_1cndr_mongo_3ceph"
  nodes:
    slave-01:
      - "controller"
    slave-02:
      - "compute"
    slave-03:
      - "cinder"
      - "mongo"
    slave-04:
      - "ceph-osd"
    slave-05:
      - "ceph-osd"
    slave-06:
      - "ceph-osd"
  network:
    net_provider: "neutron"
    net_segment_type: "vlan"
-
  name: "3Controller_3Computes_ceph"
  nodes:
    slave-01:
      - "controller"
    slave-02:
      - "controller"
    slave-03:
      - "controller"
    slave-04:
      - "compute"
      - "ceph-osd"
    slave-05:
      - "compute"
      - "ceph-osd"
    slave-06:
      - "compute"
      - "ceph-osd"
  network:
    net_provider: "neutron"
    net_segment_type: "vlan"
-
  name: "3Controller_mongo_3Computes_ceph"
  nodes:
    slave-01:
      - "controller"
      - "mongo"
    slave-02:
      - "controller"
      - "mongo"
    slave-03:
      - "controller"
      - "mongo"
    slave-04:
      - "compute"
      - "ceph-osd"
    slave-05:
      - "compute"
      - "ceph-osd"
    slave-06:
      - "compute"
      - "ceph-osd"
  network:
    net_provider: "neutron"
    net_segment_type: "vlan"
-
  name: "1Controller_1Compute_1Cinder_3Ceph_Rados_vlan"
  nodes:
    slave-01:
      - "controller"
    slave-02:
      - "compute"
    slave-03:
      - "cinder"
    slave-04:
      - "ceph-osd"
    slave-05:
      - "ceph-osd"
    slave-06:
      - "ceph-osd"
  network:
    net_provider: "neutron"
    net_segment_type: "vlan"
  # NOTE(review): these keys are hyphenated (volume-lvm, image-ceph, ...)
  # while the "1Controller_ceph_2Computes_ceph_vlan" entry above uses the
  # underscored form (volumes_ceph, volumes_lvm) — confirm which spelling
  # the cluster-creation code actually expects.
  settings:
    volume-lvm: true
    volume-ceph: false
    image-ceph: true
    rados-ceph: true
    ephemeral-ceph: false
    replica-ceph: 2
-
  name: "1Controller_2Computes_1Cinder_3Ceph_1Mongo_Ceph_Image_Ceilometer_vlan"
  nodes:
    slave-01:
      - "controller"
    slave-02:
      - "compute"
    slave-03:
      - "compute"
    slave-04:
      - "cinder"
    slave-05:
      - "ceph-osd"
    slave-06:
      - "ceph-osd"
    slave-07:
      - "ceph-osd"
    slave-08:
      - "mongo"
  network:
    net_provider: "neutron"
    net_segment_type: "vlan"
  # NOTE(review): hyphenated keys here as well — see the note above on
  # the "1Controller_1Compute_1Cinder_3Ceph_Rados_vlan" entry.
  settings:
    volume-lvm: true
    volume-ceph: false
    image-ceph: true
    rados-ceph: false
    ephemeral-ceph: false
    replica-ceph: 2
    ceilometer: true
|
|
@ -0,0 +1,255 @@
|
|||
# Copyright 2016 Mirantis, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
import yaml
|
||||
|
||||
from proboscis import SkipTest
|
||||
from proboscis import test
|
||||
# pylint: disable=import-error
|
||||
from six.moves.urllib.error import HTTPError
|
||||
# pylint: enable=import-error
|
||||
# pylint: disable=redefined-builtin
|
||||
from six.moves import xrange
|
||||
# pylint: enable=redefined-builtin
|
||||
|
||||
from fuelweb_test import logger
|
||||
from fuelweb_test import settings
|
||||
from fuelweb_test.helpers.decorators import log_snapshot_after_test
|
||||
from fuelweb_test.helpers.utils import SettingsChanger
|
||||
from fuelweb_test.tests.base_test_case import SetupEnvironment
|
||||
from fuelweb_test.tests.base_test_case import TestBasic
|
||||
|
||||
|
||||
@test(groups=["unlock_settings_tab"])
|
||||
class UnlockSettingsTab(TestBasic):
|
||||
"""UnlockSettingsTab.""" # TODO documentation
|
||||
|
||||
def __init__(self):
|
||||
super(UnlockSettingsTab, self).__init__()
|
||||
self._cluster_id = None
|
||||
self._cluster_name = None
|
||||
|
||||
@property
|
||||
def cluster_id(self):
|
||||
return self._cluster_id
|
||||
|
||||
@cluster_id.setter
|
||||
def cluster_id(self, cluster_id):
|
||||
self._cluster_id = cluster_id
|
||||
|
||||
@property
|
||||
def cluster_name(self):
|
||||
return self._cluster_name
|
||||
|
||||
@cluster_name.setter
|
||||
def cluster_name(self, cluster_name):
|
||||
self._cluster_name = cluster_name
|
||||
|
||||
@staticmethod
|
||||
def load_config_from_file(path_to_conf=None):
|
||||
if not path_to_conf:
|
||||
logger.error("Please, specify file to load config from")
|
||||
raise SkipTest("File with config is not specified. "
|
||||
"Aborting the test")
|
||||
with open(path_to_conf, 'r') as f:
|
||||
try:
|
||||
config = yaml.load(f)
|
||||
return config
|
||||
except ValueError:
|
||||
logger.error("Check config file for consistency")
|
||||
raise
|
||||
|
||||
def revert_snapshot(self, nodes_count):
|
||||
"""
|
||||
:param nodes_count: number of nodes
|
||||
:return: nothing, but reverts snapshot
|
||||
"""
|
||||
if nodes_count == 1:
|
||||
num = '1'
|
||||
elif nodes_count <= 3:
|
||||
num = '3'
|
||||
elif nodes_count <= 5:
|
||||
num = '5'
|
||||
else:
|
||||
num = '9'
|
||||
self.env.revert_snapshot('ready_with_{}_slaves'.format(num))
|
||||
|
||||
@staticmethod
|
||||
def check_config_for_ceph(attrs):
|
||||
storage = attrs['editable']['storage']
|
||||
options_to_check = ['volumes_ceph', 'objects_ceph', 'images_ceph',
|
||||
'ephemeral_ceph']
|
||||
for option in options_to_check:
|
||||
if storage[option]['value']:
|
||||
pool_size = storage['osd_pool_size']['value']
|
||||
return int(pool_size)
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def get_existed_ceph_nodes_count(conf):
|
||||
nodes = conf['nodes']
|
||||
return len([node for node in nodes if 'ceph-osd' in nodes[node]])
|
||||
|
||||
def add_ceph_nodes(self, count, ceph_nodes_count):
|
||||
self.env.bootstrap_nodes(
|
||||
self.env.d_env.nodes().slaves[count:count + ceph_nodes_count],
|
||||
skip_timesync=True)
|
||||
nodes = {'slave-0{}'.format(i): ['ceph-osd']
|
||||
for i in xrange(count + 1, count + ceph_nodes_count + 1)}
|
||||
self.fuel_web.update_nodes(self.cluster_id, nodes)
|
||||
|
||||
def load_config(self, file_name):
|
||||
conf_path = os.path.dirname(os.path.abspath(__file__))
|
||||
cluster_conf = \
|
||||
self.load_config_from_file(os.path.join(conf_path, file_name))
|
||||
return cluster_conf
|
||||
|
||||
def create_cluster(self, conf):
|
||||
self.cluster_name = '_'.join([self.__class__.__name__, conf['name']])
|
||||
cluster_settings = {
|
||||
"net_provider": conf['network']['net_provider'],
|
||||
"net_segment_type": conf['network']['net_segment_type']}
|
||||
if conf.get('settings'):
|
||||
cluster_settings.update(conf['settings'])
|
||||
|
||||
self.cluster_id = self.fuel_web.create_cluster(
|
||||
name=self.cluster_name,
|
||||
mode=settings.DEPLOYMENT_MODE,
|
||||
settings=cluster_settings)
|
||||
|
||||
def update_nodes(self, conf):
|
||||
self.fuel_web.update_nodes(
|
||||
self.cluster_id,
|
||||
conf['nodes'])
|
||||
|
||||
def deploy_cluster(self):
|
||||
try:
|
||||
self.fuel_web.deploy_cluster_wait(self.cluster_id)
|
||||
except AssertionError:
|
||||
self.env.make_snapshot(
|
||||
"error_" + self.cluster_name, is_make=True)
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
def get_cluster_attributes(self):
|
||||
return self.fuel_web.client.get_cluster_attributes(self.cluster_id)
|
||||
|
||||
def update_cluster_attributes(self, new_attrs):
|
||||
try:
|
||||
self.fuel_web.client.update_cluster_attributes(
|
||||
self.cluster_id, new_attrs)
|
||||
except HTTPError:
|
||||
logger.info(
|
||||
"Failed to update cluster attributes, please check logs")
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
def run_ostf(self):
|
||||
try:
|
||||
self.fuel_web.run_ostf(cluster_id=self.cluster_id)
|
||||
except AssertionError:
|
||||
logger.info("Some OSTF tests are failed. Check logs.")
|
||||
self.env.make_snapshot(
|
||||
"error_" + self.cluster_name, is_make=True)
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
@test(depends_on=[SetupEnvironment.prepare_slaves_1,
|
||||
SetupEnvironment.prepare_slaves_3,
|
||||
SetupEnvironment.prepare_slaves_5,
|
||||
SetupEnvironment.prepare_slaves_9],
|
||||
groups=["deploy_with_redeploy_and_modify_settings"])
|
||||
@log_snapshot_after_test
|
||||
def deploy_with_redeploy_and_modify_settings(self):
|
||||
"""Deploy iteratively clusters from config, modify settings, redeploy
|
||||
|
||||
Scenario:
|
||||
1. Load clusters' configurations from the file
|
||||
2. Revert snapshot with appropriate nodes count
|
||||
3. Create a cluster from config
|
||||
4. Update nodes accordingly to the config
|
||||
5. Deploy the cluster
|
||||
6. Get cluster attributes
|
||||
7. Modify randomly cluster attributes
|
||||
8. Add if it's needed ceph nodes
|
||||
9. Update cluster attributes with changed one
|
||||
10. Redeploy cluster
|
||||
11. Run OSTF
|
||||
12. Go to the next config
|
||||
|
||||
Duration xxx m
|
||||
Snapshot will be made for all failed configurations
|
||||
"""
|
||||
self.show_step(1)
|
||||
for conf in self.load_config('cluster_configs.yaml'):
|
||||
logger.info(
|
||||
"Creating cluster from config with name: {}".format(
|
||||
conf['name']))
|
||||
self.show_step(2, initialize=True)
|
||||
self.revert_snapshot(len(conf['nodes']))
|
||||
self.show_step(3)
|
||||
self.create_cluster(conf)
|
||||
self.show_step(4)
|
||||
self.update_nodes(conf)
|
||||
self.show_step(5)
|
||||
if not self.deploy_cluster():
|
||||
logger.info(
|
||||
"Initial deployment of cluster {} was failed. "
|
||||
"Go to the next config".format(self.cluster_name))
|
||||
continue
|
||||
|
||||
self.show_step(6)
|
||||
attrs = self.get_cluster_attributes()
|
||||
self.show_step(7)
|
||||
changer = SettingsChanger(attrs)
|
||||
logger.info(
|
||||
"The options below will NOT be changed: {}".format(
|
||||
changer.SKIPPED_FIELDS_LIST))
|
||||
changer.make_changes(options=None, randomize=30)
|
||||
new_attrs = changer.attrs
|
||||
self.show_step(8)
|
||||
ceph_nodes_count = self.check_config_for_ceph(new_attrs)
|
||||
existed_ceph_count = self.get_existed_ceph_nodes_count(conf)
|
||||
if ceph_nodes_count > existed_ceph_count:
|
||||
count = len(conf['nodes'])
|
||||
if count + ceph_nodes_count > settings.NODES_COUNT - 1:
|
||||
logger.info("There are not enough nodes to redeploy with "
|
||||
"ceph nodes pool size. Go to the next config")
|
||||
continue
|
||||
self.add_ceph_nodes(count, ceph_nodes_count)
|
||||
|
||||
self.show_step(9)
|
||||
if not self.update_cluster_attributes(new_attrs):
|
||||
continue
|
||||
|
||||
self.show_step(10)
|
||||
if not self.deploy_cluster():
|
||||
logger.info(
|
||||
"Redeployment of cluster {} was failed. "
|
||||
"Go to the next config".format(self.cluster_name))
|
||||
continue
|
||||
else:
|
||||
# Run ostf
|
||||
self.show_step(11)
|
||||
if not self.run_ostf():
|
||||
continue
|
||||
logger.info(
|
||||
"Redeployment and OSTF were successfully "
|
||||
"executed for cluster {}".format(self.cluster_name))
|
||||
|
||||
self.show_step(12)
|
Loading…
Reference in New Issue