Add initial structure and bvt test for murano plugin
Change-Id: Ida9861b7d76895cc2004ff4d17a51061ca7ea2db
This commit is contained in:
parent
0ce4582f6f
commit
de33f8c3f5
|
@ -0,0 +1,58 @@
|
||||||
|
openrc
|
||||||
|
*.py[cod]
|
||||||
|
__pycache__
|
||||||
|
.cache
|
||||||
|
|
||||||
|
# C extensions
|
||||||
|
*.so
|
||||||
|
|
||||||
|
# Packages
|
||||||
|
*.egg
|
||||||
|
*.egg-info
|
||||||
|
dist
|
||||||
|
build
|
||||||
|
eggs
|
||||||
|
parts
|
||||||
|
bin
|
||||||
|
var
|
||||||
|
sdist
|
||||||
|
develop-eggs
|
||||||
|
.installed.cfg
|
||||||
|
lib
|
||||||
|
lib64
|
||||||
|
MANIFEST
|
||||||
|
TAGS
|
||||||
|
|
||||||
|
# Installer logs
|
||||||
|
pip-log.txt
|
||||||
|
|
||||||
|
# Unit test / coverage reports
|
||||||
|
.coverage
|
||||||
|
.tox
|
||||||
|
nosetests.xml
|
||||||
|
report.xml
|
||||||
|
|
||||||
|
# Translations
|
||||||
|
*.mo
|
||||||
|
|
||||||
|
# Mr Developer
|
||||||
|
.mr.developer.cfg
|
||||||
|
.project
|
||||||
|
.pydevproject
|
||||||
|
.idea
|
||||||
|
|
||||||
|
# Local example
|
||||||
|
example_local.py
|
||||||
|
|
||||||
|
# Local settings
|
||||||
|
local_settings.py
|
||||||
|
|
||||||
|
# Documentation
|
||||||
|
doc/_build/
|
||||||
|
|
||||||
|
# Logs
|
||||||
|
*.log
|
||||||
|
|
||||||
|
# Certs
|
||||||
|
/ca.crt
|
||||||
|
/ca.pem
|
|
@ -0,0 +1,82 @@
|
||||||
|
# Copyright 2016 Mirantis, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import abc
|
||||||
|
|
||||||
|
from fuelweb_test.tests import base_test_case
|
||||||
|
import six
|
||||||
|
|
||||||
|
from stacklight_tests.helpers import checkers
|
||||||
|
from stacklight_tests.helpers import helpers
|
||||||
|
from stacklight_tests.helpers import remote_ops
|
||||||
|
from stacklight_tests.helpers import ui_tester
|
||||||
|
|
||||||
|
|
||||||
|
@six.add_metaclass(abc.ABCMeta)
class PluginApi(object):
    """Base class to manage StackLight plugins with Fuel."""

    def __init__(self):
        self.test = base_test_case.TestBasic()
        self.env = self.test.env
        self.settings = self.get_plugin_settings()
        self.helpers = helpers.PluginHelper(self.env)
        self.checkers = checkers
        self.remote_ops = remote_ops

    def __getattr__(self, item):
        # Delegate any attribute not defined here to the wrapped
        # TestBasic instance so the plugin API can be used in its place.
        return getattr(self.test, item)

    @property
    def base_nodes(self):
        """Return a dict mapping nodes to Fuel roles without HA."""
        return {
            'slave-01': ['controller'],
            'slave-02': ['compute', 'cinder'],
            'slave-03': self.settings.role_name,
        }

    @property
    def full_ha_nodes(self):
        """Return a dict mapping nodes to Fuel roles with HA."""
        # Three controllers, three computes and three plugin nodes.
        mapping = {}
        for index in range(1, 4):
            mapping['slave-%02d' % index] = ['controller']
        for index in range(4, 7):
            mapping['slave-%02d' % index] = ['compute', 'cinder']
        for index in range(7, 10):
            mapping['slave-%02d' % index] = self.settings.role_name
        return mapping

    @abc.abstractmethod
    def get_plugin_settings(self):
        """Return a dict with the default plugin's settings."""
        pass

    @abc.abstractmethod
    def prepare_plugin(self):
        """Upload and install the plugin on the Fuel master node."""
        pass

    @abc.abstractmethod
    def activate_plugin(self):
        """Enable and configure the plugin in the environment."""
        pass
|
||||||
|
|
|
@ -0,0 +1,73 @@
|
||||||
|
# Copyright 2016 Mirantis, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from contextlib import closing
|
||||||
|
import socket
|
||||||
|
|
||||||
|
from proboscis import asserts
|
||||||
|
import requests
|
||||||
|
|
||||||
|
from murano_plugin_tests.helpers import remote_ops
|
||||||
|
|
||||||
|
|
||||||
|
def check_http_get_response(url, expected_code=200, msg=None, **kwargs):
    """Perform a HTTP GET request and assert that the HTTP server replies with
    the expected code.

    :param url: the requested URL
    :type url: str
    :param expected_code: the expected HTTP response code. Defaults to 200
    :type expected_code: int
    :param msg: the assertion message. Defaults to None
    :type msg: str
    :returns: HTTP response object
    :rtype: requests.Response
    """
    if not msg:
        msg = "%s responded with {0}, expected {1}" % url
    response = requests.get(url, **kwargs)
    actual_code = response.status_code
    asserts.assert_equal(
        actual_code, expected_code, msg.format(actual_code, expected_code))
    return response
|
||||||
|
|
||||||
|
|
||||||
|
def check_process_count(remote, process, count):
    """Check that the expected number of processes is running on a host.

    :param remote: SSH connection to the node.
    :type remote: SSHClient
    :param process: the process name to match.
    :type process: str
    :param count: the number of processes to match.
    :type count: int
    :returns: list of PIDs.
    :rtype: list
    """
    error_msg = "Got {got} instances instead of {count} for process {process}."
    found_pids = remote_ops.get_pids_of_process(remote, process)
    got = len(found_pids)
    asserts.assert_equal(
        got, count, error_msg.format(process=process, count=count, got=got))
    return found_pids
|
||||||
|
|
||||||
|
|
||||||
|
def check_port(address, port):
    """Check whether or not a TCP port is open.

    :param address: server address
    :type address: str
    :param port: server port
    :type port: int
    :returns: True if a TCP connection can be established, False otherwise.
    :rtype: bool
    """
    # closing() guarantees the socket is released even if connect_ex raises.
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        return sock.connect_ex((address, port)) == 0
|
|
@ -0,0 +1,600 @@
|
||||||
|
# Copyright 2016 Mirantis, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import time
|
||||||
|
import urllib2
|
||||||
|
|
||||||
|
from devops.helpers import helpers
|
||||||
|
from fuelweb_test.helpers import os_actions
|
||||||
|
from fuelweb_test import logger
|
||||||
|
from proboscis import asserts
|
||||||
|
|
||||||
|
from murano_plugin_tests.helpers import remote_ops
|
||||||
|
from murano_plugin_tests import settings
|
||||||
|
|
||||||
|
|
||||||
|
# Plugin package filenames look like "<name>-<major.minor>-<x.y.z>...":
# group 1 is the name, group 2 the plugin series, group 3 the full version.
PLUGIN_PACKAGE_RE = re.compile(r'([^/]+)-(\d+\.\d+)-(\d+\.\d+\.\d+)')


class NotFound(Exception):
    """Raised when a requested item (file, node, option) cannot be found."""
    pass


class TimeoutException(Exception):
    """Raised when waiting for a condition exceeds the allowed time."""
    pass


def _match_plugin_package(filename):
    """Return the regex match for a plugin package filename (or None).

    Shared by get_plugin_name/get_plugin_version to avoid duplicating the
    None-safe regex handling.
    """
    return PLUGIN_PACKAGE_RE.search(filename or '')


def get_plugin_name(filename):
    """Extract the plugin name from the package filename.

    :param filename: the plugin's filename.
    :type filename: str
    :returns: the plugin's name or None if not found
    :rtype: str
    """
    m = _match_plugin_package(filename)
    return m.group(1) if m else None


def get_plugin_version(filename):
    """Extract the plugin version from the package filename.

    :param filename: the plugin's filename.
    :type filename: str
    :returns: the plugin's version or None if not found
    :rtype: str
    """
    m = _match_plugin_package(filename)
    return m.group(3) if m else None


def get_fixture(name):
    """Return the full path to a fixture.

    :param name: fixture filename, relative to the "fixtures" directory
        under $WORKSPACE (or the current directory if WORKSPACE is unset).
    :type name: str
    :returns: path to the fixture file.
    :rtype: str
    :raises NotFound: if the fixture file does not exist.
    """
    path = os.path.join(os.environ.get("WORKSPACE", "./"), "fixtures", name)
    if not os.path.isfile(path):
        raise NotFound("File {} not found".format(path))
    return path
|
||||||
|
|
||||||
|
|
||||||
|
class PluginHelper(object):
    """Class for common help functions."""

    def __init__(self, env):
        self.env = env
        self.fuel_web = self.env.fuel_web
        self._cluster_id = None
        self.nailgun_client = self.fuel_web.client
        self._os_conn = None

    @property
    def cluster_id(self):
        """ID of the cluster under test (lazily resolved).

        :raises EnvironmentError: if no cluster has been created yet.
        """
        if self._cluster_id is None:
            try:
                self._cluster_id = self.fuel_web.get_last_created_cluster()
            except urllib2.URLError:
                raise EnvironmentError("No cluster was created.")
        return self._cluster_id

    @cluster_id.setter
    def cluster_id(self, value):
        self._cluster_id = value

    @property
    def os_conn(self):
        """Lazily created OpenStack actions helper bound to the cluster."""
        if self._os_conn is None:
            self._os_conn = os_actions.OpenStackActions(
                self.fuel_web.get_public_vip(self.cluster_id))
        return self._os_conn

    def prepare_plugin(self, plugin_path):
        """Upload and install plugin by path."""
        self.env.admin_actions.upload_plugin(plugin=plugin_path)
        self.env.admin_actions.install_plugin(
            plugin_file_name=os.path.basename(plugin_path))

    def activate_plugin(self, name, version, options=None, strict=False):
        """Enable and configure a plugin for the cluster.

        :param name: name of the plugin.
        :type name: str
        :param version: version of the plugin.
        :type version: str
        :param options: configuration of the plugin (optional). Keys may use
            "/" to address nested options.
        :type options: dict
        :param strict: whether or not to fail when setting an unknown option
            (default: False).
        :type strict: boolean
        :returns: None
        """
        if options is None:
            options = {}
        msg = "Plugin {0} isn't found.".format(name)
        asserts.assert_true(
            self.fuel_web.check_plugin_exists(self.cluster_id, name),
            msg)

        logger.info("Updating settings for plugin {0} ({1}): {2}".format(
            name, version, options))
        attributes = self.nailgun_client.get_cluster_attributes(
            self.cluster_id)
        attributes = attributes['editable'][name]
        logger.info("Plugin attrs: {0}".format(attributes))

        plugin_data = None
        for item in attributes['metadata']['versions']:
            if item['metadata']['plugin_version'] == version:
                plugin_data = item
                break
        asserts.assert_is_not_none(
            plugin_data, "Plugin {0} ({1}) is not found".format(name, version))

        attributes['metadata']['enabled'] = True
        for option, value in options.items():
            path = option.split("/")
            # Walk down the nested option structure. Starting from
            # plugin_data and descending level by level (instead of
            # re-indexing plugin_data at every step) makes multi-level
            # paths work and leaves plugin_option defined when the
            # option path has a single segment.
            plugin_option = plugin_data
            for p in path[:-1]:
                if p in plugin_option:
                    plugin_option = plugin_option[p]
                else:
                    msg = "Plugin option {} not found".format(option)
                    if strict:
                        raise NotFound(msg)
                    logger.warning(msg)
                    plugin_option = None
                    break

            if plugin_option is not None:
                plugin_option[path[-1]] = value

        self.nailgun_client.update_cluster_attributes(self.cluster_id, {
            "editable": {name: attributes}
        })

    def get_all_ready_nodes(self):
        """Return the cluster nodes whose status is "ready"."""
        return [node for node in
                self.nailgun_client.list_cluster_nodes(self.cluster_id)
                if node["status"] == "ready"]

    def create_cluster(self, name=None, settings=None, ssl=False):
        """Create a cluster.

        :param name: name of the cluster (defaults to the class name).
        :type name: str
        :param settings: optional dict containing the cluster's configuration.
        :type settings: dict
        :param ssl: whether SSL should be configured (default: False).
        :type ssl: boolean
        :returns: the cluster's id
        :rtype: str
        """
        if not name:
            name = self.__class__.__name__
        self._cluster_id = self.env.fuel_web.create_cluster(
            name=name,
            settings=settings,
            mode='ha',
            configure_ssl=ssl)
        return self._cluster_id

    def deploy_cluster(self, nodes_roles, verify_network=False,
                       update_interfaces=True, check_services=True):
        """Assign roles to nodes and deploy the cluster.

        :param nodes_roles: nodes to roles mapping.
        :type nodes_roles: dict
        :param verify_network: whether or not network verification should be
            run before the deployment (default: False).
        :type verify_network: boolean
        :param update_interfaces: whether or not interfaces should be updated
            before the deployment (default: True).
        :type update_interfaces: boolean
        :param check_services: whether or not OSTF tests should run after the
            deployment (default: True).
        :type check_services: boolean
        :returns: None
        """
        self.fuel_web.update_nodes(self.cluster_id, nodes_roles,
                                   update_interfaces=update_interfaces)
        if verify_network:
            self.fuel_web.verify_network(self.cluster_id)
        self.fuel_web.deploy_cluster_wait(self.cluster_id,
                                          check_services=check_services)

    def run_ostf(self, *args, **kwargs):
        """Run the OpenStack health checks."""
        self.fuel_web.run_ostf(self.cluster_id, *args, **kwargs)

    def run_single_ostf(self, test_sets, test_name, *args, **kwargs):
        """Run a subset of the OpenStack health checks."""
        self.fuel_web.run_single_ostf_test(self.cluster_id, test_sets,
                                           test_name, *args, **kwargs)

    def add_nodes_to_cluster(self, nodes, redeploy=True, check_services=False):
        """Add nodes to the cluster.

        :param nodes: list of nodes with their roles.
        :type nodes: dict
        :param redeploy: whether to redeploy the cluster (default: True).
        :type redeploy: boolean
        :param check_services: run OSTF after redeploy (default: False).
        :type check_services: boolean
        """
        self.fuel_web.update_nodes(
            self.cluster_id,
            nodes,
        )
        if redeploy:
            self.fuel_web.deploy_cluster_wait(self.cluster_id,
                                              check_services=check_services)

    def remove_nodes_from_cluster(self, nodes, redeploy=True,
                                  check_services=False):
        """Remove nodes from the cluster.

        :param nodes: list of nodes to remove from the cluster.
        :type nodes: dict
        :param redeploy: whether to redeploy the cluster (default: True).
        :type redeploy: boolean
        :param check_services: run OSTF after redeploy (default: False).
        :type check_services: boolean
        """
        self.fuel_web.update_nodes(
            self.cluster_id,
            nodes,
            pending_addition=False, pending_deletion=True,
        )
        if redeploy:
            self.fuel_web.deploy_cluster_wait(self.cluster_id,
                                              check_services=check_services)

    def get_master_node_by_role(self, role_name, excluded_nodes_fqdns=()):
        """Return the node running as the Designated Controller (DC).

        :param role_name: role(s) used to select candidate nodes.
        :param excluded_nodes_fqdns: FQDNs of nodes to skip.
        :returns: the nailgun node whose FQDN appears in the
            'pcs status cluster' "Current DC:" line, or None if none matches.
        """
        nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, role_name)
        nodes = [node for node in nodes
                 if node['fqdn'] not in set(excluded_nodes_fqdns)]
        with self.fuel_web.get_ssh_for_nailgun_node(nodes[0]) as remote:
            stdout = remote.check_call(
                'pcs status cluster | grep "Current DC:"')["stdout"][0]
        for node in nodes:
            if node['fqdn'] in stdout:
                return node

    @staticmethod
    def full_vip_name(vip_name):
        """Return the pacemaker resource name for a VIP."""
        return "".join(["vip__", vip_name])

    def get_node_with_vip(self, role_name, vip, exclude_node=None):
        """Return the devops node currently hosting the given VIP.

        :param role_name: role(s) used to select candidate nodes.
        :param vip: pacemaker resource name of the VIP.
        :param exclude_node: optional devops node to skip when choosing
            which node to query pacemaker from.
        """
        nailgun_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, role_name)
        lma_nodes = self.fuel_web.get_devops_nodes_by_nailgun_nodes(
            nailgun_nodes)
        lma_node = None
        if exclude_node:
            for node in lma_nodes:
                if node.name != exclude_node.name:
                    lma_node = node
                    break
        else:
            lma_node = lma_nodes[0]
        return self.fuel_web.get_pacemaker_resource_location(
            lma_node.name, vip)[0]

    def wait_for_vip_migration(self, old_master, role_name, vip,
                               timeout=5 * 60):
        """Wait until the VIP has migrated away from the old master node.

        :raises: the underlying wait helper's timeout error if the VIP does
            not move within 'timeout' seconds.
        """
        logger.info('Waiting for the migration of VIP {}'.format(vip))
        msg = "VIP {0} has not been migrated away from {1}".format(
            vip, old_master)
        helpers.wait(
            lambda: old_master != self.get_node_with_vip(
                role_name, vip, exclude_node=old_master),
            timeout=timeout, timeout_msg=msg)

    def power_off_node(self, node):
        """Power off a node.

        :param node: Devops node.
        :type node: devops node instance
        """
        msg = 'Node {0} has not become offline after hard shutdown'.format(
            node.name)
        logger.info('Power off node %s', node.name)
        node.destroy()
        logger.info('Wait a %s node offline status', node.name)
        helpers.wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
            node)['online'], timeout=60 * 5, timeout_msg=msg)

    def emulate_whole_network_disaster(self, delay_before_recover=5 * 60,
                                       wait_become_online=True):
        """Simulate a full network outage for all nodes.

        :param delay_before_recover: outage interval in seconds (default: 300).
        :type delay_before_recover: int
        :param wait_become_online: whether to wait for nodes to be back online.
        :type wait_become_online: bool
        """
        nodes = [node for node in self.env.d_env.get_nodes()
                 if node.driver.node_active(node)]

        # Blocking the networks attached to one slave cuts off the whole
        # environment; nodes[0] is presumably the admin node (it is also
        # skipped in the online wait below) -- TODO confirm.
        networks_interfaces = nodes[1].interfaces

        for interface in networks_interfaces:
            interface.network.block()

        time.sleep(delay_before_recover)

        for interface in networks_interfaces:
            interface.network.unblock()

        if wait_become_online:
            self.fuel_web.wait_nodes_get_online_state(nodes[1:])

    def uninstall_plugin(self, plugin_name, plugin_version, exit_code=0,
                         msg=None):
        """Remove a plugin.

        :param plugin_name: plugin's name.
        :type plugin_name: str
        :param plugin_version: plugin's version.
        :type plugin_version: str
        :param exit_code: expected exit code.
        :type exit_code: int
        :param msg: message in case of error.
        :type msg: str
        """
        logger.info("Trying to uninstall {name}({version}) plugin".format(
            name=plugin_name,
            version=plugin_version))
        msg = msg or "Plugin {0} deletion failed: exit code is {1}"
        with self.env.d_env.get_admin_remote() as remote:
            exec_res = remote.execute("fuel plugins --remove"
                                      " {0}=={1}".format(plugin_name,
                                                         plugin_version))
            asserts.assert_equal(
                exit_code, exec_res['exit_code'],
                msg.format(plugin_name, exec_res['exit_code']))

    def check_plugin_cannot_be_uninstalled(self, plugin_name, plugin_version):
        """Check that the plugin cannot be uninstalled.

        :param plugin_name: plugin's name.
        :type plugin_name: str
        :param plugin_version: plugin's version.
        :type plugin_version: str
        """
        # A deployed plugin must make "fuel plugins --remove" exit with 1.
        self.uninstall_plugin(
            plugin_name=plugin_name, plugin_version=plugin_version,
            exit_code=1,
            msg='{name}({version}) plugin deletion must not be allowed '
                'when it is deployed'.format(name=plugin_name,
                                             version=plugin_version))

    def get_hostname_by_node_name(self, changed_node):
        """Return the hostname of the nailgun node matching a base name.

        :raises NotFound: if no nailgun node matches.
        """
        node = self.fuel_web.get_nailgun_node_by_base_name(changed_node)
        if node is None:
            raise NotFound("Nailgun node with '{}' in name not found".format(
                changed_node))
        return node['hostname']

    def fuel_createmirror(self, option="", exit_code=0):
        """Run fuel-createmirror on the master node and check its exit code."""
        cmd = "fuel-createmirror {0}".format(option)
        logger.info("Executing '{}' command.".format(cmd))
        with self.env.d_env.get_admin_remote() as remote:
            exec_res = remote.execute(cmd)
            asserts.assert_equal(
                exit_code, exec_res['exit_code'],
                'fuel-createmirror failed: {0}'.format(exec_res['stderr']))

    def replace_ubuntu_mirror_with_mos(self):
        """Replace the Ubuntu package mirror with the MOS one."""
        cmds = ["fuel-mirror create -P ubuntu -G mos",
                "fuel-mirror apply --replace -P ubuntu -G mos"]
        logger.info("Executing '{}' commands.".format('\n'.join(cmds)))
        with self.env.d_env.get_admin_remote() as remote:
            for cmd in cmds:
                remote.check_call(cmd)

    def fuel_create_repositories(self, nodes):
        """Start task to setup repositories on provided nodes

        :param nodes: list of nodes to run task on them
        :type nodes: list
        """
        nodes_ids = [str(node['id']) for node in nodes]
        cmd = (
            "fuel --env {env_id} "
            "node --node-id {nodes_ids} "
            "--tasks setup_repositories".format(
                env_id=self.cluster_id,
                nodes_ids=' '.join(nodes_ids))
        )
        logger.info(
            "Executing {cmd} command.".format(cmd=cmd))
        with self.env.d_env.get_admin_remote() as remote:
            remote.check_call(cmd)

    def run_tasks(self, nodes, tasks=None, start=None, end=None,
                  timeout=10 * 60):
        """Run a set of tasks on nodes and wait for completion.

        The list of tasks is provided using the 'tasks' parameter and it can
        also be specified using the 'start' and/or 'end' parameters. In the
        latter case, the method will compute the exact set of tasks to be
        executed.

        :param nodes: list of nodes that should run the tasks
        :type nodes: list
        :param tasks: list of tasks to run.
        :type tasks: list
        :param start: the task from where to start the deployment.
        :type start: str
        :param end: the task where to end the deployment.
        :type end: str
        :param timeout: number of seconds to wait for the tasks completion
            (default: 600).
        :type timeout: int
        """
        task_ids = []
        if tasks is not None:
            task_ids += tasks
        if start is not None or end is not None:
            task_ids += [
                t["id"] for t in self.nailgun_client.get_end_deployment_tasks(
                    self.cluster_id, end=end or '', start=start or '')]
        node_ids = ",".join([str(node["id"]) for node in nodes])
        logger.info("Running tasks {0} for nodes {1}".format(
            ",".join(task_ids), node_ids))
        result = self.nailgun_client.put_deployment_tasks_for_cluster(
            self.cluster_id, data=task_ids, node_id=node_ids)
        self.fuel_web.assert_task_success(result, timeout=timeout)

    def apply_maintenance_update(self):
        """Method applies maintenance updates on whole cluster."""
        logger.info("Applying maintenance updates on master node")
        self.env.admin_install_updates()

        logger.info("Applying maintenance updates on slaves")
        slaves_mu_script_url = (
            "https://github.com/Mirantis/tools-sustaining/"
            "raw/master/scripts/mos_apply_mu.py")

        path_to_mu_script = "/tmp/mos_apply_mu.py"

        with self.env.d_env.get_admin_remote() as remote:
            remote.check_call("wget {uri} -O {path}".format(
                uri=slaves_mu_script_url,
                path=path_to_mu_script)
            )

            remote.check_call(
                "python {path} "
                "--env-id={identifier} "
                "--user={username} "
                "--pass={password} "
                "--tenant={tenant_name} --update".format(
                    path=path_to_mu_script,
                    identifier=self.cluster_id,
                    **settings.KEYSTONE_CREDS
                )
            )

        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, roles=['controller', ])

        computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, roles=['compute', ])

        logger.info("Restarting all OpenStack services")

        logger.info("Restarting services on controllers")
        # HA services are managed by pacemaker, the others by initctl.
        ha_services = (
            "p_heat-engine",
            "p_neutron-plugin-openvswitch-agent",
            "p_neutron-dhcp-agent",
            "p_neutron-metadata-agent",
            "p_neutron-l3-agent")
        non_ha_services = (
            "heat-api-cloudwatch",
            "heat-api-cfn",
            "heat-api",
            "cinder-api",
            "cinder-scheduler",
            "nova-objectstore",
            "nova-cert",
            "nova-api",
            "nova-consoleauth",
            "nova-conductor",
            "nova-scheduler",
            "nova-novncproxy",
            "neutron-server",
        )
        for controller in controllers:
            with self.fuel_web.get_ssh_for_nailgun_node(
                    controller) as remote:
                for service in ha_services:
                    remote_ops.manage_pacemaker_service(remote, service)
                for service in non_ha_services:
                    remote_ops.manage_initctl_service(remote, service)

        logger.info("Restarting services on computes")
        compute_services = (
            "neutron-plugin-openvswitch-agent",
            "nova-compute",
        )
        for compute in computes:
            with self.fuel_web.get_ssh_for_nailgun_node(compute) as remote:
                for service in compute_services:
                    remote_ops.manage_initctl_service(remote, service)

    @staticmethod
    def check_notifications(got_list, expected_list):
        """Assert that every expected event type is present in got_list."""
        for event_type in expected_list:
            asserts.assert_true(
                event_type in got_list, "{} event type not found in {}".format(
                    event_type, got_list))

    @staticmethod
    def wait_for_resource_status(resource_client, resource, expected_status,
                                 timeout=180, interval=30):
        """Poll a resource until it reaches the expected status.

        :param resource_client: client whose get(resource) returns an object
            with a 'status' attribute.
        :param resource: resource identifier passed to the client.
        :param expected_status: status value to wait for.
        :param timeout: total seconds to wait (default: 180).
        :param interval: seconds between polls (default: 30).
        :raises TimeoutException: if the status is not reached in time.
        """
        start = time.time()
        finish = start + timeout
        while start < finish:
            curr_state = resource_client.get(resource).status
            if curr_state == expected_status:
                return
            else:
                logger.debug(
                    "Instance is not in {} status".format(expected_status))
                time.sleep(interval)
                start = time.time()
        raise TimeoutException("Timed out waiting to become {}".format(
            expected_status))

    def get_fuel_release(self):
        """Return the Fuel release reported by the nailgun API."""
        version = self.nailgun_client.get_api_version()
        return version.get('release')

    def check_pacemaker_resource(self, resource_name, role):
        """Check that the pacemaker resource is started on nodes with given
        role

        :param resource_name: the name of the pacemaker resource
        :type resource_name: str
        :param role: the role of node when pacemaker is running
        :type role: str
        :returns: None
        """
        cluster_id = self.cluster_id
        n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, [role])
        d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)
        pcm_nodes = ' '.join(self.fuel_web.get_pcm_nodes(
            d_ctrls[0].name, pure=True)['Online'])
        logger.info("pacemaker nodes are {0}".format(pcm_nodes))
        resource_nodes = self.fuel_web.get_pacemaker_resource_location(
            d_ctrls[0].name, resource_name)
        for resource_node in resource_nodes:
            logger.info("Check resource [{0}] on node {1}".format(
                resource_name, resource_node.name))
            config = self.fuel_web.get_pacemaker_config(resource_node.name)
            # Raw string: "\[" and "\s" are regex escapes, not string escapes.
            asserts.assert_not_equal(
                re.search(
                    r"Clone Set: clone_{0} \[{0}\]\s+Started: \[ {1} \]".format(
                        resource_name, pcm_nodes), config), None,
                'Resource [{0}] is not properly configured'.format(
                    resource_name))
|
|
@ -0,0 +1,110 @@
|
||||||
|
# Copyright 2016 Mirantis, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
|
||||||
|
def get_all_bridged_interfaces_for_node(remote, excluded_criteria=None):
    """Return all network bridges for a node.

    :param remote: SSH connection to the node.
    :type remote: SSHClient
    :param excluded_criteria: regular expression to filter out items
    :type excluded_criteria: str
    :returns: list of interfaces
    :rtype: list
    """
    # TODO(rpromyshlennikov): do filtration on python side
    if excluded_criteria:
        grep_filter = " | grep -v '%s'" % excluded_criteria
    else:
        grep_filter = ""
    cmd = "brctl show | awk '/br-/{{print $1}}'{excluded}".format(
        excluded=grep_filter)
    output_lines = remote.check_call(cmd)["stdout"]
    return [line.strip() for line in output_lines]
|
||||||
|
|
||||||
|
|
||||||
|
def switch_interface(remote, interface, up=True):
    """Turn a network interface up or down.

    :param remote: SSH connection to the node.
    :type remote: SSHClient
    :param interface: interface name.
    :type interface: str
    :param up: whether the interface should be turned up (default: True).
    :type up: boolean
    """
    action = 'up' if up else 'down'
    remote.check_call(
        "if{method} {interface}".format(method=action, interface=interface))
|
||||||
|
|
||||||
|
|
||||||
|
def simulate_network_interrupt_on_node(remote, interval=30):
    """Simulate a network outage on a node.

    Inserts a DROP rule for all inbound traffic, waits, then removes it,
    all in a detached background subshell so the SSH call returns at once.

    :param remote: SSH connection to the node.
    :type remote: SSHClient
    :param interval: outage duration in seconds (default: 30).
    :type interval: int
    """
    # Redirection fix: '>/dev/null 2>&1' silences both streams; the
    # previous '2>&1>/dev/null' pointed stderr at the inherited stdout
    # and only silenced stdout.
    cmd = (
        "(/sbin/iptables -I INPUT -j DROP && "
        "sleep {interval} && "
        "/sbin/iptables -D INPUT -j DROP) >/dev/null 2>&1 &".format(
            interval=interval))
    remote.execute(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def get_pids_of_process(remote, name):
    """Get PIDs of process by its name.

    :param remote: SSH connection to the node.
    :type remote: SSHClient
    :param name: process name.
    :type name: str
    :returns: list of PIDs.
    :rtype: list
    """
    outcome = remote.execute("pidof {}".format(name))
    if outcome['exit_code'] != 0:
        # pidof exits non-zero when no matching process is running.
        return []
    return outcome['stdout'][0].strip().split()
|
||||||
|
|
||||||
|
|
||||||
|
def manage_pacemaker_service(remote, name, operation="restart"):
    """Operate HA service on remote node.

    :param remote: SSH connection to the node.
    :type remote: SSHClient
    :param name: service name.
    :type name: str
    :param operation: type of operation, usually start, stop or restart.
    :type operation: str
    """
    command = "crm resource {operation} {service}".format(
        operation=operation, service=name)
    remote.check_call(command)
|
||||||
|
|
||||||
|
|
||||||
|
def manage_initctl_service(remote, name, operation="restart"):
    """Operate service on remote node.

    :param remote: SSH connection to the node.
    :type remote: SSHClient
    :param name: service name.
    :type name: str
    :param operation: type of operation, usually start, stop or restart.
    :type operation: str
    """
    command = "initctl {operation} {service}".format(
        operation=operation, service=name)
    remote.check_call(command)
|
|
@ -0,0 +1,41 @@
|
||||||
|
# Copyright 2016 Mirantis, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from murano_plugin_tests import base_test
|
||||||
|
from murano_plugin_tests.murano_plugin import plugin_settings
|
||||||
|
|
||||||
|
|
||||||
|
class MuranoPluginApi(base_test.PluginApi):
    """Murano-plugin-specific implementation of the PluginApi interface."""

    def get_plugin_vip(self):
        """No VIP is exposed by the Murano plugin; nothing to return."""
        pass

    def get_plugin_settings(self):
        """Return the module that holds the plugin settings."""
        return plugin_settings

    def prepare_plugin(self):
        """Prepare the plugin package via the shared helpers."""
        self.helpers.prepare_plugin(self.settings.plugin_path)

    def run_ostf(self):
        """Run the OSTF test sets relevant for the plugin."""
        self.helpers.run_ostf(
            test_sets=['sanity', 'smoke', 'ha', 'tests_platform'])

    def activate_plugin(self, options=None):
        """Enable the plugin in the cluster.

        :param options: plugin options to apply; the settings module's
            defaults are used when None.
        """
        opts = self.settings.default_options if options is None else options
        self.helpers.activate_plugin(
            self.settings.name, self.settings.version, opts)

    def uninstall_plugin(self):
        """Remove the plugin and return the helpers' result."""
        return self.helpers.uninstall_plugin(self.settings.name,
                                             self.settings.version)
|
|
@ -0,0 +1,44 @@
|
||||||
|
# Copyright 2016 Mirantis, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from murano_plugin_tests.helpers import helpers
|
||||||
|
from murano_plugin_tests import settings
|
||||||
|
|
||||||
|
|
||||||
|
# Plugin identity: Fuel plugin name and the node role it introduces.
name = 'detach-murano'
role_name = ['murano-node']
# Path to the plugin package comes from the environment (see settings).
plugin_path = settings.MURANO_PLUGIN_PATH
version = helpers.get_plugin_version(plugin_path)

murano_user = 'murano'
murano_db_password = 'murano_db_password'
# Use glance artifacts (glare) as the murano package storage backend.
glare = True
# Base URL of the community application catalog used as the murano repo.
a_o_o = 'http://storage.apps.openstack.org/'

# NOTE(review): 'murano_user_password/value' is set to the user *name*
# ('murano'); confirm this is the intended password value.
default_options = {
    'murano_user_password/value': murano_user,
    'murano_db_password/value': murano_db_password,
    'murano_glance_artifacts/value': glare,
    'murano_repo_url/value': a_o_o
}

# Deliberate alias: murano_options and default_options are the same dict.
murano_options = default_options

base_nodes = {
    'slave-01': ['controller'],
    'slave-02': ['controller'],
    'slave-03': ['controller'],
    'slave-04': ['compute', 'cinder'],
    # Bug fix: role_name is itself a list, so ['compute', role_name]
    # produced the nested list ['compute', ['murano-node']] instead of a
    # flat list of role names.
    'slave-05': ['compute'] + role_name,
}
|
|
@ -0,0 +1,58 @@
|
||||||
|
# Copyright 2016 Mirantis, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from fuelweb_test.helpers.decorators import log_snapshot_after_test
|
||||||
|
from plugin_settings import base_nodes
|
||||||
|
from proboscis import test
|
||||||
|
from murano_plugin_tests.murano_plugin import api
|
||||||
|
|
||||||
|
|
||||||
|
@test(groups=["plugins"])
class TestMuranoPluginBvt(api.MuranoPluginApi):
    """Class for bvt testing the Murano plugin."""

    @test(depends_on_groups=['prepare_slaves_5'],
          groups=["deploy_murano_bvt", "deploy",
                  "murano", "bvt"])
    @log_snapshot_after_test
    def deploy_murano_plugin_ha(self):
        """Deploy a cluster with the Murano plugin

        Scenario:
            1. Upload the Murano plugin to the master node
            2. Install the plugin
            3. Create the cluster
            4. Add 3 node with controller role
            5. Add 1 node with compute and cinder roles
            6. Add 1 node with compute and murano-node roles
            7. Deploy the cluster
            8. Run OSTF

        Duration 120m
        Snapshot deploy_murano_bvt
        """
        # Bug fix: the guard must reference this test's own snapshot;
        # "deploy_ceilometer_redis" was a copy-paste leftover from the
        # ceilometer plugin tests and could never match.
        self.check_run("deploy_murano_bvt")
        self.env.revert_snapshot("ready_with_5_slaves")

        self.prepare_plugin()

        self.helpers.create_cluster(name=self.__class__.__name__)

        self.activate_plugin()

        self.helpers.deploy_cluster(base_nodes)

        self.run_ostf()

        self.env.make_snapshot("deploy_murano_bvt", is_make=True)
|
|
@ -0,0 +1,57 @@
|
||||||
|
# Copyright 2016 Mirantis, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
|
|
||||||
|
from nose import plugins
|
||||||
|
from paramiko import transport
|
||||||
|
|
||||||
|
|
||||||
|
class CloseSSHConnectionsPlugin(plugins.Plugin):
    """Closes all paramiko's ssh connections after each test case

    Plugin fixes proboscis disability to run cleanup of any kind.
    'afterTest' calls _join_lingering_threads function from paramiko,
    which stops all threads (set the state to inactive and joins for 10s)
    """
    # nose identifies the plugin by this name.
    name = 'closesshconnections'

    def options(self, parser, env=os.environ):
        # Signature (including the env=os.environ default) follows the
        # nose plugin API; no extra options are registered.
        super(CloseSSHConnectionsPlugin, self).options(parser, env=env)

    def configure(self, options, conf):
        super(CloseSSHConnectionsPlugin, self).configure(options, conf)
        # Force-enable: connection cleanup should always run.
        self.enabled = True

    def afterTest(self, *args, **kwargs):
        # Invoked by nose after every test; paramiko's private helper
        # deactivates and joins all lingering transport threads.
        transport._join_lingering_threads()
|
||||||
|
|
||||||
|
|
||||||
|
def import_tests():
    """Import the test modules so proboscis registers their @test groups."""
    # The import is needed only for its registration side effect.
    from murano_plugin import test_murano_plugin_bvt  # noqa
|
||||||
|
|
||||||
|
|
||||||
|
def run_tests():
    """Register all tests and hand control over to proboscis."""
    from proboscis import TestProgram  # noqa
    import_tests()

    # Run Proboscis and exit.
    program = TestProgram(addplugins=[CloseSSHConnectionsPlugin()])
    program.run_and_exit()
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    # NOTE(review): run_tests() calls import_tests() again; the extra call
    # here is redundant but harmless since imports are idempotent.
    import_tests()
    run_tests()
|
|
@ -0,0 +1,7 @@
|
||||||
|
import os
|
||||||
|
|
||||||
|
from fuelweb_test.settings import * # noqa
|
||||||
|
|
||||||
|
# Murano plugins

# Path to the Murano plugin package; read from the environment, so it is
# None when MURANO_PLUGIN_PATH is not exported.
MURANO_PLUGIN_PATH = os.environ.get('MURANO_PLUGIN_PATH')
|
|
@ -0,0 +1,14 @@
|
||||||
|
export ENV_NAME=fuel_murano_plugin
|
||||||
|
export VENV_PATH=$HOME/venv-murano-tests
|
||||||
|
|
||||||
|
# Change this if you didn't use the default database password
|
||||||
|
export DEVOPS_DB_PASSWORD=devops
|
||||||
|
|
||||||
|
# Nodes characteristics
|
||||||
|
export ADMIN_NODE_MEMORY=4096
|
||||||
|
export ADMIN_NODE_CPU=2
|
||||||
|
export SLAVE_NODE_MEMORY=6144
|
||||||
|
|
||||||
|
# Locations for fuel-qa, MOS and plugins artefacts
|
||||||
|
export FUELQA_GITREF=stable/9.0
|
||||||
|
export ISO_PATH=$HOME/iso/MirantisOpenStack-9.0.iso
|
|
@ -0,0 +1,6 @@
|
||||||
|
git+git://github.com/openstack/fuel-devops.git@2.9.20
|
||||||
|
oslo.i18n>=3.1.0 # the more recent python-*client (dependencies of fuel-qa) require at least this version of oslo.i18n
|
||||||
|
PyYAML
|
||||||
|
requests
|
||||||
|
six
|
||||||
|
tox
|
|
@ -0,0 +1,16 @@
|
||||||
|
[tox]
|
||||||
|
minversion = 1.6
|
||||||
|
skipsdist = True
|
||||||
|
envlist = pep8,docs
|
||||||
|
|
||||||
|
[testenv:pep8]
|
||||||
|
deps = hacking
|
||||||
|
commands = flake8
|
||||||
|
distribute = false
|
||||||
|
|
||||||
|
[flake8]
|
||||||
|
filename=*.py
|
||||||
|
ignore = H405, H703
|
||||||
|
show-source = true
|
||||||
|
exclude = .venv,.git,.tox,dist,doc,*egg,*lib/python*,build,releasenotes,tmp,utils/fuel-qa-builder/venv*
|
||||||
|
max-complexity=25
|
|
@ -0,0 +1,11 @@
|
||||||
|
recursive-include fuelweb_test *
|
||||||
|
recursive-include gates_tests *
|
||||||
|
|
||||||
|
include README.md
|
||||||
|
include fuelweb_test/requirements.txt
|
||||||
|
exclude .gitignore
|
||||||
|
exclude .gitreview
|
||||||
|
exclude *.bak
|
||||||
|
|
||||||
|
global-exclude *.pyc
|
||||||
|
global-exclude *.log
|
|
@ -0,0 +1,59 @@
|
||||||
|
#!/bin/bash
#
# Script to setup a Python virtual environment (if needed) and install all the
# project's dependencies

# Abort on the first failing command.
set -e

# Initialize the variables
BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
VENV_PATH=${VENV_PATH:-"${BASE_DIR}"/venv-murano-tests}
FUELQA_GITREF=${FUELQA_GITREF:-stable/mitaka}

# Create the virtual environment if it doesn't exist yet
if [[ ! -f "$VENV_PATH"/bin/activate ]]; then
    if ! which virtualenv; then
        echo 'Cannot find the virtualenv executable!'
        echo 'You should install it either using pip or your distribution package manager.'
        exit 1
    fi

    echo "Creating virtual environment in '$VENV_PATH'"
    virtualenv "$VENV_PATH"
    . "$VENV_PATH"/bin/activate

    # Always upgrade to the latest version of pip
    pip install -U pip
else
    . "$VENV_PATH"/bin/activate
fi

echo "Using virtual environment at '$VIRTUAL_ENV'"

# Install fuel-qa into the venv only once ('pip show' is empty when the
# fuelweb-test package is not installed yet).
if [[ "$(pip show fuelweb-test)" == "" ]]; then
    # Install fuel-qa in the virtual environment
    echo "Checking out fuel-qa, reference: $FUELQA_GITREF"
    FUELQA_DIR=$(mktemp -d)
    git clone https://github.com/openstack/fuel-qa.git -- "$FUELQA_DIR"

    pushd "$FUELQA_DIR"
    git checkout "$FUELQA_GITREF"

    # fuel-qa ships no packaging metadata; overlay our own MANIFEST.in
    # and setup.py so it can be built as an sdist.
    cp "${BASE_DIR}"/{MANIFEST.in,setup.py} ./

    python setup.py sdist
    pip install dist/fuelweb_test*.tar.gz

    # Clean up stuff
    popd
    rm -rf "$FUELQA_DIR"
fi

# Install the project's dependencies
pip install -r"${BASE_DIR}/../../requirements.txt"

echo
echo
echo "The setup is now complete."
echo "Run this command in your shell to activate your Python virtual environment:"
echo " . $VIRTUAL_ENV/bin/activate"
|
|
@ -0,0 +1,41 @@
|
||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
import os
|
||||||
|
from setuptools import setup
|
||||||
|
|
||||||
|
|
||||||
|
def read(fname):
    """Return the content of *fname* resolved relative to this file.

    :param fname: file name relative to the setup.py directory (an
        absolute path is used as-is by os.path.join).
    """
    # Context manager closes the handle deterministically; the original
    # left the open file to the garbage collector.
    with open(os.path.join(os.path.dirname(__file__), fname)) as handle:
        return handle.read()
|
||||||
|
|
||||||
|
|
||||||
|
def get_requirements_list(requirements):
    """Read a requirements file and drop the entries that must not be
    installed from it (devops and launchpadlib are provided separately).
    """
    lines = read(requirements).splitlines()
    return [line for line in lines
            if 'devops' not in line and 'launchpadlib' not in line]
|
||||||
|
|
||||||
|
|
||||||
|
setup(
    name='fuelweb_test',
    # setuptools expects the version as a string; the float literal 9.0
    # relied on implicit coercion and fails with newer setuptools.
    version='9.0',
    description='Fuel-qa fuelweb package',

    url='http://www.openstack.org/',
    author='OpenStack',
    author_email='openstack-dev@lists.openstack.org',
    packages=['fuelweb_test', 'gates_tests'],
    include_package_data=True,
    classifiers=[
        'Environment :: Linux',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
    ],
    install_requires=get_requirements_list('./fuelweb_test/requirements.txt'),
)
|
|
@ -0,0 +1,489 @@
|
||||||
|
#!/bin/sh
PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"

# functions

# Distinct exit codes so Jenkins can tell failure modes apart.
INVALIDOPTS_ERR=100
NOJOBNAME_ERR=101
NOISOPATH_ERR=102
NOTASKNAME_ERR=103
NOWORKSPACE_ERR=104
DEEPCLEAN_ERR=105
MAKEISO_ERR=106
NOISOFOUND_ERR=107
COPYISO_ERR=108
SYMLINKISO_ERR=109
CDWORKSPACE_ERR=110
ISODOWNLOAD_ERR=111
INVALIDTASK_ERR=112

# Defaults

export REBOOT_TIMEOUT=${REBOOT_TIMEOUT:-5000}
export ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT=${ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT:-true}

# Export specified settings.
# Quote the expansions inside [ ]: an unquoted empty/unset variable leaves
# a malformed test, and a value containing spaces would break it entirely.
if [ -z "${NODE_VOLUME_SIZE}" ]; then export NODE_VOLUME_SIZE=350; fi
if [ -z "${OPENSTACK_RELEASE}" ]; then export OPENSTACK_RELEASE=Ubuntu; fi
if [ -z "${ENV_NAME}" ]; then export ENV_NAME="murano"; fi
if [ -z "${ADMIN_NODE_MEMORY}" ]; then export ADMIN_NODE_MEMORY=3072; fi
if [ -z "${ADMIN_NODE_CPU}" ]; then export ADMIN_NODE_CPU=2; fi
if [ -z "${SLAVE_NODE_MEMORY}" ]; then export SLAVE_NODE_MEMORY=6144; fi
if [ -z "${SLAVE_NODE_CPU}" ]; then export SLAVE_NODE_CPU=2; fi
|
||||||
|
|
||||||
|
|
||||||
|
ShowHelp() {
    # Print usage. The heredoc is unquoted on purpose; keep the text free
    # of characters the shell would expand.
    cat << EOF
System Tests Script

It can perform several actions depending on Jenkins JOB_NAME it is run from
or it can take names from exported environment variables or command line options
if you do need to override them.

-w (dir)    - Path to workspace where fuelweb git repository was checked out.
              Uses Jenkins' WORKSPACE if not set
-e (name)   - Directly specify environment name used in tests
              Uses ENV_NAME variable if set.
-j (name)   - Name of this job. Determines ISO name, Task name and used by tests.
              Uses Jenkins' JOB_NAME if not set
-v          - Do not use virtual environment
-V (dir)    - Path to python virtual environment
-i (file)   - Full path to ISO file to build or use for tests.
              Made from iso dir and name if not set.
-t (name)   - Name of task this script should perform. Should be one of defined ones.
              Taken from Jenkins' job's suffix if not set.
-o (str)    - Allows you any extra command line option to run test job if you
              want to use some parameters.
-a (str)    - Allows you to pass NOSE_ATTR to the test job if you want
              to use some parameters.
-A (str)    - Allows you to pass NOSE_EVAL_ATTR if you want to enter attributes
              as python expressions.
-m (name)   - Use this mirror to build ISO from.
              Uses 'srt' if not set.
-U          - ISO URL for tests.
              Null by default.
-r (yes/no) - Should built ISO file be placed with build number tag and
              symlinked to the last build or just copied over the last file.
-b (num)    - Allows you to override Jenkins' build number if you need to.
-l (dir)    - Path to logs directory. Can be set by LOGS_DIR environment variable.
              Uses WORKSPACE/logs if not set.
-d          - Dry run mode. Only show what would be done and do nothing.
              Useful for debugging.
-k          - Keep previously created test environment before tests run
-K          - Keep test environment after tests are finished
-h          - Show this help page

Most variables use guessing from Jenkins' job name but can be overridden
by exported variable before script is run or by one of command line options.

You can override following variables using export VARNAME="value" before running this script
WORKSPACE - path to directory where Fuelweb repository was checked out by Jenkins or manually
JOB_NAME - name of Jenkins job that determines which task should be done and ISO file name.

If task name is "iso" it will make iso file
Other defined names will run Nose tests using previously built ISO file.

ISO file name is taken from job name prefix
Task name is taken from job name suffix
Separator is one dot '.'

For example if JOB_NAME is:
mytest.somestring.iso
ISO name: mytest.iso
Task name: iso
If run with such JOB_NAME iso file with name mytest.iso will be created

If JOB_NAME is:
mytest.somestring.node
ISO name: mytest.iso
Task name: node
If script was run with this JOB_NAME node tests will be using ISO file mytest.iso.

First you should run mytest.somestring.iso job to create mytest.iso.
Then you can run mytest.somestring.node job to start tests using mytest.iso and other tests too.
EOF
}
|
||||||
|
|
||||||
|
GlobalVariables() {
    # where built iso's should be placed
    # use hardcoded default if not set before by export
    ISO_DIR="${ISO_DIR:=/var/www/fuelweb-iso}"

    # name of iso file
    # taken from jenkins job prefix
    # if not set before by variable export
    if [ -z "${ISO_NAME}" ]; then
        ISO_NAME="${JOB_NAME%.*}.iso"
    fi

    # full path where iso file should be placed
    # made from iso name and path to the iso shared directory
    # if not overridden by options or export
    if [ -z "${ISO_PATH}" ]; then
        ISO_PATH="${ISO_DIR}/${ISO_NAME}"
    fi

    # which task should be run
    # taken from the jenkins job name suffix if not set by options
    if [ -z "${TASK_NAME}" ]; then
        TASK_NAME="${JOB_NAME##*.}"
    fi

    # do we want to keep iso's for each build or just copy over single file
    ROTATE_ISO="${ROTATE_ISO:=yes}"

    # choose mirror to build iso from. Default is 'srt' for Saratov's mirror
    # you can change mirror by exporting USE_MIRROR variable before running this script
    USE_MIRROR="${USE_MIRROR:=srt}"

    # only show what commands would be executed but do nothing
    # this feature is useful if you want to debug this script's behaviour
    DRY_RUN="${DRY_RUN:=no}"

    # run the tests inside a python virtualenv by default
    VENV="${VENV:=yes}"
}
|
||||||
|
|
||||||
|
GetoptsVariables() {
    # Parse command line flags; each one overrides the corresponding
    # environment variable (see ShowHelp for meanings).
    # Bug fix: the optstring declared "v:" although -v is a plain flag;
    # getopts would silently consume the next CLI token as its argument.
    while getopts ":w:j:i:t:o:a:A:m:U:r:b:V:l:dkKe:vh" opt; do
        case $opt in
            w)
                WORKSPACE="${OPTARG}"
                ;;
            j)
                JOB_NAME="${OPTARG}"
                ;;
            i)
                ISO_PATH="${OPTARG}"
                ;;
            t)
                TASK_NAME="${OPTARG}"
                ;;
            o)
                # -o may be repeated; extra test options accumulate
                TEST_OPTIONS="${TEST_OPTIONS} ${OPTARG}"
                ;;
            a)
                NOSE_ATTR="${OPTARG}"
                ;;
            A)
                NOSE_EVAL_ATTR="${OPTARG}"
                ;;
            m)
                USE_MIRROR="${OPTARG}"
                ;;
            U)
                ISO_URL="${OPTARG}"
                ;;
            r)
                ROTATE_ISO="${OPTARG}"
                ;;
            b)
                BUILD_NUMBER="${OPTARG}"
                ;;
            V)
                VENV_PATH="${OPTARG}"
                ;;
            l)
                LOGS_DIR="${OPTARG}"
                ;;
            k)
                KEEP_BEFORE="yes"
                ;;
            K)
                KEEP_AFTER="yes"
                ;;
            e)
                ENV_NAME="${OPTARG}"
                ;;
            d)
                DRY_RUN="yes"
                ;;
            v)
                VENV="no"
                ;;
            h)
                ShowHelp
                exit 0
                ;;
            \?)
                echo "Invalid option: -$OPTARG"
                ShowHelp
                exit $INVALIDOPTS_ERR
                ;;
            :)
                echo "Option -$OPTARG requires an argument."
                ShowHelp
                exit $INVALIDOPTS_ERR
                ;;
        esac
    done
}
|
||||||
|
|
||||||
|
CheckVariables() {
    # Fail fast with a dedicated exit code for every mandatory setting
    # that is still unset at this point.
    if test -z "${JOB_NAME}"; then
        echo "Error! JOB_NAME is not set!"
        exit $NOJOBNAME_ERR
    fi

    if test -z "${ISO_PATH}"; then
        echo "Error! ISO_PATH is not set!"
        exit $NOISOPATH_ERR
    fi

    if test -z "${TASK_NAME}"; then
        echo "Error! TASK_NAME is not set!"
        exit $NOTASKNAME_ERR
    fi

    if test -z "${WORKSPACE}"; then
        echo "Error! WORKSPACE is not set!"
        exit $NOWORKSPACE_ERR
    fi
}
|
||||||
|
|
||||||
|
MakeISO() {
    # Create iso file to be used in tests

    # clean previous garbage
    if [ "${DRY_RUN}" = "yes" ]; then
        echo make deep_clean
    else
        make deep_clean
    fi
    ec="${?}"

    if [ "${ec}" -gt "0" ]; then
        echo "Error! Deep clean failed!"
        exit $DEEPCLEAN_ERR
    fi

    # create ISO file
    export USE_MIRROR
    if [ "${DRY_RUN}" = "yes" ]; then
        echo make iso
    else
        make iso
    fi
    ec=$?

    if [ "${ec}" -gt "0" ]; then
        echo "Error making ISO!"
        exit $MAKEISO_ERR
    fi

    if [ "${DRY_RUN}" = "yes" ]; then
        ISO="${WORKSPACE}/build/iso/fuel.iso"
    else
        ISO="`ls ${WORKSPACE}/build/iso/*.iso | head -n 1`"
        # check that ISO file exists
        if [ ! -f "${ISO}" ]; then
            echo "Error! ISO file not found!"
            exit $NOISOFOUND_ERR
        fi
    fi

    # copy ISO file to storage dir
    # if rotation is enabled and build number is available
    # save iso to tagged file and symlink to the last build
    # if rotation is not enabled just copy iso to iso_dir

    if [ "${ROTATE_ISO}" = "yes" -a "${BUILD_NUMBER}" != "" ]; then
        # copy iso file to shared dir with revision tagged name
        # Bug fix: '%.iso' strips the .iso *suffix*; the original '#.iso'
        # stripped a prefix (a no-op) and produced names like
        # 'name.iso_42.iso' instead of 'name_42.iso'.
        NEW_BUILD_ISO_PATH="${ISO_PATH%.iso}_${BUILD_NUMBER}.iso"
        if [ "${DRY_RUN}" = "yes" ]; then
            echo cp "${ISO}" "${NEW_BUILD_ISO_PATH}"
        else
            cp "${ISO}" "${NEW_BUILD_ISO_PATH}"
        fi
        ec=$?

        if [ "${ec}" -gt "0" ]; then
            echo "Error! Copy ${ISO} to ${NEW_BUILD_ISO_PATH} failed!"
            exit $COPYISO_ERR
        fi

        # create symlink to the last built ISO file
        if [ "${DRY_RUN}" = "yes" ]; then
            echo ln -sf "${NEW_BUILD_ISO_PATH}" "${ISO_PATH}"
        else
            ln -sf "${NEW_BUILD_ISO_PATH}" "${ISO_PATH}"
        fi
        ec=$?

        if [ "${ec}" -gt "0" ]; then
            echo "Error! Create symlink from ${NEW_BUILD_ISO_PATH} to ${ISO_PATH} failed!"
            exit $SYMLINKISO_ERR
        fi
    else
        # just copy file to shared dir
        if [ "${DRY_RUN}" = "yes" ]; then
            echo cp "${ISO}" "${ISO_PATH}"
        else
            cp "${ISO}" "${ISO_PATH}"
        fi
        ec=$?

        if [ "${ec}" -gt "0" ]; then
            echo "Error! Copy ${ISO} to ${ISO_PATH} failed!"
            exit $COPYISO_ERR
        fi
    fi

    if [ "${ec}" -gt "0" ]; then
        echo "Error! Copy ISO from ${ISO} to ${ISO_PATH} failed!"
        exit $COPYISO_ERR
    fi
    echo "Finished building ISO: ${ISO_PATH}"
    exit 0
}
|
||||||
|
|
||||||
|
CdWorkSpace() {
    # Enter the workspace directory, or abort when that is impossible.
    if [ "${DRY_RUN}" = "yes" ]; then
        echo cd "${WORKSPACE}"
    else
        cd "${WORKSPACE}"
        ec=$?

        if [ "${ec}" -gt "0" ]; then
            echo "Error! Cannot cd to WORKSPACE!"
            exit $CDWORKSPACE_ERR
        fi
    fi
}
|
||||||
|
|
||||||
|
RunTest() {
    # Run test selected by task name

    # check if iso file exists; try to download it when an URL was given
    if [ ! -f "${ISO_PATH}" ]; then
        if [ -z "${ISO_URL}" -a "${DRY_RUN}" != "yes" ]; then
            echo "Error! File ${ISO_PATH} not found and no ISO_URL (-U key) for downloading!"
            exit $NOISOFOUND_ERR
        else
            if [ "${DRY_RUN}" = "yes" ]; then
                echo wget -c "${ISO_URL}" -O "${ISO_PATH}"
            else
                echo "No ${ISO_PATH} found. Trying to download file."
                wget -c "${ISO_URL}" -O "${ISO_PATH}"
                rc=$?
                if [ $rc -ne 0 ]; then
                    echo "Failed to fetch ISO from ${ISO_URL}"
                    exit $ISODOWNLOAD_ERR
                fi
            fi
        fi
    fi

    if [ -z "${VENV_PATH}" ]; then
        VENV_PATH="/home/jenkins/venv-nailgun-tests"
    fi

    # run python virtualenv
    if [ "${VENV}" = "yes" ]; then
        if [ "${DRY_RUN}" = "yes" ]; then
            echo . "${VENV_PATH}/bin/activate"
        else
            . "${VENV_PATH}/bin/activate"
        fi
    fi

    if [ "${ENV_NAME}" = "" ]; then
        ENV_NAME="${JOB_NAME}_system_test"
    fi

    if [ "${LOGS_DIR}" = "" ]; then
        LOGS_DIR="${WORKSPACE}/logs"
    fi

    # Bug fix: LOGS_DIR is a directory, so test with -d ('-f' never
    # matched and mkdir ran even when the directory already existed,
    # which only worked by accident thanks to 'mkdir -p').
    if [ ! -d "${LOGS_DIR}" ]; then
        mkdir -p "${LOGS_DIR}"
    fi

    export ENV_NAME
    export LOGS_DIR
    export ISO_PATH

    if [ "${KEEP_BEFORE}" != "yes" ]; then
        # remove previous environment
        if [ "${DRY_RUN}" = "yes" ]; then
            echo dos.py erase "${ENV_NAME}"
        else
            if [ $(dos.py list | grep "^${ENV_NAME}\$") ]; then
                dos.py erase "${ENV_NAME}"
            fi
        fi
    fi

    # gather additional option for this nose test run
    OPTS=""
    if [ -n "${NOSE_ATTR}" ]; then
        OPTS="${OPTS} -a ${NOSE_ATTR}"
    fi
    if [ -n "${NOSE_EVAL_ATTR}" ]; then
        OPTS="${OPTS} -A ${NOSE_EVAL_ATTR}"
    fi
    if [ -n "${TEST_OPTIONS}" ]; then
        OPTS="${OPTS} ${TEST_OPTIONS}"
    fi

    # run python test set to create environments, deploy and test product
    # Bug fix: the runner lives in murano_plugin_tests/ in this repository;
    # 'stacklight_tests/run_tests.py' was a copy-paste leftover from the
    # stacklight plugin repo.
    if [ "${DRY_RUN}" = "yes" ]; then
        echo export PYTHONPATH="${PYTHONPATH:+${PYTHONPATH}:}${WORKSPACE}"
        echo python murano_plugin_tests/run_tests.py -q --nologcapture --with-xunit ${OPTS}
    else
        export PYTHONPATH="${PYTHONPATH:+${PYTHONPATH}:}${WORKSPACE}"
        echo ${PYTHONPATH}
        python murano_plugin_tests/run_tests.py -q --nologcapture --with-xunit ${OPTS}
    fi
    ec=$?

    if [ "${KEEP_AFTER}" != "yes" ]; then
        # remove environment after tests
        if [ "${DRY_RUN}" = "yes" ]; then
            echo dos.py destroy "${ENV_NAME}"
        else
            dos.py destroy "${ENV_NAME}"
        fi
    fi

    exit "${ec}"
}
|
||||||
|
|
||||||
|
RouteTasks() {
    # Select and run the job matching TASK_NAME.
    # A recognised task runs its job (which terminates the script itself);
    # an unknown task name aborts with INVALIDTASK_ERR.

    if [ "${TASK_NAME}" = "test" ]; then
        RunTest
    elif [ "${TASK_NAME}" = "iso" ]; then
        MakeISO
    else
        echo "Unknown task: ${TASK_NAME}!"
        exit $INVALIDTASK_ERR
    fi
    exit 0
}
|
||||||
|
|
||||||
|
# MAIN

# Parse command line options into variables.
# FIX: "${@}" is quoted so arguments containing whitespace are passed
# through intact instead of being word-split (unquoted ${@} splits them).
GetoptsVariables "${@}"

# then we define global variables and their defaults when needed
GlobalVariables

# check that we have all critical variables set
CheckVariables

# first we chdir into our working directory unless we dry run
CdWorkSpace

# finally we can choose what to do according to TASK_NAME
RouteTasks
|
Loading…
Reference in New Issue