Remove the plugin_test/ directory

The functional tests now live in the stacklight-integration-tests
project.

Change-Id: I8baec825a06430cb1514c0a83a2dfcff52cfd093
This commit is contained in:
Simon Pasquier 2016-06-03 14:27:40 +02:00
parent 5d25d8fac2
commit 4b42c0d05e
12 changed files with 0 additions and 1277 deletions

4
.gitmodules vendored
View File

@ -1,4 +0,0 @@
[submodule "plugin_test/fuel-qa"]
path = plugin_test/fuel-qa
url = https://github.com/openstack/fuel-qa
branch = stable/8.0

View File

@ -1,13 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

@ -1 +0,0 @@
Subproject commit b54b6a58d8d8148d5a434dbf596baa1b0f23e1a5

View File

@ -1,13 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,112 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from devops.helpers.helpers import wait
from fuelweb_test.helpers.checkers import check_repo_managment
from fuelweb_test import logger
from fuelweb_test.settings import DEPLOYMENT_MODE
from plugin import TestPlugin
def create_cluster_with_neutron(test_case, **options):
    """Create a cluster configured for Neutron with tunneling segmentation.

    Any keyword options override the defaults below. The new cluster id is
    stored on ``test_case.cluster_id`` and also returned.

    :param test_case: test case object providing ``fuel_web``
    :param options: cluster settings overriding the defaults
    :return: the id of the created cluster
    """
    settings = {
        "net_provider": 'neutron',
        "net_segment_type": 'tun',
        "assign_to_all_nodes": False,
        "images_ceph": False,
        "volumes_ceph": False,
        "ephemeral_ceph": False,
        "objects_ceph": False,
        "volumes_lvm": True,
        "ceilometer": False,
        "osd_pool_size": '1',
    }
    settings.update(options)
    cluster_id = test_case.fuel_web.create_cluster(
        name=test_case.__class__.__name__,
        mode=DEPLOYMENT_MODE,
        settings=settings)
    test_case.cluster_id = cluster_id
    return cluster_id
def deploy_cluster(test_case, wait_for_status='operational'):
    """Deploy cluster with additional time for waiting on node's availability.

    :param test_case: test case object providing ``fuel_web`` and ``env``
    :param wait_for_status: cluster status to wait for after deployment;
        pass a falsy value to skip the wait
    """
    try:
        test_case.fuel_web.deploy_cluster_wait(
            test_case.cluster_id, check_services=False,
            timeout=180 * 60)
    except Exception:
        # Best-effort recovery: nodes may be temporarily unreachable right
        # after deployment, so wait and re-check repository management on
        # every node instead of failing immediately.
        nailgun_nodes = test_case.env.fuel_web.client.list_cluster_nodes(
            test_case.env.fuel_web.get_last_created_cluster())
        time.sleep(420)
        for n in nailgun_nodes:
            check_repo_managment(
                test_case.env.d_env.get_ssh_to_remote(n['ip']))
            # Fix: the original format string only rendered the ip even
            # though the node name was also passed as an argument.
            logger.info('ip is {0}, name is {1}'.format(n['ip'], n['name']))
    if wait_for_status:
        wait_for_cluster_status(
            test_case, test_case.cluster_id, status=wait_for_status)
def update_deploy_check(test_case, nodes, delete=False, run_ostf=True):
    """Apply a node-layout change, redeploy, and verify the plugin.

    :param test_case: test case object providing ``fuel_web``
    :param nodes: mapping of node names to role lists
    :param delete: when True the nodes are scheduled for deletion,
        otherwise for addition
    :param run_ostf: whether to run the OSTF suite after deployment
    """
    # Schedule the layout change (addition and deletion are exclusive).
    test_case.fuel_web.update_nodes(
        test_case.cluster_id,
        nodes_dict=nodes,
        pending_addition=not delete,
        pending_deletion=delete)
    # Redeploy with the new layout, then confirm the plugin still works.
    deploy_cluster(test_case)
    TestPlugin.check_plugin(test_case)
    if run_ostf:
        test_case.fuel_web.run_ostf(cluster_id=test_case.cluster_id)
def wait_for_cluster_status(test_case, cluster_id, status='operational',
                            timeout=60 * 25):
    """Wait for cluster status until timeout is reached.

    :param test_case: Test case object, usually it is 'self'
    :param cluster_id: cluster identifier
    :param status: Cluster status, available values:
        new, deployment, stopped, operational, error, remove,
        update, update_error
    :param timeout: the time that we are waiting.
    :return: time that we actually waited.
    """
    def cluster_reached_status():
        # True once the target cluster reports the requested status.
        clusters = test_case.fuel_web.client.list_clusters()
        return any(c['id'] == cluster_id and c['status'] == status
                   for c in clusters)

    waited = wait(cluster_reached_status, interval=30, timeout=timeout)
    logger.info('Wait cluster id:"{}" deploy done in {} sec.'.format(
        cluster_id, waited))
    return waited

View File

@ -1,75 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import requests
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_is_not_none
from proboscis.asserts import assert_true
from fuelweb_test import logger
from fuelweb_test.settings import ELASTICSEARCH_KIBANA_PLUGIN_PATH
from fuelweb_test.tests.base_test_case import TestBasic
import openstack_utils
# Plugin identifier and version as registered in Fuel.
NAME = 'elasticsearch_kibana'
VERSION = '0.9.0'


class TestPlugin(TestBasic):
    """Helper methods for testing of plugin."""

    def get_vip(self):
        """Return the management VIP of the Elasticsearch service, or None."""
        networks = self.fuel_web.client.get_networks(self.cluster_id)
        vips = networks.get('vips')
        es_vip = vips.get('es_vip_mgmt', {})
        return es_vip.get('ipaddr', None)

    def prepare_plugin(self, slaves, options=None):
        """Revert the base snapshot, install the plugin, create a cluster.

        :param slaves: number of slave nodes in the reverted snapshot
        :param options: extra cluster settings passed through to
            ``create_cluster_with_neutron``
        """
        self.env.revert_snapshot("ready_with_%d_slaves" % slaves)
        if options is None:
            options = {}
        self.env.admin_actions.upload_plugin(
            plugin=ELASTICSEARCH_KIBANA_PLUGIN_PATH)
        plugin_file = os.path.basename(ELASTICSEARCH_KIBANA_PLUGIN_PATH)
        self.env.admin_actions.install_plugin(plugin_file_name=plugin_file)
        openstack_utils.create_cluster_with_neutron(self, **options)

    def activate_plugin(self):
        """Enable the plugin on the cluster, failing fast if it is absent."""
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        plugin_exists = self.fuel_web.check_plugin_exists(self.cluster_id,
                                                          NAME)
        assert_true(plugin_exists, msg)
        self.fuel_web.update_plugin_settings(
            self.cluster_id, NAME, VERSION, {})

    def check_plugin(self):
        """Verify the Elasticsearch API and the Kibana HTTP server respond."""
        es_server_ip = self.get_vip()
        assert_is_not_none(es_server_ip,
                           "Failed to get the IP of Elasticsearch server")
        logger.debug("Check that Elasticsearch is ready")
        response = requests.get("http://{}:9200/".format(es_server_ip))
        msg = "Elasticsearch responded with {}, expected 200".format(
            response.status_code)
        assert_equal(response.status_code, 200, msg)
        logger.debug("Check that the HTTP server is running")
        response = requests.get("http://{}/".format(es_server_ip))
        msg = "HTTP server responded with {}, expected 200".format(
            response.status_code)
        assert_equal(response.status_code, 200, msg)

View File

@ -1,69 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import os
import re
from nose.plugins import Plugin
from paramiko.transport import _join_lingering_threads
class CloseSSHConnectionsPlugin(Plugin):
    """Closes all paramiko's ssh connections after each test case

    Plugin fixes proboscis disability to run cleanup of any kind.
    'afterTest' calls _join_lingering_threads function from paramiko,
    which stops all threads (set the state to inactive and joins for 10s)
    """

    # nose identifies the plugin by this name (--with-closesshconnections).
    name = 'closesshconnections'

    def options(self, parser, env=os.environ):
        # No extra command-line options beyond what nose provides.
        super(CloseSSHConnectionsPlugin, self).options(parser, env=env)

    def configure(self, options, conf):
        super(CloseSSHConnectionsPlugin, self).configure(options, conf)
        # Force-enable: the plugin must run even without its CLI switch.
        self.enabled = True

    def afterTest(self, *args, **kwargs):
        # Join paramiko's lingering threads so the process can exit cleanly.
        _join_lingering_threads()
def import_tests():
    """Import test modules for their side effect of registering
    proboscis test groups; the imported names are intentionally unused."""
    from tests import test_functional  # noqa
    from tests import test_integration  # noqa
    from tests import test_smoke_bvt  # noqa
def run_tests():
    """Register all test groups and run proboscis, exiting with its status."""
    from proboscis import TestProgram  # noqa
    import_tests()
    # Run Proboscis and exit; the SSH-cleanup plugin runs after each test.
    TestProgram(
        addplugins=[CloseSSHConnectionsPlugin()]
    ).run_and_exit()
if __name__ == '__main__':
    # Make the bundled fuel-qa checkout importable before loading tests.
    sys.path.append(sys.path[0] + "/fuel-qa")
    import_tests()
    from fuelweb_test.helpers.patching import map_test
    # Map patching tests when a patching proboscis group was requested on
    # the command line; master-node patching takes precedence.
    if any(re.search(r'--group=patching_master_tests', arg)
           for arg in sys.argv):
        map_test('master')
    elif any(re.search(r'--group=patching.*', arg) for arg in sys.argv):
        map_test('environment')
    run_tests()

View File

@ -1,13 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,152 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.tests.base_test_case import SetupEnvironment
from helpers import openstack_utils
from helpers.plugin import TestPlugin
@test(groups=["plugins"])
class IntegrationTests(TestPlugin):
    """Scale tests: delete and re-add a node of each role after deployment."""

    # Role provided by the Elasticsearch-Kibana plugin.
    role_name = 'elasticsearch_kibana'
    cluster_id = ''

    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=["elasticsearch_kibana_add_delete_controller_node"])
    @log_snapshot_after_test
    def elasticsearch_kibana_plugin_add_delete_controller_node(self):
        """Verify that Controller node can be deleted
        and added after deploying

        Scenario:
            1. Create an environment with
               "Neutron with tunneling segmentation"
               as a network configuration
            2. Enable and configure Elasticsearch-Kibana plugin
            3. Add 3 nodes with controller role
            4. Add 1 node with compute and storage roles
            5. Add 1 node with elasticsearch_kibana role
            6. Deploy cluster
            7. Run OSTF tests
            8. Delete a Controller node and deploy changes
            9. Run OSTF tests
            10. Add a node with "Controller" role and deploy changes
            11. Run OSTF tests. All steps must be completed successfully,
                without any errors.
        """
        self.prepare_plugin(slaves=5)
        self.activate_plugin()
        conf_no_controller = {
            'slave-01': ['controller'],
            'slave-02': ['controller'],
            # Here slave-03
            'slave-04': ['compute', 'cinder'],
            'slave-05': [self.role_name],
        }
        conf_ctrl = {'slave-03': ['controller']}
        # Deploy the full layout, then delete and re-add the controller.
        openstack_utils.update_deploy_check(
            self, dict(conf_no_controller, **conf_ctrl))
        openstack_utils.update_deploy_check(self, conf_ctrl, delete=True)
        openstack_utils.update_deploy_check(self, conf_ctrl)

    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=["elasticsearch_kibana_plugin_add_delete_compute_node"])
    @log_snapshot_after_test
    def elasticsearch_kibana_plugin_add_delete_compute_node(self):
        """Verify that Compute node can be deleted and added after deploying

        Scenario:
            1. Create an environment with
               "Neutron with tunneling segmentation"
               as a network configuration
            2. Enable and configure Elasticsearch-Kibana plugin
            3. Add 1 controller, 3 compute + storage nodes
            4. Add 1 node with elasticsearch_kibana role
            5. Deploy cluster
            6. Run OSTF tests
            7. Delete a compute node and deploy changes
            8. Run OSTF tests
            9. Add a node with "compute" role and deploy changes
            10. Run OSTF tests
        """
        self.prepare_plugin(slaves=5)
        self.activate_plugin()
        conf_no_controller = {
            'slave-01': ['controller'],
            'slave-02': ['compute', 'cinder'],
            'slave-03': ['compute', 'cinder'],
            # Here slave-4
            'slave-05': [self.role_name],
        }
        conf_compute = {'slave-04': ['compute', 'cinder']}
        # Deploy the full layout, then delete and re-add the compute node.
        openstack_utils.update_deploy_check(
            self, dict(conf_no_controller, **conf_compute))
        openstack_utils.update_deploy_check(self, conf_compute, delete=True)
        openstack_utils.update_deploy_check(self, conf_compute)

    @test(depends_on=[SetupEnvironment.prepare_slaves_9],
          groups=["elasticsearch_kibana_plugin_add_delete_elasticsearch_node"])
    @log_snapshot_after_test
    def elasticsearch_kibana_plugin_add_delete_elasticsearch_node(self):
        """Verify that Elasticsearch node can be deleted and added after
        deploying

        Scenario:
            1. Create an environment with
               "Neutron with tunneling segmentation"
               as a network configuration
            2. Enable and configure Elasticsearch-Kibana plugin
            3. Add 1 controller, 1 compute + storage nodes
            4. Add 3 nodes with elasticsearch_kibana roles
            5. Deploy cluster
            6. Run OSTF tests
            7. Delete an elasticsearch_kibana node and deploy changes
            8. Run OSTF tests
            9. Add a node with "elasticsearch_kibana" role and deploy changes
            10. Run OSTF tests
        """
        self.prepare_plugin(slaves=9)
        self.activate_plugin()
        conf_no_controller = {
            'slave-01': ['controller'],
            'slave-02': ['compute', 'cinder'],
            # Here slave-3
            'slave-04': [self.role_name],
            'slave-05': [self.role_name],
        }
        # NOTE(review): despite its name, conf_compute holds the
        # elasticsearch_kibana node being deleted and re-added here.
        conf_compute = {'slave-03': [self.role_name]}
        openstack_utils.update_deploy_check(
            self, dict(conf_no_controller, **conf_compute))
        openstack_utils.update_deploy_check(self, conf_compute, delete=True)
        openstack_utils.update_deploy_check(self, conf_compute)

View File

@ -1,215 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.tests.base_test_case import SetupEnvironment
from helpers import openstack_utils
from helpers.plugin import TestPlugin
@test(groups=["plugins"])
class IntegrationTests(TestPlugin):
    """Class with integration tests for the Elasticsearch-Kibana plugin."""

    # Role provided by the Elasticsearch-Kibana plugin.
    role_name = 'elasticsearch_kibana'
    cluster_id = ''

    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=["elasticsearch_kibana_ha"])
    @log_snapshot_after_test
    def elasticsearch_kibana_ha(self):
        """Install Elasticsearch Kibana plugin and deploy cluster
        with 3 controllers, 1 compute, 1 elasticsearch_kibana role

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute and cinder role
            6. Add 1 node with elasticsearch_kibana role
            7. Deploy the cluster
            8. Check that plugin is working
            9. Run OSTF

        Duration 120m
        Snapshot elasticsearch_kibana_ha
        """
        self.prepare_plugin(slaves=5)
        self.activate_plugin()
        self.fuel_web.update_nodes(
            self.cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': [self.role_name]
            }
        )
        openstack_utils.deploy_cluster(self)
        self.check_plugin()
        self.fuel_web.run_ostf(cluster_id=self.cluster_id)
        self.env.make_snapshot("elasticsearch_kibana_ha")

    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=["elasticsearch_kibana_clustering"])
    @log_snapshot_after_test
    def elasticsearch_kibana_clustering(self):
        """Deploy a cluster with the Elasticsearch-Kibana plugin clustering

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute and cinder role
            6. Add 3 nodes with elasticsearch_kibana role
            7. Deploy the cluster
            8. Check that plugin is working
            9. Run OSTF

        Duration 120m
        Snapshot elasticsearch_kibana_clustering
        """
        self.prepare_plugin(slaves=5)
        self.activate_plugin()
        self.fuel_web.update_nodes(
            self.cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute', 'cinder'],
                'slave-03': [self.role_name],
                'slave-04': [self.role_name],
                'slave-05': [self.role_name]
            }
        )
        openstack_utils.deploy_cluster(self)
        self.check_plugin()
        self.fuel_web.run_ostf(cluster_id=self.cluster_id)
        self.env.make_snapshot("elasticsearch_kibana_clustering")

    @test(depends_on=[SetupEnvironment.prepare_slaves_9],
          groups=["elasticsearch_kibana_ha_clustering"])
    @log_snapshot_after_test
    def elasticsearch_kibana_ha_clustering(self):
        """Deploy a cluster in HA mode with the Elasticsearch-Kibana plugin
        clustering

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 1 node with compute role
            6. Add 1 node with cinder role
            7. Add 3 nodes with elasticsearch_kibana role
            8. Deploy the cluster
            9. Check that plugin is working
            10. Run OSTF

        Duration 120m
        Snapshot elasticsearch_kibana_ha_clustering
        """
        self.prepare_plugin(slaves=9)
        self.activate_plugin()
        self.fuel_web.update_nodes(
            self.cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['cinder'],
                'slave-06': [self.role_name],
                'slave-07': [self.role_name],
                'slave-08': [self.role_name]
            }
        )
        openstack_utils.deploy_cluster(self)
        self.check_plugin()
        self.fuel_web.run_ostf(cluster_id=self.cluster_id)
        self.env.make_snapshot("elasticsearch_kibana_ha_clustering")

    @test(depends_on=[SetupEnvironment.prepare_slaves_5],
          groups=["elasticsearch_kibana_ceph"])
    @log_snapshot_after_test
    def elasticsearch_kibana_ceph(self):
        """Deploy a cluster in HA mode with the Elasticsearch-Kibana plugin
        and Ceph storage

        Scenario:
            1. Create an environment with "Neutron with tunneling
               segmentation" as a network configuration and CEPH storage
            2. Install plugin
            3. Add 3 nodes with "controller" + "Ceph-OSD" multirole
            4. Add 1 node with "compute" role
            5. Add 1 node with elasticsearch_kibana role
            6. Deploy the cluster with plugin
            7. Check that plugin is working
            8. Run OSTF
        """
        # Ceph-backed storage options override the LVM defaults used by
        # create_cluster_with_neutron.
        self.prepare_plugin(
            slaves=5,
            options={
                'images_ceph': True,
                'volumes_ceph': True,
                'ephemeral_ceph': True,
                'objects_ceph': True,
                'volumes_lvm': False,
                'osd_pool_size': '3'
            })
        self.activate_plugin()
        self.fuel_web.update_nodes(
            self.cluster_id,
            {
                'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['controller', 'ceph-osd'],
                'slave-03': ['controller', 'ceph-osd'],
                'slave-04': ['compute'],
                'slave-05': [self.role_name]
            })
        openstack_utils.deploy_cluster(self)
        self.check_plugin()
        self.fuel_web.run_ostf(cluster_id=self.cluster_id)
        self.env.make_snapshot("elasticsearch_kibana_ceph")

View File

@ -1,119 +0,0 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.tests.base_test_case import SetupEnvironment
from proboscis import test
from helpers import openstack_utils
from helpers.plugin import TestPlugin
@test(groups=["plugins"])
class ElasticsearchKibanaPlugin(TestPlugin):
    """Smoke and BVT tests for the Elasticsearch-Kibana plugin."""

    # Role provided by the Elasticsearch-Kibana plugin.
    role_name = 'elasticsearch_kibana'
    cluster_id = ''

    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
          groups=["install_elasticsearch_kibana"])
    @log_snapshot_after_test
    def install_elasticsearch_kibana(self):
        """Install Elasticsearch Kibana Plugin and create cluster

        Scenario:
            1. Revert snapshot "ready_with_3_slaves"
            2. Upload plugin to the master node
            3. Install plugin
            4. Create cluster

        Duration 20 min
        """
        self.prepare_plugin(slaves=3)

    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
          groups=["elasticsearch_kibana_smoke"])
    @log_snapshot_after_test
    def elasticsearch_kibana_smoke(self):
        """Deploy a cluster with Elasticsearch Kibana Plugin

        Scenario:
            1. Revert snapshot "ready_with_3_slaves"
            2. Create cluster
            3. Add a node with controller role
            4. Add a node with compute role
            5. Add a node with elasticsearch_kibana role
            6. Enable Elasticsearch Kibana plugin
            7. Deploy the cluster with plugin

        Duration 90 min
        """
        self.prepare_plugin(slaves=3)
        self.activate_plugin()
        self.fuel_web.update_nodes(
            self.cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': [self.role_name],
            })
        # deploy cluster
        openstack_utils.deploy_cluster(self)

    @test(depends_on=[SetupEnvironment.prepare_slaves_3],
          groups=["elasticsearch_kibana_smoke_bvt"])
    @log_snapshot_after_test
    def elasticsearch_kibana_smoke_bvt(self):
        """BVT test for Elasticsearch Kibana plugin

        Install Elasticsearch Kibana plugin and deploy cluster
        with 1 controller, 1 compute, 1 elasticsearch_kibana role

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute and cinder role
            6. Add 1 node with elasticsearch_kibana role
            7. Deploy the cluster
            8. Run OSTF

        Duration 120m
        """
        self.prepare_plugin(slaves=3)
        self.activate_plugin()
        self.fuel_web.update_nodes(
            self.cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': [self.role_name]
            }
        )
        openstack_utils.deploy_cluster(self)
        # Fix: the base class defines check_plugin(); the original called a
        # non-existent check_elasticsearch_plugin(), which would raise
        # AttributeError at runtime.
        self.check_plugin()
        self.fuel_web.run_ostf(cluster_id=self.cluster_id)

View File

@ -1,491 +0,0 @@
#!/bin/sh
# Jenkins driver for building the test ISO and running plugin system tests.
PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"

# functions
# Distinct exit codes, one per failure mode, so callers can tell them apart.
INVALIDOPTS_ERR=100
NOJOBNAME_ERR=101
NOISOPATH_ERR=102
NOTASKNAME_ERR=103
NOWORKSPACE_ERR=104
DEEPCLEAN_ERR=105
MAKEISO_ERR=106
NOISOFOUND_ERR=107
COPYISO_ERR=108
SYMLINKISO_ERR=109
CDWORKSPACE_ERR=110
ISODOWNLOAD_ERR=111
INVALIDTASK_ERR=112

# Defaults
export REBOOT_TIMEOUT=${REBOOT_TIMEOUT:-5000}
export ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT=${ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT:-true}

# Export specified settings
# Fall back to sane hardware defaults when the caller did not export them.
if [ -z $NODE_VOLUME_SIZE ]; then export NODE_VOLUME_SIZE=350; fi
if [ -z $OPENSTACK_RELEASE ]; then export OPENSTACK_RELEASE=Ubuntu; fi
if [ -z $ENV_NAME ]; then export ENV_NAME="elasticsearch_kibana"; fi
if [ -z $ADMIN_NODE_MEMORY ]; then export ADMIN_NODE_MEMORY=3072; fi
if [ -z $ADMIN_NODE_CPU ]; then export ADMIN_NODE_CPU=2; fi
if [ -z $SLAVE_NODE_MEMORY ]; then export SLAVE_NODE_MEMORY=4096; fi
if [ -z $SLAVE_NODE_CPU ]; then export SLAVE_NODE_CPU=2; fi

# Init and update submodule (the bundled fuel-qa checkout).
git submodule init && git submodule update
ShowHelp() {
    # Print usage information. Typos in the user-facing text are fixed
    # ("path" -> "pass", "guesing" -> "guessed", "places" -> "placed", etc.).
    cat << EOF
System Tests Script

It can perform several actions depending on Jenkins JOB_NAME it's run from
or it can take names from exported environment variables or command line options
if you do need to override them.

-w (dir)    - Path to workspace where fuelweb git repository was checked out.
              Uses Jenkins' WORKSPACE if not set
-e (name)   - Directly specify environment name used in tests
              Uses ENV_NAME variable if set.
-j (name)   - Name of this job. Determines ISO name, Task name and used by tests.
              Uses Jenkins' JOB_NAME if not set
-v          - Do not use virtual environment
-V (dir)    - Path to python virtual environment
-i (file)   - Full path to ISO file to build or use for tests.
              Made from iso dir and name if not set.
-t (name)   - Name of task this script should perform. Should be one of defined ones.
              Taken from Jenkins' job's suffix if not set.
-o (str)    - Allows you to pass any extra command line option to the test job if you
              want to use some parameters.
-a (str)    - Allows you to pass NOSE_ATTR to the test job if you want
              to use some parameters.
-A (str)    - Allows you to pass NOSE_EVAL_ATTR if you want to enter attributes
              as python expressions.
-m (name)   - Use this mirror to build ISO from.
              Uses 'srt' if not set.
-U          - ISO URL for tests.
              Null by default.
-r (yes/no) - Should the built ISO file be placed with a build number tag and
              symlinked to the last build or just copied over the last file.
-b (num)    - Allows you to override Jenkins' build number if you need to.
-l (dir)    - Path to logs directory. Can be set by LOGS_DIR environment variable.
              Uses WORKSPACE/logs if not set.
-d          - Dry run mode. Only show what would be done and do nothing.
              Useful for debugging.
-k          - Keep previously created test environment before tests run
-K          - Keep test environment after tests are finished
-h          - Show this help page

Most variables are guessed from Jenkins' job name but can be overridden
by exported variable before script is run or by one of command line options.

You can override following variables using export VARNAME="value" before running this script
WORKSPACE  - path to directory where Fuelweb repository was checked out by Jenkins or manually
JOB_NAME   - name of Jenkins job that determines which task should be done and ISO file name.

If task name is "iso" it will make iso file
Other defined names will run Nose tests using previously built ISO file.

ISO file name is taken from job name prefix
Task name is taken from job name suffix
Separator is one dot '.'

For example if JOB_NAME is:
mytest.somestring.iso
ISO name: mytest.iso
Task name: iso
If run with such JOB_NAME iso file with name mytest.iso will be created

If JOB_NAME is:
mytest.somestring.node
ISO name: mytest.iso
Task name: node

If script was run with this JOB_NAME node tests will be using ISO file mytest.iso.

First you should run mytest.somestring.iso job to create mytest.iso.
Then you can run mytest.somestring.node job to start tests using mytest.iso and other tests too.
EOF
}
GlobalVariables() {
    # Derive the remaining settings from JOB_NAME and environment defaults.

    # where built iso's should be placed
    # use hardcoded default if not set before by export
    ISO_DIR="${ISO_DIR:=/var/www/fuelweb-iso}"

    # name of iso file
    # taken from jenkins job prefix
    # if not set before by variable export
    if [ -z "${ISO_NAME}" ]; then
        ISO_NAME="${JOB_NAME%.*}.iso"
    fi

    # full path where iso file should be placed
    # made from iso name and path to iso shared directory
    # if it was not overridden by options or export
    if [ -z "${ISO_PATH}" ]; then
        ISO_PATH="${ISO_DIR}/${ISO_NAME}"
    fi

    # what task should be run
    # it's taken from jenkins job name suffix if not set by options
    if [ -z "${TASK_NAME}" ]; then
        TASK_NAME="${JOB_NAME##*.}"
    fi

    # do we want to keep iso's for each build or just copy over a single file
    ROTATE_ISO="${ROTATE_ISO:=yes}"

    # choose mirror to build iso from. Default is 'srt' for Saratov's mirror
    # you can change mirror by exporting USE_MIRROR variable before running this script
    USE_MIRROR="${USE_MIRROR:=srt}"

    # only show what commands would be executed but do nothing
    # this feature is useful if you want to debug this script's behaviour
    DRY_RUN="${DRY_RUN:=no}"

    # run inside a python virtualenv unless disabled with -v
    VENV="${VENV:=yes}"
}
GetoptsVariables() {
    # Parse command line options into globals; see ShowHelp for meanings.
    while getopts ":w:j:i:t:o:a:A:m:U:r:b:V:l:dkKe:v:h" opt; do
        case $opt in
            w)
                WORKSPACE="${OPTARG}"
                ;;
            j)
                JOB_NAME="${OPTARG}"
                ;;
            i)
                ISO_PATH="${OPTARG}"
                ;;
            t)
                TASK_NAME="${OPTARG}"
                ;;
            o)
                # accumulates: -o may be given several times
                TEST_OPTIONS="${TEST_OPTIONS} ${OPTARG}"
                ;;
            a)
                NOSE_ATTR="${OPTARG}"
                ;;
            A)
                NOSE_EVAL_ATTR="${OPTARG}"
                ;;
            m)
                USE_MIRROR="${OPTARG}"
                ;;
            U)
                ISO_URL="${OPTARG}"
                ;;
            r)
                ROTATE_ISO="${OPTARG}"
                ;;
            b)
                BUILD_NUMBER="${OPTARG}"
                ;;
            V)
                VENV_PATH="${OPTARG}"
                ;;
            l)
                LOGS_DIR="${OPTARG}"
                ;;
            k)
                KEEP_BEFORE="yes"
                ;;
            K)
                KEEP_AFTER="yes"
                ;;
            e)
                ENV_NAME="${OPTARG}"
                ;;
            d)
                DRY_RUN="yes"
                ;;
            v)
                VENV="no"
                ;;
            h)
                ShowHelp
                exit 0
                ;;
            \?)
                echo "Invalid option: -$OPTARG"
                ShowHelp
                exit $INVALIDOPTS_ERR
                ;;
            :)
                echo "Option -$OPTARG requires an argument."
                ShowHelp
                exit $INVALIDOPTS_ERR
                ;;
        esac
    done
}
CheckVariables() {
    # Abort with a dedicated exit code when any mandatory variable is unset.
    [ -n "${JOB_NAME}" ] || { echo "Error! JOB_NAME is not set!"; exit $NOJOBNAME_ERR; }
    [ -n "${ISO_PATH}" ] || { echo "Error! ISO_PATH is not set!"; exit $NOISOPATH_ERR; }
    [ -n "${TASK_NAME}" ] || { echo "Error! TASK_NAME is not set!"; exit $NOTASKNAME_ERR; }
    [ -n "${WORKSPACE}" ] || { echo "Error! WORKSPACE is not set!"; exit $NOWORKSPACE_ERR; }
}
MakeISO() {
    # Build an ISO image and publish it to ISO_DIR; every step echoes
    # instead of executing when DRY_RUN=yes. Exits the script in all paths.

    # clean previous garbage
    if [ "${DRY_RUN}" = "yes" ]; then
        echo make deep_clean
    else
        make deep_clean
    fi
    ec="${?}"
    if [ "${ec}" -gt "0" ]; then
        echo "Error! Deep clean failed!"
        exit $DEEPCLEAN_ERR
    fi

    # create ISO file
    export USE_MIRROR
    if [ "${DRY_RUN}" = "yes" ]; then
        echo make iso
    else
        make iso
    fi
    ec=$?
    if [ "${ec}" -gt "0" ]; then
        echo "Error making ISO!"
        exit $MAKEISO_ERR
    fi

    if [ "${DRY_RUN}" = "yes" ]; then
        ISO="${WORKSPACE}/build/iso/fuel.iso"
    else
        ISO="`ls ${WORKSPACE}/build/iso/*.iso | head -n 1`"
        # check that ISO file exists
        if [ ! -f "${ISO}" ]; then
            echo "Error! ISO file not found!"
            exit $NOISOFOUND_ERR
        fi
    fi

    # copy ISO file to storage dir
    # if rotation is enabled and build number is available
    # save iso to tagged file and symlink to the last build
    # if rotation is not enabled just copy iso to iso_dir
    if [ "${ROTATE_ISO}" = "yes" -a "${BUILD_NUMBER}" != "" ]; then
        # Fix: strip the ".iso" *suffix* with '%' so "/dir/name.iso" becomes
        # "/dir/name_42.iso". The original used '#' (prefix strip), which
        # never matched and produced names like "/dir/name.iso_42.iso".
        NEW_BUILD_ISO_PATH="${ISO_PATH%.iso}_${BUILD_NUMBER}.iso"
        if [ "${DRY_RUN}" = "yes" ]; then
            echo cp "${ISO}" "${NEW_BUILD_ISO_PATH}"
        else
            cp "${ISO}" "${NEW_BUILD_ISO_PATH}"
        fi
        ec=$?
        if [ "${ec}" -gt "0" ]; then
            echo "Error! Copy ${ISO} to ${NEW_BUILD_ISO_PATH} failed!"
            exit $COPYISO_ERR
        fi

        # create symlink to the last built ISO file
        if [ "${DRY_RUN}" = "yes" ]; then
            echo ln -sf "${NEW_BUILD_ISO_PATH}" "${ISO_PATH}"
        else
            ln -sf "${NEW_BUILD_ISO_PATH}" "${ISO_PATH}"
        fi
        ec=$?
        if [ "${ec}" -gt "0" ]; then
            echo "Error! Create symlink from ${NEW_BUILD_ISO_PATH} to ${ISO_PATH} failed!"
            exit $SYMLINKISO_ERR
        fi
    else
        # just copy file to shared dir
        if [ "${DRY_RUN}" = "yes" ]; then
            echo cp "${ISO}" "${ISO_PATH}"
        else
            cp "${ISO}" "${ISO_PATH}"
        fi
        ec=$?
        if [ "${ec}" -gt "0" ]; then
            echo "Error! Copy ${ISO} to ${ISO_PATH} failed!"
            exit $COPYISO_ERR
        fi
    fi

    # Defensive recheck kept from the original; both branches above already
    # exit on failure, so this only fires if the logic above changes.
    if [ "${ec}" -gt "0" ]; then
        echo "Error! Copy ISO from ${ISO} to ${ISO_PATH} failed!"
        exit $COPYISO_ERR
    fi
    echo "Finished building ISO: ${ISO_PATH}"
    exit 0
}
CdWorkSpace() {
    # Switch into the workspace directory, or fail with a dedicated exit
    # code; in dry-run mode only echo the command.
    if [ "${DRY_RUN}" = "yes" ]; then
        echo cd "${WORKSPACE}"
        return
    fi
    cd "${WORKSPACE}"
    if [ "${?}" -gt "0" ]; then
        echo "Error! Cannot cd to WORKSPACE!"
        exit $CDWORKSPACE_ERR
    fi
}
RunTest() {
    # Run the system tests for this task: fetch the ISO when missing,
    # activate the virtualenv, (re)create the devops environment, run the
    # proboscis suite, and tear the environment down afterwards.

    # check if iso file exists
    if [ ! -f "${ISO_PATH}" ]; then
        if [ -z "${ISO_URL}" -a "${DRY_RUN}" != "yes" ]; then
            echo "Error! File ${ISO_PATH} not found and no ISO_URL (-U key) for downloading!"
            exit $NOISOFOUND_ERR
        else
            if [ "${DRY_RUN}" = "yes" ]; then
                echo wget -c ${ISO_URL} -O ${ISO_PATH}
            else
                echo "No ${ISO_PATH} found. Trying to download file."
                wget -c ${ISO_URL} -O ${ISO_PATH}
                rc=$?
                if [ $rc -ne 0 ]; then
                    echo "Failed to fetch ISO from ${ISO_URL}"
                    exit $ISODOWNLOAD_ERR
                fi
            fi
        fi
    fi

    if [ -z "${VENV_PATH}" ]; then
        VENV_PATH="/home/jenkins/venv-nailgun-tests"
    fi

    # run python virtualenv
    if [ "${VENV}" = "yes" ]; then
        if [ "${DRY_RUN}" = "yes" ]; then
            echo . $VENV_PATH/bin/activate
        else
            . $VENV_PATH/bin/activate
        fi
    fi

    if [ "${ENV_NAME}" = "" ]; then
        ENV_NAME="${JOB_NAME}_system_test"
    fi

    if [ "${LOGS_DIR}" = "" ]; then
        LOGS_DIR="${WORKSPACE}/logs"
    fi

    # NOTE(review): '-f' tests for a regular file; '-d' looks intended here,
    # though mkdir -p is harmless when the directory already exists.
    if [ ! -f "$LOGS_DIR" ]; then
        mkdir -p $LOGS_DIR
    fi

    export ENV_NAME
    export LOGS_DIR
    export ISO_PATH

    if [ "${KEEP_BEFORE}" != "yes" ]; then
        # remove previous environment
        if [ "${DRY_RUN}" = "yes" ]; then
            echo dos.py erase "${ENV_NAME}"
        else
            if [ $(dos.py list | grep "^${ENV_NAME}\$") ]; then
                dos.py erase "${ENV_NAME}"
            fi
        fi
    fi

    # gather additional options for this nose test run
    OPTS=""
    if [ -n "${NOSE_ATTR}" ]; then
        OPTS="${OPTS} -a ${NOSE_ATTR}"
    fi
    if [ -n "${NOSE_EVAL_ATTR}" ]; then
        OPTS="${OPTS} -A ${NOSE_EVAL_ATTR}"
    fi
    if [ -n "${TEST_OPTIONS}" ]; then
        OPTS="${OPTS} ${TEST_OPTIONS}"
    fi

    # run python test set to create environments, deploy and test product
    if [ "${DRY_RUN}" = "yes" ]; then
        echo export PYTHONPATH="${PYTHONPATH:+${PYTHONPATH}:}${WORKSPACE}"
        echo python plugin_test/run_tests.py -q --nologcapture --with-xunit ${OPTS}
    else
        export PYTHONPATH="${PYTHONPATH:+${PYTHONPATH}:}${WORKSPACE}"
        echo ${PYTHONPATH}
        python plugin_test/run_tests.py -q --nologcapture --with-xunit ${OPTS}
    fi
    # propagate the test run's exit status after cleanup
    ec=$?

    if [ "${KEEP_AFTER}" != "yes" ]; then
        # remove environment after tests
        if [ "${DRY_RUN}" = "yes" ]; then
            echo dos.py destroy "${ENV_NAME}"
        else
            dos.py destroy "${ENV_NAME}"
        fi
    fi

    exit "${ec}"
}
RouteTasks() {
    # Dispatch on TASK_NAME; each job function exits the script itself,
    # so reaching the final exit means the task completed via fall-through.
    if [ "${TASK_NAME}" = "test" ]; then
        RunTest
    elif [ "${TASK_NAME}" = "iso" ]; then
        MakeISO
    else
        echo "Unknown task: ${TASK_NAME}!"
        exit $INVALIDTASK_ERR
    fi
    exit 0
}
# MAIN
# first we want to get variables from command line options
GetoptsVariables ${@}

# then we define global variables and their defaults when needed
GlobalVariables

# check that we have all critical variables set
CheckVariables

# chdir into our working directory unless we dry run
CdWorkSpace

# finally we can choose what to do according to TASK_NAME
RouteTasks