Add daisy tempest.

Change-Id: I5831348eaf8a427afb50836089b246a8275cd57e
This commit is contained in:
KongWei 2016-07-12 00:26:43 +00:00
parent 97b4c5428b
commit b275409006
29 changed files with 6889 additions and 0 deletions

428
test/tempest/clients.py Normal file
View File

@ -0,0 +1,428 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
from tempest.common import cred_provider
from tempest.common import negative_rest_client
from tempest import config
from tempest import manager
from tempest.services.baremetal.v1.json.baremetal_client import \
BaremetalClientJSON
from tempest.services import botoclients
from tempest.services.compute.json.agents_client import \
AgentsClientJSON
from tempest.services.compute.json.aggregates_client import \
AggregatesClientJSON
from tempest.services.compute.json.availability_zone_client import \
AvailabilityZoneClientJSON
from tempest.services.compute.json.baremetal_nodes_client import \
BaremetalNodesClientJSON
from tempest.services.compute.json.certificates_client import \
CertificatesClientJSON
from tempest.services.compute.json.extensions_client import \
ExtensionsClientJSON
from tempest.services.compute.json.fixed_ips_client import FixedIPsClientJSON
from tempest.services.compute.json.flavors_client import FlavorsClientJSON
from tempest.services.compute.json.floating_ips_client import \
FloatingIPsClientJSON
from tempest.services.compute.json.hosts_client import HostsClientJSON
from tempest.services.compute.json.hypervisor_client import \
HypervisorClientJSON
from tempest.services.compute.json.images_client import ImagesClientJSON
from tempest.services.compute.json.instance_usage_audit_log_client import \
InstanceUsagesAuditLogClientJSON
from tempest.services.compute.json.interfaces_client import \
InterfacesClientJSON
from tempest.services.compute.json.keypairs_client import KeyPairsClientJSON
from tempest.services.compute.json.limits_client import LimitsClientJSON
from tempest.services.compute.json.migrations_client import \
MigrationsClientJSON
from tempest.services.compute.json.networks_client import NetworksClientJSON
from tempest.services.compute.json.quotas_client import QuotaClassesClientJSON
from tempest.services.compute.json.quotas_client import QuotasClientJSON
from tempest.services.compute.json.security_group_default_rules_client import \
SecurityGroupDefaultRulesClientJSON
from tempest.services.compute.json.security_groups_client import \
SecurityGroupsClientJSON
from tempest.services.compute.json.servers_client import ServersClientJSON
from tempest.services.compute.json.services_client import ServicesClientJSON
from tempest.services.compute.json.tenant_networks_client import \
TenantNetworksClientJSON
from tempest.services.compute.json.tenant_usages_client import \
TenantUsagesClientJSON
from tempest.services.compute.json.volumes_extensions_client import \
VolumesExtensionsClientJSON
from tempest.services.data_processing.v1_1.data_processing_client import \
DataProcessingClient
from tempest.services.database.json.flavors_client import \
DatabaseFlavorsClientJSON
from tempest.services.database.json.limits_client import \
DatabaseLimitsClientJSON
from tempest.services.database.json.versions_client import \
DatabaseVersionsClientJSON
from tempest.services.identity.v2.json.identity_client import \
IdentityClientJSON
from tempest.services.identity.v2.json.token_client import TokenClientJSON
from tempest.services.identity.v3.json.credentials_client import \
CredentialsClientJSON
from tempest.services.identity.v3.json.endpoints_client import \
EndPointClientJSON
from tempest.services.identity.v3.json.identity_client import \
IdentityV3ClientJSON
from tempest.services.identity.v3.json.policy_client import PolicyClientJSON
from tempest.services.identity.v3.json.region_client import RegionClientJSON
from tempest.services.identity.v3.json.service_client import \
ServiceClientJSON
from tempest.services.identity.v3.json.token_client import V3TokenClientJSON
from tempest.services.image.v1.json.image_client import ImageClientJSON
from tempest.services.image.v2.json.image_client import ImageClientV2JSON
from tempest.services.messaging.json.messaging_client import \
MessagingClientJSON
from tempest.services.network.json.network_client import NetworkClientJSON
from tempest.services.object_storage.account_client import AccountClient
from tempest.services.object_storage.container_client import ContainerClient
from tempest.services.object_storage.object_client import ObjectClient
from tempest.services.orchestration.json.orchestration_client import \
OrchestrationClient
from tempest.services.telemetry.json.telemetry_client import \
TelemetryClientJSON
from tempest.services.volume.json.admin.volume_hosts_client import \
VolumeHostsClientJSON
from tempest.services.volume.json.admin.volume_quotas_client import \
VolumeQuotasClientJSON
from tempest.services.volume.json.admin.volume_services_client import \
VolumesServicesClientJSON
from tempest.services.volume.json.admin.volume_types_client import \
VolumeTypesClientJSON
from tempest.services.volume.json.availability_zone_client import \
VolumeAvailabilityZoneClientJSON
from tempest.services.volume.json.backups_client import BackupsClientJSON
from tempest.services.volume.json.extensions_client import \
ExtensionsClientJSON as VolumeExtensionClientJSON
from tempest.services.volume.json.qos_client import QosSpecsClientJSON
from tempest.services.volume.json.snapshots_client import SnapshotsClientJSON
from tempest.services.volume.json.volumes_client import VolumesClientJSON
from tempest.services.volume.v2.json.admin.volume_hosts_client import \
VolumeHostsV2ClientJSON
from tempest.services.volume.v2.json.admin.volume_quotas_client import \
VolumeQuotasV2Client
from tempest.services.volume.v2.json.admin.volume_services_client import \
VolumesServicesV2ClientJSON
from tempest.services.volume.v2.json.admin.volume_types_client import \
VolumeTypesV2ClientJSON
from tempest.services.volume.v2.json.availability_zone_client import \
VolumeV2AvailabilityZoneClientJSON
from tempest.services.volume.v2.json.backups_client import BackupsClientV2JSON
from tempest.services.volume.v2.json.extensions_client import \
ExtensionsV2ClientJSON as VolumeV2ExtensionClientJSON
from tempest.services.volume.v2.json.qos_client import QosSpecsV2ClientJSON
from tempest.services.volume.v2.json.snapshots_client import \
SnapshotsV2ClientJSON
from tempest.services.volume.v2.json.volumes_client import VolumesV2ClientJSON
CONF = config.CONF
LOG = logging.getLogger(__name__)
class Manager(manager.Manager):
    """
    Top level manager for OpenStack tempest clients.

    Instantiates one service client per OpenStack API (compute, volume,
    identity, image, network, ...) bound to a single set of credentials.
    """

    # Keyword arguments applied to every service client built below.
    default_params = {
        'disable_ssl_certificate_validation':
            CONF.identity.disable_ssl_certificate_validation,
        'ca_certs': CONF.identity.ca_certificates_file,
        'trace_requests': CONF.debug.trace_requests
    }

    # NOTE: Tempest uses timeout values of compute API if project specific
    # timeout values don't exist.
    default_params_with_timeout_values = {
        'build_interval': CONF.compute.build_interval,
        'build_timeout': CONF.compute.build_timeout
    }
    default_params_with_timeout_values.update(default_params)

    def __init__(self, credentials=None, service=None):
        """Build the full set of service clients.

        :param credentials: credentials for the auth provider; when None
            the base ``manager.Manager`` resolves its default credentials.
        :param service: service name, forwarded only to the negative REST
            client.
        """
        super(Manager, self).__init__(credentials=credentials)
        # Per-service helpers group the bulk of the client construction.
        self._set_compute_clients()
        self._set_database_clients()
        self._set_identity_clients()
        self._set_volume_clients()
        self._set_object_storage_clients()
        self.baremetal_client = BaremetalClientJSON(
            self.auth_provider,
            CONF.baremetal.catalog_type,
            CONF.identity.region,
            endpoint_type=CONF.baremetal.endpoint_type,
            **self.default_params_with_timeout_values)
        self.network_client = NetworkClientJSON(
            self.auth_provider,
            CONF.network.catalog_type,
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **self.default_params)
        self.messaging_client = MessagingClientJSON(
            self.auth_provider,
            CONF.messaging.catalog_type,
            CONF.identity.region,
            **self.default_params_with_timeout_values)
        # Optional services: only build clients when the deployment
        # advertises the service as available.
        if CONF.service_available.ceilometer:
            self.telemetry_client = TelemetryClientJSON(
                self.auth_provider,
                CONF.telemetry.catalog_type,
                CONF.identity.region,
                endpoint_type=CONF.telemetry.endpoint_type,
                **self.default_params_with_timeout_values)
        if CONF.service_available.glance:
            self.image_client = ImageClientJSON(
                self.auth_provider,
                CONF.image.catalog_type,
                CONF.image.region or CONF.identity.region,
                endpoint_type=CONF.image.endpoint_type,
                build_interval=CONF.image.build_interval,
                build_timeout=CONF.image.build_timeout,
                **self.default_params)
            self.image_client_v2 = ImageClientV2JSON(
                self.auth_provider,
                CONF.image.catalog_type,
                CONF.image.region or CONF.identity.region,
                endpoint_type=CONF.image.endpoint_type,
                build_interval=CONF.image.build_interval,
                build_timeout=CONF.image.build_timeout,
                **self.default_params)
        self.orchestration_client = OrchestrationClient(
            self.auth_provider,
            CONF.orchestration.catalog_type,
            CONF.orchestration.region or CONF.identity.region,
            endpoint_type=CONF.orchestration.endpoint_type,
            build_interval=CONF.orchestration.build_interval,
            build_timeout=CONF.orchestration.build_timeout,
            **self.default_params)
        self.data_processing_client = DataProcessingClient(
            self.auth_provider,
            CONF.data_processing.catalog_type,
            CONF.identity.region,
            endpoint_type=CONF.data_processing.endpoint_type,
            **self.default_params_with_timeout_values)
        self.negative_client = negative_rest_client.NegativeRestClient(
            self.auth_provider, service, **self.default_params)
        # Generating EC2 credentials in tempest is only supported
        # with identity v2
        if CONF.identity_feature_enabled.api_v2 and \
                CONF.identity.auth_version == 'v2':
            # EC2 and S3 clients, if used, will check configured AWS
            # credentials and generate new ones if needed
            self.ec2api_client = botoclients.APIClientEC2(self.identity_client)
            self.s3_client = botoclients.ObjectClientS3(self.identity_client)

    def _set_compute_clients(self):
        """Instantiate all compute (nova) service clients."""
        params = {
            'service': CONF.compute.catalog_type,
            'region': CONF.compute.region or CONF.identity.region,
            'endpoint_type': CONF.compute.endpoint_type,
            'build_interval': CONF.compute.build_interval,
            'build_timeout': CONF.compute.build_timeout
        }
        params.update(self.default_params)
        self.agents_client = AgentsClientJSON(self.auth_provider, **params)
        self.networks_client = NetworksClientJSON(self.auth_provider, **params)
        self.migrations_client = MigrationsClientJSON(self.auth_provider,
                                                      **params)
        self.security_group_default_rules_client = (
            SecurityGroupDefaultRulesClientJSON(self.auth_provider, **params))
        self.certificates_client = CertificatesClientJSON(self.auth_provider,
                                                          **params)
        self.servers_client = ServersClientJSON(
            self.auth_provider,
            enable_instance_password=CONF.compute_feature_enabled
            .enable_instance_password,
            **params)
        self.limits_client = LimitsClientJSON(self.auth_provider, **params)
        self.images_client = ImagesClientJSON(self.auth_provider, **params)
        self.keypairs_client = KeyPairsClientJSON(self.auth_provider, **params)
        self.quotas_client = QuotasClientJSON(self.auth_provider, **params)
        self.quota_classes_client = QuotaClassesClientJSON(self.auth_provider,
                                                           **params)
        self.flavors_client = FlavorsClientJSON(self.auth_provider, **params)
        self.extensions_client = ExtensionsClientJSON(self.auth_provider,
                                                      **params)
        self.floating_ips_client = FloatingIPsClientJSON(self.auth_provider,
                                                         **params)
        self.security_groups_client = SecurityGroupsClientJSON(
            self.auth_provider, **params)
        self.interfaces_client = InterfacesClientJSON(self.auth_provider,
                                                      **params)
        self.fixed_ips_client = FixedIPsClientJSON(self.auth_provider,
                                                   **params)
        self.availability_zone_client = AvailabilityZoneClientJSON(
            self.auth_provider, **params)
        self.aggregates_client = AggregatesClientJSON(self.auth_provider,
                                                      **params)
        self.services_client = ServicesClientJSON(self.auth_provider, **params)
        self.tenant_usages_client = TenantUsagesClientJSON(self.auth_provider,
                                                           **params)
        self.hosts_client = HostsClientJSON(self.auth_provider, **params)
        self.hypervisor_client = HypervisorClientJSON(self.auth_provider,
                                                      **params)
        self.instance_usages_audit_log_client = \
            InstanceUsagesAuditLogClientJSON(self.auth_provider, **params)
        self.tenant_networks_client = \
            TenantNetworksClientJSON(self.auth_provider, **params)
        self.baremetal_nodes_client = BaremetalNodesClientJSON(
            self.auth_provider, **params)
        # NOTE: The following client needs special timeout values because
        # the API is a proxy for the other component.
        params_volume = copy.deepcopy(params)
        params_volume.update({
            'build_interval': CONF.volume.build_interval,
            'build_timeout': CONF.volume.build_timeout
        })
        self.volumes_extensions_client = VolumesExtensionsClientJSON(
            self.auth_provider, default_volume_size=CONF.volume.volume_size,
            **params_volume)

    def _set_database_clients(self):
        """Instantiate all database (trove) service clients."""
        self.database_flavors_client = DatabaseFlavorsClientJSON(
            self.auth_provider,
            CONF.database.catalog_type,
            CONF.identity.region,
            **self.default_params_with_timeout_values)
        self.database_limits_client = DatabaseLimitsClientJSON(
            self.auth_provider,
            CONF.database.catalog_type,
            CONF.identity.region,
            **self.default_params_with_timeout_values)
        self.database_versions_client = DatabaseVersionsClientJSON(
            self.auth_provider,
            CONF.database.catalog_type,
            CONF.identity.region,
            **self.default_params_with_timeout_values)

    def _set_identity_clients(self):
        """Instantiate identity (keystone) v2/v3 and token clients."""
        params = {
            'service': CONF.identity.catalog_type,
            'region': CONF.identity.region,
            # Identity operations here require the admin endpoint.
            'endpoint_type': 'adminURL'
        }
        params.update(self.default_params_with_timeout_values)
        self.identity_client = IdentityClientJSON(self.auth_provider,
                                                  **params)
        self.identity_v3_client = IdentityV3ClientJSON(self.auth_provider,
                                                       **params)
        self.endpoints_client = EndPointClientJSON(self.auth_provider,
                                                   **params)
        self.service_client = ServiceClientJSON(self.auth_provider, **params)
        self.policy_client = PolicyClientJSON(self.auth_provider, **params)
        self.region_client = RegionClientJSON(self.auth_provider, **params)
        self.credentials_client = CredentialsClientJSON(self.auth_provider,
                                                        **params)
        # Token clients do not use the catalog. They only need default_params.
        self.token_client = TokenClientJSON(CONF.identity.uri,
                                            **self.default_params)
        if CONF.identity_feature_enabled.api_v3:
            self.token_v3_client = V3TokenClientJSON(CONF.identity.uri_v3,
                                                     **self.default_params)

    def _set_volume_clients(self):
        """Instantiate all volume (cinder) v1 and v2 service clients."""
        params = {
            'service': CONF.volume.catalog_type,
            'region': CONF.volume.region or CONF.identity.region,
            'endpoint_type': CONF.volume.endpoint_type,
            'build_interval': CONF.volume.build_interval,
            'build_timeout': CONF.volume.build_timeout
        }
        params.update(self.default_params)
        self.volume_qos_client = QosSpecsClientJSON(self.auth_provider,
                                                    **params)
        self.volume_qos_v2_client = QosSpecsV2ClientJSON(
            self.auth_provider, **params)
        self.volume_services_v2_client = VolumesServicesV2ClientJSON(
            self.auth_provider, **params)
        self.backups_client = BackupsClientJSON(self.auth_provider, **params)
        self.backups_v2_client = BackupsClientV2JSON(self.auth_provider,
                                                     **params)
        self.snapshots_client = SnapshotsClientJSON(self.auth_provider,
                                                    **params)
        self.snapshots_v2_client = SnapshotsV2ClientJSON(self.auth_provider,
                                                         **params)
        self.volumes_client = VolumesClientJSON(
            self.auth_provider, default_volume_size=CONF.volume.volume_size,
            **params)
        self.volumes_v2_client = VolumesV2ClientJSON(
            self.auth_provider, default_volume_size=CONF.volume.volume_size,
            **params)
        self.volume_types_client = VolumeTypesClientJSON(self.auth_provider,
                                                         **params)
        self.volume_services_client = VolumesServicesClientJSON(
            self.auth_provider, **params)
        self.volume_hosts_client = VolumeHostsClientJSON(self.auth_provider,
                                                         **params)
        self.volume_hosts_v2_client = VolumeHostsV2ClientJSON(
            self.auth_provider, **params)
        self.volume_quotas_client = VolumeQuotasClientJSON(self.auth_provider,
                                                           **params)
        self.volume_quotas_v2_client = VolumeQuotasV2Client(self.auth_provider,
                                                            **params)
        self.volumes_extension_client = VolumeExtensionClientJSON(
            self.auth_provider, **params)
        self.volumes_v2_extension_client = VolumeV2ExtensionClientJSON(
            self.auth_provider, **params)
        self.volume_availability_zone_client = \
            VolumeAvailabilityZoneClientJSON(self.auth_provider, **params)
        self.volume_v2_availability_zone_client = \
            VolumeV2AvailabilityZoneClientJSON(self.auth_provider, **params)
        self.volume_types_v2_client = VolumeTypesV2ClientJSON(
            self.auth_provider, **params)

    def _set_object_storage_clients(self):
        """Instantiate object storage (swift) service clients."""
        params = {
            'service': CONF.object_storage.catalog_type,
            'region': CONF.object_storage.region or CONF.identity.region,
            'endpoint_type': CONF.object_storage.endpoint_type
        }
        params.update(self.default_params_with_timeout_values)
        self.account_client = AccountClient(self.auth_provider, **params)
        self.container_client = ContainerClient(self.auth_provider, **params)
        self.object_client = ObjectClient(self.auth_provider, **params)
class AdminManager(Manager):
    """
    Manager object that uses the admin credentials for its
    managed client objects.
    """

    def __init__(self, service=None):
        # Resolve the configured 'identity_admin' credentials and build the
        # full client set with them; 'service' is forwarded unchanged.
        super(AdminManager, self).__init__(
            credentials=cred_provider.get_configured_credentials(
                'identity_admin'),
            service=service)

1234
test/tempest/etc/tempest.conf Executable file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,8 @@
[DEFAULT]
# The list of modules to copy from openstack-common
module=install_venv_common
module=versionutils
# The base module to hold the copy of openstack.common
base=tempest

View File

@ -0,0 +1,26 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr>=0.6,!=0.7,<1.0
anyjson>=0.3.3
httplib2>=0.7.5
jsonschema>=2.0.0,<3.0.0
testtools>=0.9.36,!=1.2.0
boto>=2.32.1
paramiko>=1.13.0
netaddr>=0.7.12
python-glanceclient>=0.15.0
python-cinderclient>=1.1.0
python-heatclient>=0.3.0
testrepository>=0.0.18
oslo.concurrency>=1.8.0,<1.9.0 # Apache-2.0
oslo.config>=1.9.3,<1.10.0 # Apache-2.0
oslo.i18n>=1.5.0,<1.6.0 # Apache-2.0
oslo.log>=1.0.0,<1.1.0 # Apache-2.0
oslo.serialization>=1.4.0,<1.5.0 # Apache-2.0
oslo.utils>=1.4.0,<1.5.0 # Apache-2.0
six>=1.9.0
iso8601>=0.1.9
fixtures>=0.3.14
testscenarios>=0.4
tempest-lib>=0.4.0

146
test/tempest/run_tempest.sh Normal file
View File

@ -0,0 +1,146 @@
#!/usr/bin/env bash
# Run the Tempest integration test suite, optionally inside a virtualenv
# that is created on demand by tools/install_venv.py.

function usage {
  echo "Usage: $0 [OPTION]..."
  echo "Run Tempest test suite"
  echo ""
  echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
  echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
  echo " -n, --no-site-packages Isolate the virtualenv from the global Python environment"
  echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
  echo " -u, --update Update the virtual environment with any newer package versions"
  echo " -s, --smoke Only run smoke tests"
  echo " -t, --serial Run testr serially"
  echo " -C, --config Config file location"
  echo " -h, --help Print this usage message"
  echo " -d, --debug Run tests with testtools instead of testr. This allows you to use PDB"
  echo " -l, --logging Enable logging"
  echo " -L, --logging-config Logging config file location. Default is etc/logging.conf"
  echo " -- [TESTROPTIONS] After the first '--' you can pass arbitrary arguments to testr "
}

# Defaults for all command-line switches.
testrargs=""
venv=.venv
with_venv=tools/with_venv.sh
serial=0
always_venv=0
never_venv=0
no_site_packages=0
debug=0
force=0
wrapper=""
config_file=""
update=0
logging=0
logging_config=etc/logging.conf

# Normalize options with getopt; bail out with usage on a parse error.
if ! options=$(getopt -o VNnfusthdC:lL: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,serial,help,debug,config:,logging,logging-config: -- "$@")
then
  # parse error
  usage
  exit 1
fi

eval set -- $options
first_uu=yes
while [ $# -gt 0 ]; do
  case "$1" in
    -h|--help) usage; exit;;
    -V|--virtual-env) always_venv=1; never_venv=0;;
    -N|--no-virtual-env) always_venv=0; never_venv=1;;
    -n|--no-site-packages) no_site_packages=1;;
    -f|--force) force=1;;
    -u|--update) update=1;;
    -d|--debug) debug=1;;
    -C|--config) config_file=$2; shift;;
    -s|--smoke) testrargs+="smoke";;
    -t|--serial) serial=1;;
    -l|--logging) logging=1;;
    -L|--logging-config) logging_config=$2; shift;;
    # Everything after the first '--' is collected verbatim for testr.
    --) [ "yes" == "$first_uu" ] || testrargs="$testrargs $1"; first_uu=no ;;
    *) testrargs="$testrargs $1";;
  esac
  shift
done

# Export the config location so tempest picks it up from the environment.
if [ -n "$config_file" ]; then
  config_file=`readlink -f "$config_file"`
  export TEMPEST_CONFIG_DIR=`dirname "$config_file"`
  export TEMPEST_CONFIG=`basename "$config_file"`
fi

# Export the logging config location; the file must exist.
if [ $logging -eq 1 ]; then
  if [ ! -f "$logging_config" ]; then
    echo "No such logging config file: $logging_config"
    exit 1
  fi
  logging_config=`readlink -f "$logging_config"`
  export TEMPEST_LOG_CONFIG_DIR=`dirname "$logging_config"`
  export TEMPEST_LOG_CONFIG=`basename "$logging_config"`
fi

# Run everything relative to the script's own directory.
cd `dirname "$0"`

if [ $no_site_packages -eq 1 ]; then
  installvenvopts="--no-site-packages"
fi

# Create the testr repository on first use.
function testr_init {
  if [ ! -d .testrepository ]; then
      ${wrapper} testr init
  fi
}

# Execute the suite: under testtools when debugging, otherwise via testr
# (serial or parallel), piping subunit output through the colorizer.
function run_tests {
  testr_init
  ${wrapper} find . -type f -name "*.pyc" -delete
  export OS_TEST_PATH=./tempest/test_discover
  if [ $debug -eq 1 ]; then
      if [ "$testrargs" = "" ]; then
           testrargs="discover ./tempest/test_discover"
      fi
      ${wrapper} python -m testtools.run $testrargs
      return $?
  fi

  if [ $serial -eq 1 ]; then
      ${wrapper} testr run --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
  else
      ${wrapper} testr run --parallel --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
  fi
}

# Prepare (or reuse) the virtualenv unless -N/--no-virtual-env was given.
if [ $never_venv -eq 0 ]
then
  # Remove the virtual environment if --force used
  if [ $force -eq 1 ]; then
    echo "Cleaning virtualenv..."
    rm -rf ${venv}
  fi
  if [ $update -eq 1 ]; then
      echo "Updating virtualenv..."
      python tools/install_venv.py $installvenvopts
  fi
  if [ -e ${venv} ]; then
    wrapper="${with_venv}"
  else
    if [ $always_venv -eq 1 ]; then
      # Automatically install the virtualenv
      python tools/install_venv.py $installvenvopts
      wrapper="${with_venv}"
    else
      echo -e "No virtual environment found...create one? (Y/n) \c"
      read use_ve
      if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
        # Install the virtualenv and run the test suite in it
        python tools/install_venv.py $installvenvopts
        wrapper=${with_venv}
      fi
    fi
  fi
fi

run_tests
retval=$?
exit $retval

150
test/tempest/run_tests.sh Normal file
View File

@ -0,0 +1,150 @@
#!/usr/bin/env bash
# Run the Tempest unit tests (and optionally flake8), mirroring
# run_tempest.sh's virtualenv handling.

function usage {
  echo "Usage: $0 [OPTION]..."
  echo "Run Tempest unit tests"
  echo ""
  echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
  echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
  echo " -n, --no-site-packages Isolate the virtualenv from the global Python environment"
  echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
  echo " -u, --update Update the virtual environment with any newer package versions"
  echo " -t, --serial Run testr serially"
  echo " -p, --pep8 Just run pep8"
  echo " -c, --coverage Generate coverage report"
  echo " -h, --help Print this usage message"
  echo " -d, --debug Run tests with testtools instead of testr. This allows you to use PDB"
  echo " -- [TESTROPTIONS] After the first '--' you can pass arbitrary arguments to testr "
}

# Defaults for all command-line switches.
testrargs=""
just_pep8=0
venv=.venv
with_venv=tools/with_venv.sh
serial=0
always_venv=0
never_venv=0
no_site_packages=0
debug=0
force=0
coverage=0
wrapper=""
config_file=""
update=0

# Normalize options with getopt; bail out with usage on a parse error.
if ! options=$(getopt -o VNnfuctphd -l virtual-env,no-virtual-env,no-site-packages,force,update,serial,coverage,pep8,help,debug -- "$@")
then
  # parse error
  usage
  exit 1
fi

eval set -- $options
first_uu=yes
while [ $# -gt 0 ]; do
  case "$1" in
    -h|--help) usage; exit;;
    -V|--virtual-env) always_venv=1; never_venv=0;;
    -N|--no-virtual-env) always_venv=0; never_venv=1;;
    -n|--no-site-packages) no_site_packages=1;;
    -f|--force) force=1;;
    -u|--update) update=1;;
    -d|--debug) debug=1;;
    -p|--pep8) let just_pep8=1;;
    -c|--coverage) coverage=1;;
    -t|--serial) serial=1;;
    # Everything after the first '--' is collected verbatim for testr.
    --) [ "yes" == "$first_uu" ] || testrargs="$testrargs $1"; first_uu=no ;;
    *) testrargs="$testrargs $1";;
  esac
  shift
done

# Run everything relative to the script's own directory.
cd `dirname "$0"`

if [ $no_site_packages -eq 1 ]; then
  installvenvopts="--no-site-packages"
fi

# Create the testr repository on first use.
function testr_init {
  if [ ! -d .testrepository ]; then
      ${wrapper} testr init
  fi
}

# Execute the unit tests: testtools for debug, setup.py for coverage,
# otherwise testr (serial or parallel) through the colorizer.
function run_tests {
  testr_init
  ${wrapper} find . -type f -name "*.pyc" -delete
  export OS_TEST_PATH=./tempest/tests
  if [ $debug -eq 1 ]; then
      if [ "$testrargs" = "" ]; then
          testrargs="discover ./tempest/tests"
      fi
      ${wrapper} python -m testtools.run $testrargs
      return $?
  fi

  if [ $coverage -eq 1 ]; then
      ${wrapper} python setup.py test --coverage
      return $?
  fi

  if [ $serial -eq 1 ]; then
      ${wrapper} testr run --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
  else
      ${wrapper} testr run --parallel --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
  fi
}

# Style check; warn when run outside the venv since HACKING rules may
# be missing from the global environment.
function run_pep8 {
  echo "Running flake8 ..."
  if [ $never_venv -eq 1 ]; then
      echo "**WARNING**:" >&2
      echo "Running flake8 without virtual env may miss OpenStack HACKING detection" >&2
  fi
  ${wrapper} flake8
}

# Prepare (or reuse) the virtualenv unless -N/--no-virtual-env was given.
if [ $never_venv -eq 0 ]
then
  # Remove the virtual environment if --force used
  if [ $force -eq 1 ]; then
    echo "Cleaning virtualenv..."
    rm -rf ${venv}
  fi
  if [ $update -eq 1 ]; then
      echo "Updating virtualenv..."
      python tools/install_venv.py $installvenvopts
  fi
  if [ -e ${venv} ]; then
    wrapper="${with_venv}"
  else
    if [ $always_venv -eq 1 ]; then
      # Automatically install the virtualenv
      python tools/install_venv.py $installvenvopts
      wrapper="${with_venv}"
    else
      echo -e "No virtual environment found...create one? (Y/n) \c"
      read use_ve
      if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
        # Install the virtualenv and run the test suite in it
        python tools/install_venv.py $installvenvopts
        wrapper=${with_venv}
      fi
    fi
  fi
fi

if [ $just_pep8 -eq 1 ]; then
    run_pep8
    exit
fi

run_tests
retval=$?
# When no specific tests were requested, also run the style check.
if [ -z "$testrargs" ]; then
    run_pep8
fi
exit $retval

36
test/tempest/setup.cfg Normal file
View File

@ -0,0 +1,36 @@
[metadata]
name = tempest
version = 4
summary = OpenStack Integration Testing
description-file =
README.rst
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = http://www.openstack.org/
classifier =
Intended Audience :: Information Technology
Intended Audience :: System Administrators
Intended Audience :: Developers
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
[entry_points]
console_scripts =
verify-tempest-config = tempest.cmd.verify_tempest_config:main
javelin2 = tempest.cmd.javelin:main
run-tempest-stress = tempest.cmd.run_stress:main
tempest-cleanup = tempest.cmd.cleanup:main
oslo.config.opts =
tempest.config = tempest.config:list_opts
[build_sphinx]
all_files = 1
build-dir = doc/build
source-dir = doc/source
[wheel]
universal = 1

30
test/tempest/setup.py Normal file
View File

@ -0,0 +1,30 @@
#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
# Work around http://bugs.python.org/issue15881 on Python < 2.7.4:
# importing multiprocessing eagerly keeps setuptools' atexit handling sane.
try:
    import multiprocessing  # noqa
except ImportError:
    pass

# All packaging metadata lives in setup.cfg; pbr reads it at build time.
setuptools.setup(
    setup_requires=['pbr'],
    pbr=True)

View File

View File

@ -0,0 +1,52 @@
.. _api_field_guide:
Tempest Field Guide to API tests
================================
What are these tests?
---------------------
One of Tempest's prime functions is to ensure that your OpenStack cloud
works with the OpenStack API as documented. The current largest
portion of Tempest code is devoted to test cases that do exactly this.
It's also important to test not only the expected positive path on
APIs, but also to provide them with invalid data to ensure they fail
in expected and documented ways. Over the course of the OpenStack
project Tempest has discovered many fundamental bugs by doing just
this.
In order for some APIs to return meaningful results, there must be
enough data in the system. This means these tests might start by
spinning up a server, image, etc, then operating on it.
Why are these tests in tempest?
-------------------------------
This is one of the core missions for the Tempest project, and where it
started. Many people use this bit of function in Tempest to ensure
their clouds haven't broken the OpenStack API.
It could be argued that some of the negative testing could be done
back in the projects themselves, and we might evolve there over time,
but currently in the OpenStack gate this is a fundamentally important
place to keep things.
Scope of these tests
--------------------
API tests should always use the Tempest implementation of the
OpenStack API, as we want to ensure that bugs aren't hidden by the
official clients.
They should test specific API calls, and can build up complex state if
it's needed for the API call to be meaningful.
They should send not only good data, but bad data at the API and look
for error codes.
They should all be able to be run on their own, not depending on the
state created by a previous test.

View File

View File

@ -0,0 +1,562 @@
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest import config
import tempest.test
from daisyclient.v1 import client as daisy_client
from ironicclient import client as ironic_client
CONF = config.CONF
LOG = logging.getLogger(__name__)
class BaseDaisyTest(tempest.test.BaseTestCase):
@classmethod
def skip_checks(cls):
    """Hook for conditions that would skip the whole test class.

    No daisy-specific skip conditions yet; defers to the base class.
    """
    super(BaseDaisyTest, cls).skip_checks()
@classmethod
def resource_setup(cls):
    """Create the class-level daisy and ironic clients.

    The daisy endpoint comes from tempest config; the ironic client is
    built with a fake token against a fixed local URL.
    """
    super(BaseDaisyTest, cls).resource_setup()
    cls.daisy_version = 1.0
    cls.daisy_endpoint = CONF.daisy.daisy_endpoint
    cls.daisy_client = daisy_client.Client(version=cls.daisy_version,
                                           endpoint=cls.daisy_endpoint)
    # NOTE(review): hard-coded localhost ironic URL and fake token --
    # presumably only valid on an all-in-one deployment; confirm.
    cls.ironic_client = ironic_client.get_client(
        1, os_auth_token='fake', ironic_url='http://127.0.0.1:6385/v1')
@classmethod
def resource_cleanup(cls):
    """Release class-level resources; nothing daisy-specific to clean."""
    super(BaseDaisyTest, cls).resource_cleanup()
@classmethod
def add_cluster(cls, **cluster_meta):
    """Create a cluster via the daisy API and return its info."""
    # Renamed first parameter self -> cls: this is a classmethod, so the
    # implicit first argument is the class itself.
    return cls.daisy_client.clusters.add(**cluster_meta)
@classmethod
def update_cluster(cls, cluster_id, **cluster_meta):
    """Update the cluster identified by cluster_id and return its info."""
    # classmethod: first parameter renamed self -> cls for clarity.
    return cls.daisy_client.clusters.update(cluster_id, **cluster_meta)
@classmethod
def list_clusters(self, **cluster_meta):
clusters_info = self.daisy_client.clusters.list(**cluster_meta)
return clusters_info
@classmethod
def list_filter_clusters(self, **cluster_meta):
cluster_meta['filters'] = cluster_meta
clusters_info = self.daisy_client.clusters.list(**cluster_meta)
return clusters_info
@classmethod
def get_cluster(self, cluster_id):
cluster_info = self.daisy_client.clusters.get(cluster_id)
return cluster_info
@classmethod
def _clean_all_cluster(self):
clusters_list_generator = self.daisy_client.clusters.list()
clusters_list = [clusters for clusters in clusters_list_generator]
if clusters_list:
for cluster in clusters_list:
self.delete_cluster(cluster)
@classmethod
def add_hwm(self, **hwm_meta):
hwm_info = self.daisy_client.hwm.add(**hwm_meta)
return hwm_info
@classmethod
def delete_hwm(self, hwm_meta):
self.daisy_client.hwm.delete(hwm_meta)
@classmethod
def update_hwm(self, hwm_id, **hwm_meta):
hwm_info = self.daisy_client.hwm.update(hwm_id, **hwm_meta)
return hwm_info
@classmethod
def _clean_all_hwm(self):
hwm_list_generator = self.daisy_client.hwm.list()
hwm_list = [hwms for hwms in hwm_list_generator]
if hwm_list:
for hwm in hwm_list:
self.delete_hwm(hwm)
@classmethod
def list_hwm(self, **hwm_meta):
hwm_meta['filters'] = hwm_meta
hwm_list = self.daisy_client.hwm.list(**hwm_meta)
return hwm_list
@classmethod
def get_hwm_detail(self, hwm_meta):
hwm_detail = self.daisy_client.hwm.get(hwm_meta)
return hwm_detail
@classmethod
def add_host(self, **host_meta):
host_info = self.daisy_client.hosts.add(**host_meta)
return host_info
@classmethod
def delete_host(self, host_meta):
self.daisy_client.hosts.delete(host_meta)
@classmethod
def update_host(self, host_id, **host_meta):
host_info = self.daisy_client.hosts.update(host_id, **host_meta)
return host_info
@classmethod
def _clean_all_host(self):
hosts_list_generator = self.daisy_client.hosts.list()
hosts_list = [hosts for hosts in hosts_list_generator]
if hosts_list:
for host in hosts_list:
self.delete_host(host)
@classmethod
def list_host(self, **host_meta):
host_meta['filters'] = host_meta
host_list = self.daisy_client.hosts.list(**host_meta)
return host_list
@classmethod
def get_host_detail(self, host_meta):
host_detail = self.daisy_client.hosts.get(host_meta)
return host_detail
@classmethod
def add_discover_host(self, **host_meta):
host_info = self.daisy_client.hosts.add_discover_host(**host_meta)
return host_info
@classmethod
def update_discover_host(self, host_id, **host_meta):
host_info = self.daisy_client.hosts.update_discover_host(
host_id, **host_meta)
return host_info
@classmethod
def delete_discover_host(self, host_meta):
self.daisy_client.hosts.delete_discover_host(host_meta)
@classmethod
def list_discover_host(self, **host_meta):
host_meta['filters'] = host_meta
host_list = self.daisy_client.hosts.list_discover_host(**host_meta)
return host_list
@classmethod
def get_discover_host_detail(self, host_meta):
host_detail = self.daisy_client.hosts.get_discover_host_detail(host_meta)
return host_detail
@classmethod
def discover_host(self, **host_meta):
host_discovery = self.daisy_client.hosts.discover_host(**host_meta)
return host_discovery
@classmethod
def _clean_all_discover_host(self):
host_meta = {}
hosts_list_generator = self.daisy_client.hosts.list_discover_host(**host_meta)
hosts_list = [hosts for hosts in hosts_list_generator]
if hosts_list:
for host in hosts_list:
self.delete_discover_host(host)
@classmethod
def add_network(self, **network_meta):
network_info = self.daisy_client.networks.add(**network_meta)
return network_info
@classmethod
def get_network(self, network_id):
network_info = self.daisy_client.networks.get(network_id)
return network_info
@classmethod
def list_network(self, **network_meta):
network = {'sort_key': 'name',
'sort_dir': 'asc',
'filters': network_meta}
network_infos = self.daisy_client.networks.list(**network)
return network_infos
@classmethod
def update_network(self, network_id, **network_meta):
network_info = self.daisy_client.networks.update(network_id,
**network_meta)
return network_info
@classmethod
def delete_network(self, network_id):
self.daisy_client.networks.delete(network_id)
@classmethod
def list_roles(self, **role_meta):
roles_info = self.daisy_client.roles.list(**role_meta)
return roles_info
@classmethod
def add_role(self, **role_meta):
roles_info = self.daisy_client.roles.add(**role_meta)
return roles_info
@classmethod
def get_role(self, role_id):
role_info = self.daisy_client.roles.get(role_id)
return role_info
@classmethod
def delete_role(self, role_id):
self.daisy_client.roles.delete(role_id)
@classmethod
def update_role(self, role_id, **role_meta):
role_info = self.daisy_client.roles.update(role_id, **role_meta)
return role_info
@classmethod
def install(self, **install_meta):
install_info = self.daisy_client.install.install(**install_meta)
return install_info
@classmethod
def get_cluster_id(self, cluster_meta):
if not cluster_meta:
cluster_list = self.daisy_client.clusters.list()
for cluster in cluster_list:
cluster_id = {'cluster_id': cluster.id}
else:
cluster_id = {'cluster_id': cluster_meta}
return cluster_id
@classmethod
def get_uninstall_status(self, **cluster_id):
nodes = self.daisy_client.uninstall.query_progress(**cluster_id)
return nodes
@classmethod
def delete_cluster(self, cluster_meta):
self.daisy_client.clusters.delete(cluster_meta)
@classmethod
def uninstall(self, **cluster_id):
self.daisy_client.uninstall.uninstall(**cluster_id)
@classmethod
def update(self, **cluster_id):
self.daisy_client.update.update(**cluster_id)
@classmethod
def get_update_status(self, **cluster_id):
nodes = self.daisy_client.update.query_progress(**cluster_id)
return nodes
@classmethod
def list_components(self, **component_meta):
components_info = self.daisy_client.components.list(**component_meta)
return components_info
@classmethod
def add_config(self, **config_meta):
config_info = self.daisy_client.configs.add(**config_meta)
return config_info
@classmethod
def get_config(self, config_id):
config_meta = {}
config_info = self.daisy_client.configs.get(config_id, **config_meta)
return config_info
def delete_config(self, config_id):
config = {'config': [config_id]}
self.daisy_client.configs.delete(**config)
@classmethod
def _clean_all_config(self):
configs_list_generator = self.daisy_client.configs.list()
configs_list = [configs for configs in configs_list_generator]
if configs_list:
for _config in configs_list:
_config = {'config': [config.id]}
self.daisy_client.configs.delete(**_config)
@classmethod
def list_config(self):
configs_list = self.daisy_client.configs.list()
return configs_list
@classmethod
def cluster_config_set_update(self, **config_set):
config_set = self.daisy_client.config_sets.cluster_config_set_update(
**config_set)
return config_set
@classmethod
def cluster_config_set_progress(self, **config_set):
config_set = self.daisy_client.config_sets.cluster_config_set_progress(**config_set)
return config_set
@classmethod
def add_config_set(self, **config_set):
config_set = self.daisy_client.config_sets.add(**config_set)
return config_set
@classmethod
def update_config_set(self, config_set_id, **config_set):
config_set = self.daisy_client.config_sets.update(
config_set_id, **config_set)
return config_set
@classmethod
def get_config_set(self, config_set_id):
config_set = self.daisy_client.config_sets.get(config_set_id)
return config_set
def list_config_set(self):
config_set_list = self.daisy_client.config_sets.list()
return config_set_list
def delete_config_set(self, config_set_id):
self.daisy_client.config_sets.delete(config_set_id)
@classmethod
def _clean_all_config_set(self):
config_set_list_generator = self.daisy_client.config_sets.list()
config_set_list = [config_set for config_set in config_set_list_generator]
if config_set_list:
for config_set in config_set_list:
self.daisy_client.config_sets.delete(config_set.id)
@classmethod
def add_config_file(self, **config_file):
config_file = self.daisy_client.config_files.add(**config_file)
return config_file
@classmethod
def update_config_file(self, config_file_id, **config_file):
config_file = self.daisy_client.config_files.update(
config_file_id, **config_file)
return config_file
@classmethod
def get_config_file(self, config_file_id):
config_file = self.daisy_client.config_files.get(config_file_id)
return config_file
def list_config_file(self):
config_file_list = self.daisy_client.config_files.list()
return config_file_list
def delete_config_file(self, config_file_id):
self.daisy_client.config_files.delete(config_file_id)
@classmethod
def _clean_all_config_file(self):
config_file_list_generator = self.daisy_client.config_files.list()
config_file_list = [config_file for config_file in config_file_list_generator]
if config_file_list:
for config_file in config_file_list:
self.daisy_client.config_files.delete(config_file.id)
@classmethod
def list_service(self, **service_meta):
services_info = self.daisy_client.services.list(**service_meta)
return services_info
@classmethod
def add_service(self, **service_meta):
service_info = self.daisy_client.services.add(**service_meta)
return service_info
@classmethod
def get_service(self, service_id):
service_info = self.daisy_client.services.get(service_id)
return service_info
@classmethod
def delete_service(self, service_id):
self.daisy_client.services.delete(service_id)
@classmethod
def update_service(self, service_id, **service_meta):
service_info = self.daisy_client.services.update(
service_id, **service_meta)
return service_info
@classmethod
def list_component(self, **component_meta):
components_info = self.daisy_client.components.list(**component_meta)
return components_info
@classmethod
def add_component(self, **component_meta):
component_info = self.daisy_client.components.add(**component_meta)
return component_info
@classmethod
def get_component(self, component_id):
component_info = self.daisy_client.components.get(component_id)
return component_info
@classmethod
def delete_component(self, component_id):
self.daisy_client.components.delete(component_id)
@classmethod
def update_component(self, component_id, **component_meta):
component_info = self.daisy_client.components.update(
component_id, **component_meta)
return component_info
@classmethod
def add_cinder_volume(self, **cinder_volume_meta):
cinder_volume_info = self.daisy_client.disk_array.cinder_volume_add(
**cinder_volume_meta)
return cinder_volume_info
@classmethod
def update_cinder_volume(self, cinder_volume_id, **cinder_volume_meta):
cinder_volume_info = self.daisy_client.disk_array.cinder_volume_update(cinder_volume_id, **cinder_volume_meta)
return cinder_volume_info
@classmethod
def delete_cinder_volume(self, cinder_volume_id):
self.daisy_client.disk_array.cinder_volume_delete(cinder_volume_id)
@classmethod
def list_cinder_volume(self, **cinder_volume_meta):
cinder_volume_meta['filters'] = cinder_volume_meta
cinder_volume_list = self.daisy_client.disk_array.cinder_volume_list(
**cinder_volume_meta)
return cinder_volume_list
@classmethod
def get_cinder_volume_detail(self, cinder_volume_id):
cinder_volume_info = self.daisy_client.disk_array.cinder_volume_detail(cinder_volume_id)
return cinder_volume_info
@classmethod
def add_service_disk(self, **service_disk_meta):
service_disk_info = self.daisy_client.disk_array.service_disk_add(
**service_disk_meta)
return service_disk_info
@classmethod
def update_service_disk(self, service_disk_id, **service_disk_meta):
service_disk_info = self.daisy_client.disk_array.service_disk_update(
service_disk_id, **service_disk_meta)
return service_disk_info
@classmethod
def delete_service_disk(self, service_disk_id):
self.daisy_client.disk_array.service_disk_delete(service_disk_id)
@classmethod
def list_service_disk(self, **service_disk_meta):
service_disk_meta['filters'] = service_disk_meta
service_disk_list = self.daisy_client.disk_array.service_disk_list(
**service_disk_meta)
return service_disk_list
@classmethod
def get_service_disk_detail(self, service_disk_id):
service_disk_detail = self.daisy_client.disk_array.service_disk_detail(service_disk_id)
return service_disk_detail
@classmethod
def _clean_all_physical_node(self):
physical_node_list_generator = self.ironic_client.physical_node.list()
physical_node_list = [physical_node for physical_node in physical_node_list_generator]
if physical_node_list:
for physical_node in physical_node_list:
self.ironic_client.physical_node.delete(physical_node.uuid)
@classmethod
def template_add(self, **template):
template = self.daisy_client.template.add(**template)
return template
@classmethod
def template_update(self, template_id, **template):
template = self.daisy_client.template.update(template_id, **template)
return template
@classmethod
def template_detail(self, template_id):
template = self.daisy_client.template.get(template_id)
return template
@classmethod
def template_list(self, **kwargs):
template = self.daisy_client.template.list(**kwargs)
return template
@classmethod
def template_delete(self, template_id):
template = self.daisy_client.template.delete(template_id)
return template
@classmethod
def export_db_to_json(self, **kwargs):
template = self.daisy_client.template.export_db_to_json(**kwargs)
return template
@classmethod
def import_json_to_template(self, **kwargs):
template = self.daisy_client.template.import_json_to_template(**kwargs)
return template
@classmethod
def import_template_to_db(self, **kwargs):
template = self.daisy_client.template.import_template_to_db(**kwargs)
return template
@classmethod
def _clean_all_template(self):
template_generator = self.daisy_client.template.list()
templates = [template for template in template_generator]
if templates:
for template in templates:
self.template_delete(template.id)
@classmethod
def host_to_template(self, **kwargs):
host_template = self.daisy_client.template.host_to_template(**kwargs)
return host_template
@classmethod
def template_to_host(self, **kwargs):
hosts = self.daisy_client.template.template_to_host(**kwargs)
return hosts
@classmethod
def host_template_list(self, **kwargs):
host_templates = self.daisy_client.template.host_template_list(**kwargs)
return host_templates
@classmethod
def delete_host_template(self, **kwargs):
template = self.daisy_client.template.delete_host_template(**kwargs)
return template

View File

@ -0,0 +1,138 @@
#!/bin/bash
# Collect hardware facts from this node into data.json and POST them to
# the ironic-discoverd "continue" endpoint so the node gets enrolled.
DISCOVERD_URL="http://127.0.0.1:5050/v1/continue"
#DISCOVERD_URL="http://192.168.0.11:5050/v1/continue"
# Apply a jq filter expression ($1) to data.json in place.
function update() {
    # Fixed: only replace data.json when jq succeeds.  Previously a failed
    # jq run still moved the empty/partial temp.json over data.json,
    # destroying everything collected so far.
    if jq "$1" data.json > temp.json; then
        mv temp.json data.json
    else
        echo "Error: update $1 to json failed"
        rm -f temp.json
    fi
}
# Read DMI/SMBIOS system identity fields via dmidecode and seed data.json
# with a {"system": {...}} object holding them.
function get_system_info(){
PRODUCT=$(dmidecode -s system-product-name)
FAMILY=$(dmidecode -t system|grep "Family"|cut -d ":" -f2)
VERSION=$(dmidecode -s system-version)
SERIAL=$(dmidecode -s system-serial-number)
MANUFACTURER=$(dmidecode -s system-manufacturer)
UUID=$(dmidecode -s system-uuid)
#FQDN=$(hostname -f)
# NOTE(review): the hostname lookup is disabled and a literal placeholder
# is reported instead -- confirm this is intentional.
FQDN='Hostname'
# Start data.json from scratch; later collectors append their sections.
echo '{"system":{}}' > data.json
update ".system[\"product\"] = \"$PRODUCT\""
update ".system[\"family\"] = \"$FAMILY\""
update ".system[\"fqdn\"] = \"$FQDN\""
update ".system[\"version\"] = \"$VERSION\""
update ".system[\"serial\"] = \"$SERIAL\""
update ".system[\"manufacturer\"] = \"$MANUFACTURER\""
update ".system[\"uuid\"] = \"$UUID\""
}
# Record CPU counts plus per-logical-CPU model name and frequency from
# /proc/cpuinfo ("real" = physical packages, "total" = logical CPUs).
function get_cpu_info(){
REAL=$(cat /proc/cpuinfo |grep "physical id"|sort |uniq|wc -l)
TOTAL=$(cat /proc/cpuinfo |grep "processor"|wc -l)
update ".cpu[\"real\"] = $REAL"
update ".cpu[\"total\"] = $TOTAL"
for i in $(seq $TOTAL)
do
if [ ! -z "$i" ]; then
# sed -n "$i p" picks the i-th matching line, i.e. the i-th CPU's entry.
SPEC_MODEL=$(cat /proc/cpuinfo | grep name | cut -f2 -d:|sed -n $i"p")
SPEC_FRE=$(cat /proc/cpuinfo | grep MHz | cut -f2 -d:|sed -n $i"p")
update ".cpu[\"spec_$i\"] = {model:\"$SPEC_MODEL\", frequency:$SPEC_FRE}"
fi
done
}
# Record total memory plus, for each physical memory array reported by
# dmidecode, its slot count / max capacity and the per-slot device info.
function get_memory_info(){
PHY_NUM=$(dmidecode -t memory|grep "Physical Memory Array"|wc -l)
TOTAL_MEM=$(cat /proc/meminfo |grep MemTotal |cut -d ":" -f2)
update ".memory[\"total\"] = \"$TOTAL_MEM\""
for num in $(seq $PHY_NUM)
do
SLOTS=$(dmidecode -t memory |grep "Number Of Devices" |cut -d ":" -f2|sed -n $num"p")
MAX_CAP=$(dmidecode -t memory |grep "Maximum Capacity" |cut -d ":" -f2|sed -n $num"p")
update ".memory[\"phy_memory_$num\"] = {slots:\"$SLOTS\", maximum_capacity:\"$MAX_CAP\"}"
# NOTE(review): the inner loop indexes dmidecode's *global* match list by
# $i, so for a second physical array it would re-read the first array's
# slots -- confirm on multi-array hardware.
for i in $(seq $SLOTS)
do
if [ ! -z "$i" ]; then
DEVICE_FRE=$(dmidecode -t memory |grep "Speed" |cut -d ":" -f2|sed -n $i"p")
DEVICE_TYPE=$(dmidecode -t memory |grep 'Type:' |grep -v "Error Correction Type"|cut -d ":" -f2|sed -n $i"p")
DEVICE_SIZE=$(dmidecode -t memory |grep Size |cut -d ":" -f2|sed -n $i"p")
update ".memory[\"phy_memory_$num\"][\"devices_$i\"] = {frequency:\"$DEVICE_FRE\", type:\"$DEVICE_TYPE\", size:\"$DEVICE_SIZE\"}"
fi
done
done
}
# Record, for every non-loopback interface, its MAC/IP/netmask, PCI
# bus-info, link state and current/max speed as reported by ethtool.
function get_net_info(){
for iface in $(ls /sys/class/net/ | grep -v lo)
do
NAME=$iface
MAC=$(ip link show $iface | awk '/ether/ {print $2}')
IP=$(ip addr show $iface | awk '/inet / { sub(/\/.*/, "", $2); print $2 }')
NETMASK=$(ifconfig $iface | grep netmask | awk '{print $4}')
STATE=$(ip link show $iface | awk '/mtu/ {print $3}')
PCI=$(ethtool -i $iface|grep "bus-info"|cut -d " " -f2)
CURRENT_SPEED=$(ethtool $iface |grep Speed |awk -F " " '{print $2}')
# Max speed: when "Supported link modes" occupies a single line it is cut
# out directly; when the mode list wraps onto extra lines, the last mode
# is taken from the line just before "Supported pause frame use".
LINE=$(ethtool $iface|grep -n "Supported pause frame use"|awk -F ":" '{print $1}')
#LINE=$[ LINE - 1 ]
LINE_SPEED=$(ethtool $iface|grep -n "Supported link modes"|awk -F ":" '{print $1}')
if [ $LINE -eq $LINE_SPEED ]; then
MAX_SPEED=$(ethtool $iface|grep "Supported link modes"|cut -d ":" -f2)
else
MAX_SPEED=$(ethtool $iface |sed -n $LINE"p"|awk -F " " '{print $1}')
fi
# "ip link" flags such as <BROADCAST,MULTICAST,UP,...> contain "UP" when
# the interface is administratively up.
UP="UP"
if [[ "$STATE" =~ "$UP" ]]; then
STATE="up"
else
STATE="down"
fi
# Skip virtual interfaces with no MAC (nothing useful to report).
if [ ! -z "$MAC" ]; then
update ".interfaces[\"$iface\"] = {mac: \"$MAC\", ip: \"$IP\", netmask: \"$NETMASK\", name: \"$iface\", max_speed: \"$MAX_SPEED\", state: \"$STATE\", pci: \"$PCI\", current_speed: \"$CURRENT_SPEED\"}"
fi
done
}
# Record, for every disk listed by fdisk, its size, by-id aliases and the
# model/removable flags reported by hdparm.
function get_disk_info(){
for disk in $(fdisk -l|grep Disk|grep "/dev" |cut -d ":" -f1|awk -F "/" '{print $NF}')
do
DISK_NAME=$disk
DISK_SIZE=$(fdisk -l|grep Disk|grep "/dev" |grep $disk|cut -d "," -f2)
#DISK_DISK=$(ls -l /dev/disk/by-path/|grep $disk"$"|awk '{print $9}')
DISK_EXTRA_1=$(ls -l /dev/disk/by-id/|grep $disk"$"|awk '{print $9}'|sed -n 1p)
DISK_EXTRA_2=$(ls -l /dev/disk/by-id/|grep $disk"$"|awk '{print $9}'|sed -n 2p)
# Fixed: query the disk being iterated; the original hard-coded /dev/sda,
# so every other disk reported sda's model and removable flag.
MODEL=$(hdparm -I /dev/$disk |grep Model | cut -d ":" -f2)
REMOVABLE=$(hdparm -I /dev/$disk |grep removable|awk '{print $4}')
# NOTE(review): DISK_DISK is never assigned (its source line is commented
# out above), so the "disk" field is always empty -- confirm intent.
update ".disk[\"$disk\"] = {name: \"$DISK_NAME\", size: \"$DISK_SIZE\", disk: \"$DISK_DISK\", model: \"$MODEL\", removable: \"$REMOVABLE\",extra: [\"$DISK_EXTRA_1\", \"$DISK_EXTRA_2\"]}"
done
}
# Run every collector in order; get_system_info (re)creates data.json and
# each later collector appends its own section via update().
function main(){
get_system_info
get_cpu_info
get_memory_info
get_net_info
get_disk_info
}
main
# Attach metadata fields expected by the discovery service.
update ".ipmi_address = \"127.0.0.1\""
update ".data_name = \"baremetal_source\""
echo Collected:
cat data.json
# POST the collected facts to discoverd.  "eval" re-parses the quoted
# fragments below into a single curl command line.
RESULT=$(eval curl -i -X POST \
"-H 'Accept: application/json'" \
"-H 'Content-Type: application/json'" \
"-d @data.json" \
"$DISCOVERD_URL")
# NOTE(review): this only matches "HTTP/1.0 4xx" status lines; an
# HTTP/1.1 4xx response would slip through unreported -- confirm intent.
if echo $RESULT | grep "HTTP/1.0 4"; then
echo "Ironic API returned error: $RESULT"
fi
echo "Node is now discovered! Halting..."
sleep 5

View File

@ -0,0 +1,38 @@
import os
import time
import re
def mergeLog():
    """Merge every per-suite XML result file under the current directory
    into two aggregate reports:

    * ``daisy.xml``       -- all results wrapped in a ``<daisy>`` root
                             element stamped with the merge time, with
                             every ``message="..."`` attribute blanked.
    * ``daisy_sonar.xml`` -- the same document with CDATA sections
                             stripped out for the sonar importer.

    Files whose base name is ``daisy`` or ``daisy_sonar`` are skipped so
    reruns do not merge previous output back in.

    Fixes over the original: regexes use raw strings (the old literals
    relied on invalid ``\\[`` escape sequences, a warning/future error on
    modern Python) and files are opened via ``with`` so handles are closed
    even when an error occurs mid-merge.
    """
    xml_header = '<?xml version="1.0" encoding="UTF-8"?>'
    parts = [xml_header,
             '<daisy time="' + time.strftime('%Y/%m/%d %X') + '">']
    for root, _, files in os.walk(r'.'):
        for filename in files:
            stem, ext = os.path.splitext(filename)
            if stem in ('daisy', 'daisy_sonar') or ext != '.xml':
                continue
            filepath = os.path.join(root, filename)
            # Drop each file's own XML declaration (assumed to be exactly
            # xml_header); the merged document already starts with one.
            with open(filepath) as fin:
                parts.append(fin.read()[len(xml_header):])
    parts.append('</daisy>')
    text = ''.join(parts)
    # Blank failure messages; they may contain characters that break the
    # downstream consumers.
    text = re.sub(r'message=".*?"', 'message=""', text)
    with open('./daisy.xml', 'w') as fout:
        fout.write(text)
    # Sonar cannot digest CDATA blocks, so strip them from its copy.
    text = re.sub(r'<!\[CDATA\[.*?\]\]>', '', text, flags=re.S)
    with open('./daisy_sonar.xml', 'w') as fout:
        fout.write(text)
mergeLog()

View File

@ -0,0 +1,292 @@
from tempest.api.daisy import base
from tempest import config
from nose.tools import set_trace
from daisyclient import exc as client_exc
import copy
from fake.logical_network_fake import FakeLogicNetwork as logical_fake
CONF = config.CONF
class DaisyCinderVolumeTest(base.BaseDaisyTest):
    """API tests for daisy cinder-volume (disk array) management.

    Each test builds a cluster plus a role, attaches a cinder-volume
    back-end description to the role, and exercises add/update/list/
    detail behaviour, including the error paths for duplicate disk
    arrays, unknown role ids and unsupported volume drivers.
    ``tearDown`` removes the role and cluster a test created.
    """
    @classmethod
    def resource_setup(cls):
        super(DaisyCinderVolumeTest, cls).resource_setup()
        cls.fake = logical_fake()
        cls.cinder_volume_add_meta = {
            'disk_array': [{'management_ips': '10.43.177.1,10.43.177.2',
                            'pools': 'pool1,pool2',
                            'user_name': 'rooot',
                            'user_pwd': 'pwd',
                            'volume_driver': 'KS3200_FCSAN',
                            'volume_type': 'KISP-1'}]}
        cls.cinder_volume_update_meta = {'management_ips': '10.43.177.3',
                                         'pools': 'pool3',
                                         'user_name': 'rooot',
                                         'user_pwd': 'pwd',
                                         'volume_driver': 'KS3200_FCSAN',
                                         'volume_type': 'KISP-1'}
        cls.cluster_meta = {'description': 'desc',
                            'logic_networks': [{'name': 'external1',
                                                'physnet_name': 'phynet2',
                                                'segmentation_id': 200,
                                                'segmentation_type': 'vlan',
                                                'shared': True,
                                                'subnets': [{'cidr': '192.168.1.0/24',
                                                             'dns_nameservers': ['8.8.4.4',
                                                                                 '8.8.8.8'],
                                                             'floating_ranges': [['192.168.1.2',
                                                                                  '192.168.1.200']],
                                                             'gateway': '192.168.1.1',
                                                             'name': 'subnet2'},
                                                            {'cidr': '172.16.1.0/24',
                                                             'dns_nameservers': ['8.8.4.4',
                                                                                 '8.8.8.8'],
                                                             'floating_ranges': [['172.16.1.130',
                                                                                  '172.16.1.150'],
                                                                                 ['172.16.1.151',
                                                                                  '172.16.1.254']],
                                                             'gateway': '172.16.1.1',
                                                             'name': 'subnet10'}],
                                                'type': 'external'},
                                               {'name': 'internal2',
                                                'physnet_name': 'phynet1',
                                                'segmentation_id': 1023,
                                                'segmentation_type': 'vxlan',
                                                'shared': True,
                                                'subnets': [{'cidr': '192.168.2.0/24',
                                                             'dns_nameservers': ['8.8.4.4',
                                                                                 '8.8.8.8'],
                                                             'floating_ranges': [['192.168.2.130',
                                                                                  '192.168.2.254']],
                                                             'gateway': '192.168.2.1',
                                                             'name': 'subnet123'}],
                                                'type': 'internal'},
                                               {'name': 'internal1',
                                                'physnet_name': 'phynet3',
                                                'segmentation_id': '777',
                                                'segmentation_type': 'vlan',
                                                'shared': False,
                                                'subnets': [{'cidr': '192.168.31.0/24',
                                                             'dns_nameservers': ['8.8.4.4',
                                                                                 '8.8.8.8'],
                                                             'floating_ranges': [['192.168.31.130',
                                                                                  '192.168.31.254']],
                                                             'gateway': '192.168.31.1',
                                                             'name': 'subnet3'},
                                                            {'cidr': '192.168.4.0/24',
                                                             'dns_nameservers': ['8.8.4.4',
                                                                                 '8.8.8.8'],
                                                             'floating_ranges': [['192.168.4.130',
                                                                                  '192.168.4.254']],
                                                             'gateway': '192.168.4.1',
                                                             'name': 'subnet4'}],
                                                'type': 'internal'}],
                            'name': 'test',
                            'networking_parameters': {'base_mac': 'fa:16:3e:00:00:00',
                                                      'gre_id_range': [2, 2000],
                                                      'net_l23_provider': 'ovs',
                                                      'public_vip': '172.16.0.3',
                                                      'segmentation_type': 'vlan,vxlan',
                                                      'vlan_range': [2, 4094],
                                                      'vni_range': [1000, 1030]},
                            'networks': [],
                            'nodes': [],
                            'routers': [{'description': 'router1',
                                         'external_logic_network': 'external1',
                                         'name': 'router1',
                                         'subnets': ['subnet4', 'subnet3', 'subnet2']},
                                        {'description': 'router2',
                                         'external_logic_network': 'external1',
                                         'name': 'router2',
                                         'subnets': ['subnet10']}]}
        cls.role_meta = {'name': 'test_role',
                         'description': 'test'}

    def private_network_add(self):
        """Create the three private network planes the cluster needs and
        record their ids both on the instance and in ``cluster_meta``."""
        private_network_params = self.fake.fake_private_network_parameters()
        private_network_params1 = self.fake.fake_private_network_parameters1()
        private_network_params2 = self.fake.fake_private_network_parameters2()
        private_network_params = self.add_network(**private_network_params)
        private_network_params1 = self.add_network(**private_network_params1)
        private_network_params2 = self.add_network(**private_network_params2)
        self.private_network_id = private_network_params.id
        self.private_network_id1 = private_network_params1.id
        self.private_network_id2 = private_network_params2.id
        self.cluster_meta['networks'] = [self.private_network_id,
                                         self.private_network_id1,
                                         self.private_network_id2]
        return copy.deepcopy(private_network_params)

    def private_network_delete(self):
        """Delete the network planes created by ``private_network_add``.

        Fix: removed a leftover ``set_trace()`` debugger breakpoint that
        suspended any run reaching this cleanup path.
        """
        self.delete_network(self.private_network_id)
        self.delete_network(self.private_network_id1)
        self.delete_network(self.private_network_id2)

    def test_add_cinder_volume(self):
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta)
        self.role_meta['cluster_id'] = cluster_info.id
        role = self.add_role(**self.role_meta)
        self.cinder_volume_add_meta['role_id'] = role.id
        cinder_volume_info = self.add_cinder_volume(
            **self.cinder_volume_add_meta)
        self.assertEqual('10.43.177.1,10.43.177.2',
                         cinder_volume_info.management_ips,
                         "test_add_cinder_volume failed")
        self.delete_cinder_volume(cinder_volume_info.id)

    def test_add_same_cinder_volume(self):
        # Adding a second disk array with identical parameters must be
        # rejected as a conflict.
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta)
        self.role_meta['cluster_id'] = cluster_info.id
        role = self.add_role(**self.role_meta)
        self.cinder_volume_add_meta['role_id'] = role.id
        cinder_volume_info = self.add_cinder_volume(
            **self.cinder_volume_add_meta)
        self.assertRaisesMessage(client_exc.HTTPBadRequest,
                                 "400 Bad Request: cinder_volume array disks "
                                 "conflict with cinder_volume %s (HTTP 400)" %
                                 cinder_volume_info.id,
                                 self.add_cinder_volume,
                                 **self.cinder_volume_add_meta)
        self.delete_cinder_volume(cinder_volume_info.id)

    def test_add_cinder_volume_with_wrong_role(self):
        self.cinder_volume_add_meta['role_id'] = \
            'af47d81c-7ae4-4148-a801-b4a5c6a52074'
        self.assertRaisesMessage(client_exc.HTTPNotFound,
                                 "404 Not Found: The resource could not be "
                                 "found.: Role with identifier "
                                 "af47d81c-7ae4-4148-a801-b4a5c6a52074 not "
                                 "found (HTTP 404)",
                                 self.add_cinder_volume,
                                 **self.cinder_volume_add_meta)
        del self.cinder_volume_add_meta['role_id']

    def test_add_cinder_volume_with_wrong_driver(self):
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta)
        self.role_meta['cluster_id'] = cluster_info.id
        role = self.add_role(**self.role_meta)
        self.cinder_volume_add_meta['role_id'] = role.id
        self.cinder_volume_add_meta['disk_array'][0]['volume_driver'] = \
            'test_driver'
        self.assertRaisesMessage(client_exc.HTTPBadRequest,
                                 "400 Bad Request: volume_driver test_driver "
                                 "is not supported (HTTP 400)",
                                 self.add_cinder_volume,
                                 **self.cinder_volume_add_meta)
        del self.cinder_volume_add_meta['role_id']
        # Restore the shared class-level meta for subsequent tests.
        self.cinder_volume_add_meta['disk_array'][0]['volume_driver'] = \
            'KS3200_FCSAN'

    def test_update_cinder_volume(self):
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta)
        self.role_meta['cluster_id'] = cluster_info.id
        role = self.add_role(**self.role_meta)
        self.cinder_volume_add_meta['role_id'] = role.id
        cinder_volume_info = self.add_cinder_volume(
            **self.cinder_volume_add_meta)
        cinder_volume_update_info = self.update_cinder_volume(
            cinder_volume_info.id, **self.cinder_volume_update_meta)
        self.assertEqual('10.43.177.3',
                         cinder_volume_update_info.management_ips,
                         "test_update_cinder_volume failed")
        self.delete_cinder_volume(cinder_volume_info.id)

    def test_update_to_same_cinder_volume(self):
        # Updating a second disk array so it collides with the first one
        # must be rejected as a conflict.
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta)
        self.role_meta['cluster_id'] = cluster_info.id
        role = self.add_role(**self.role_meta)
        self.cinder_volume_add_meta['role_id'] = role.id
        cinder_volume_info = self.add_cinder_volume(
            **self.cinder_volume_add_meta)
        cinder_volume_add_meta1 = {'disk_array': [{'management_ips':
                                                   '10.43.177.3,10.43.177.4',
                                                   'pools': 'pool1,pool2',
                                                   'user_name': 'rooot',
                                                   'user_pwd': 'pwd',
                                                   'volume_driver':
                                                   'KS3200_FCSAN',
                                                   'volume_type': 'KISP-1'}]}
        cinder_volume_add_meta1['role_id'] = role.id
        cinder_volume_info1 = self.add_cinder_volume(**cinder_volume_add_meta1)
        update_meta = {'management_ips': '10.43.177.1,10.43.177.2'}
        self.assertRaisesMessage(
            client_exc.HTTPBadRequest,
            "400 Bad Request: cinder_volume array disks conflict with "
            "cinder_volume %s (HTTP 400)" % cinder_volume_info.id,
            self.update_cinder_volume,
            cinder_volume_info1.id,
            **update_meta)
        self.delete_cinder_volume(cinder_volume_info.id)
        self.delete_cinder_volume(cinder_volume_info1.id)

    def test_update_cinder_volume_with_wrong_driver(self):
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta)
        self.role_meta['cluster_id'] = cluster_info.id
        role = self.add_role(**self.role_meta)
        self.cinder_volume_add_meta['role_id'] = role.id
        cinder_volume_info = self.add_cinder_volume(
            **self.cinder_volume_add_meta)
        update_meta = {'volume_driver': 'test_driver'}
        self.assertRaisesMessage(
            client_exc.HTTPBadRequest,
            "400 Bad Request: volume_driver test_driver is not supported"
            " (HTTP 400)",
            self.update_cinder_volume, cinder_volume_info.id, **update_meta)
        self.delete_cinder_volume(cinder_volume_info.id)

    def test_list_cinder_volume(self):
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta)
        self.role_meta['cluster_id'] = cluster_info.id
        role = self.add_role(**self.role_meta)
        self.cinder_volume_add_meta['role_id'] = role.id
        cinder_volume_info = self.add_cinder_volume(
            **self.cinder_volume_add_meta)
        cinder_volume_meta = {}
        cinder_volume_flag = False
        list_cinder_volume = self.list_cinder_volume(**cinder_volume_meta)
        query_cinder_volume_list = [volume_info for volume_info
                                    in list_cinder_volume]
        if query_cinder_volume_list:
            cinder_volume_flag = True
        self.assertTrue(cinder_volume_flag, "test_list_cinder_volume error")
        self.delete_cinder_volume(cinder_volume_info.id)

    def test_get_cinder_volume_detail(self):
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta)
        self.role_meta['cluster_id'] = cluster_info.id
        role = self.add_role(**self.role_meta)
        self.cinder_volume_add_meta['role_id'] = role.id
        cinder_volume_info = self.add_cinder_volume(
            **self.cinder_volume_add_meta)
        cinder_volume_detail_info = self.get_cinder_volume_detail(
            cinder_volume_info.id)
        self.assertEqual("10.43.177.1,10.43.177.2",
                         cinder_volume_detail_info.management_ips,
                         "test_get_cinder_volume_detail failed")
        self.delete_cinder_volume(cinder_volume_info.id)

    def tearDown(self):
        # The class-level meta dicts are shared between tests, so strip the
        # per-test ids a test stored in them and delete the resources.
        if self.cinder_volume_add_meta.get('role_id', None):
            self.delete_role(self.cinder_volume_add_meta['role_id'])
            del self.cinder_volume_add_meta['role_id']
        if self.role_meta.get('cluster_id', None):
            self.delete_cluster(self.role_meta['cluster_id'])
            del self.role_meta['cluster_id']
        super(DaisyCinderVolumeTest, self).tearDown()

View File

@ -0,0 +1,371 @@
# -*- coding: UTF-8 -*-
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from tempest.api.daisy import base
from tempest import config
from fake.logical_network_fake import FakeLogicNetwork as logical_fake
import copy
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TecsClusterTest(base.BaseDaisyTest):
    """Tempest API tests for Daisy/TECS cluster CRUD operations.

    Exercises add/update/get/list/delete of clusters against a live
    daisy endpoint, covering networking parameters, logic networks,
    routers, nodes, auto_scale and hwm_ip attributes.  Tests mutate the
    shared class-level payload dicts; tearDown() restores them.
    """

    @classmethod
    def resource_setup(cls):
        # Shared request payloads for the whole class.  cluster_meta2 is
        # the "full" payload with logic networks / routers; the numbered
        # metas are minimal variants used by individual tests.
        super(TecsClusterTest, cls).resource_setup()
        cls.fake = logical_fake()
        cls.host_meta = {'name': 'test_add_host',
                         'description': 'test_tempest'}
        cls.cluster_meta1 = {'description': 'desc',
                             'name': 'test'}
        cls.cluster_meta2 = {'description': 'desc',
                             'logic_networks': [{'name': 'external1',
                                                 'physnet_name': 'phynet2',
                                                 'segmentation_id': 200,
                                                 'segmentation_type': 'vlan',
                                                 'shared': True,
                                                 'subnets': [{'cidr': '192.168.1.0/24',
                                                              'dns_nameservers': ['8.8.4.4',
                                                                                  '8.8.8.8'],
                                                              'floating_ranges': [['192.168.1.2',
                                                                                   '192.168.1.200']],
                                                              'gateway': '192.168.1.1',
                                                              'name': 'subnet2'},
                                                             {'cidr': '172.16.1.0/24',
                                                              'dns_nameservers': ['8.8.4.4',
                                                                                  '8.8.8.8'],
                                                              'floating_ranges': [['172.16.1.130',
                                                                                   '172.16.1.150']],
                                                              'gateway': '172.16.1.1',
                                                              'name': 'subnet10'}],
                                                 'type': 'external'},
                                                {'name': 'internal1',
                                                 'physnet_name': 'phynet1',
                                                 'segmentation_id': '777',
                                                 'segmentation_type': 'vlan',
                                                 'shared': False,
                                                 'subnets': [{'cidr': '192.168.31.0/24',
                                                              'dns_nameservers': ['8.8.4.4',
                                                                                  '8.8.8.8'],
                                                              'floating_ranges': [['192.168.31.130',
                                                                                   '192.168.31.254']],
                                                              'gateway': '192.168.31.1',
                                                              'name': 'subnet3'},
                                                             {'cidr': '192.168.4.0/24',
                                                              'dns_nameservers': ['8.8.4.4',
                                                                                  '8.8.8.8'],
                                                              'floating_ranges': [['192.168.4.130',
                                                                                   '192.168.4.254']],
                                                              'gateway': '192.168.4.1',
                                                              'name': 'subnet4'}],
                                                 'type': 'internal'}],
                             'name': 'test',
                             'networking_parameters': {'base_mac': 'fa:16:3e:00:00:00',
                                                       'gre_id_range': [2, 2000],
                                                       'net_l23_provider': 'ovs',
                                                       'public_vip': '172.16.0.3',
                                                       'segmentation_type': 'vlan,vxlan',
                                                       'vlan_range': [2, 4094],
                                                       'vni_range': [1000, 1030]},
                             'networks': [],
                             'nodes': [],
                             'routers': [{'description': 'router1',
                                          'external_logic_network': 'external1',
                                          'name': 'router1',
                                          'subnets': ['subnet4']},
                                         {'description': 'router2',
                                          'external_logic_network': 'external1',
                                          'name': 'router2',
                                          'subnets': ['subnet10']}]}
        cls.cluster_meta3 = {'description': "This cluster's name is null",
                             'name': ""}
        cls.cluster_meta4 = {'description': "",
                             'name': "rwj_test_add_cluster_no_description"}
        cls.cluster_meta5 = {'description': "test_add_host5",
                             'name': "test_add_host5"}
        cls.cluster_meta6 = {'description': "test_add_host6",
                             'name': "test_add_host6"}
        cls.cluster_meta7 = {'description': "test_add_host7",
                             'name': "test_add_host7"}
        # NOTE(review): cluster_meta8 reuses cluster_meta7's name and
        # description; only auto_scale differs -- confirm this is intended.
        cls.cluster_meta8 = {'description': "test_add_host7",
                             'name': "test_add_host7",
                             'auto_scale': 1}
        cls.cluster_meta9 = {'description': "test_with_hwm",
                             'name': "test_with_hwm",
                             'hwm_ip': "10.43.211.63"}

    def private_network_add(self):
        """Create the three private network planes the networking tests
        need, record their ids, and wire them into cluster_meta2.

        Returns a deep copy of the first created network.
        """
        private_network_params = self.fake.fake_private_network_parameters()
        private_network_params1 = self.fake.fake_private_network_parameters1()
        private_network_params2 = self.fake.fake_private_network_parameters2()
        private_network_params = self.add_network(**private_network_params)
        private_network_params1 = self.add_network(**private_network_params1)
        private_network_params2 = self.add_network(**private_network_params2)
        self.private_network_id = private_network_params.id
        self.private_network_id1 = private_network_params1.id
        self.private_network_id2 = private_network_params2.id
        self.cluster_meta2['networks'] = [self.private_network_id,
                                          self.private_network_id1,
                                          self.private_network_id2]
        return copy.deepcopy(private_network_params)

    def private_network_delete(self):
        """Delete the three network planes created by private_network_add()."""
        self.delete_network(self.private_network_id)
        self.delete_network(self.private_network_id1)
        self.delete_network(self.private_network_id2)

    def test_add_cluster_with_networking_parameters(self):
        """A full cluster payload round-trips its networking parameters."""
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta2)
        self.assertEqual(self.cluster_meta2['name'], cluster_info.name, "cluster name is not correct")
        self.assertEqual(self.cluster_meta2['description'], cluster_info.description, "cluster add interface execute failed")
        self.assertEqual(self.cluster_meta2['networking_parameters']['base_mac'], cluster_info.base_mac, "cluster add interface execute failed")
        self.assertEqual(self.cluster_meta2['networking_parameters']['net_l23_provider'], cluster_info.net_l23_provider, "cluster add interface execute failed")
        self.assertEqual(self.cluster_meta2['networking_parameters']['public_vip'], cluster_info.public_vip, "cluster add interface execute failed")
        self.assertEqual(self.cluster_meta2['networking_parameters']['segmentation_type'], cluster_info.segmentation_type, "cluster add interface execute failed")
        self.delete_cluster(cluster_info.id)

    def test_add_cluster_no_networking_parameters(self):
        """A minimal payload (name + description only) is accepted."""
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta1)
        self.assertEqual(self.cluster_meta1['name'], cluster_info.name, "cluster add interface is not correct")
        self.assertEqual(self.cluster_meta1['description'], cluster_info.description, "cluster add interface execute failed")
        self.delete_cluster(cluster_info.id)

    def test_add_cluster_with_networking_parameters_no_routers(self):
        """The full payload minus its 'routers' section is still accepted."""
        if self.cluster_meta2.get('routers', None):
            self.private_network_add()
            cluster_temp = self.cluster_meta2.copy()
            del cluster_temp['routers']
            cluster_info = self.add_cluster(**cluster_temp)
            # cluster = self.get_cluster(cluster_info.id)
            self.assertEqual(cluster_temp['name'], cluster_info.name, "cluster add interface execute failed")
            self.delete_cluster(cluster_info.id)

    def test_add_cluster_with_nodes(self):
        """A cluster created with a node list reports that list back."""
        host_info = self.add_host(**self.host_meta)
        nodes = []
        nodes.append(host_info.id)
        self.cluster_meta1['nodes'] = nodes
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta1)
        cluster = self.get_cluster(cluster_info.id)
        self.assertEqual(self.cluster_meta1['name'], cluster.name, "add cluster with nodes is not correct")
        self.assertEqual(self.cluster_meta1['description'], cluster.description, "add cluster with nodes execute failed")
        self.assertEqual(self.cluster_meta1['nodes'], cluster.nodes, "add cluster with nodes execute failed")
        self.delete_cluster(cluster_info.id)
        self.delete_host(host_info.id)

    def test_update_cluster_with_no_networking_parameters(self):
        """Updating name/description is reflected in the reply."""
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta1)
        self.cluster_meta1['name'] = "test_name"
        self.cluster_meta1['description'] = "test_desc"
        cluster_update_info = self.update_cluster(cluster_info.id, **self.cluster_meta1)
        self.assertEqual(self.cluster_meta1['name'], cluster_update_info.name, "cluster update interface is not correct")
        self.assertEqual(self.cluster_meta1['description'], cluster_update_info.description, "cluster update interface is not correct")
        self.delete_cluster(cluster_info.id)

    def test_update_cluster_with_nodes(self):
        """Adding a node via update is reflected by a subsequent get."""
        host_info = self.add_host(**self.host_meta)
        nodes = []
        nodes.append(host_info.id)
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta1)
        self.cluster_meta1['nodes'] = nodes
        cluster_update_info = self.update_cluster(cluster_info.id, **self.cluster_meta1)
        cluster = self.get_cluster(cluster_info.id)
        self.assertEqual(self.cluster_meta1['name'], cluster_update_info.name, "update cluster with nodes is not correct")
        self.assertEqual(self.cluster_meta1['description'], cluster_update_info.description, "update cluster with nodes execute failed")
        self.assertEqual(self.cluster_meta1['nodes'], cluster.nodes, "update cluster with nodes execute failed")
        self.delete_cluster(cluster_info.id)
        self.delete_host(host_info.id)

    def test_update_cluster_with_networking_parameters(self):
        """A minimal cluster can be updated with the full network payload."""
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta1)
        cluster_update_info = self.update_cluster(cluster_info.id, **self.cluster_meta2)
        self.assertEqual(self.cluster_meta2['name'], cluster_update_info.name, "update cluster with networking parameters is not correct")
        self.assertEqual(self.cluster_meta2['description'], cluster_update_info.description, "update cluster with networking parameters execute failed")
        # cluster = self.get_cluster(cluster_info.id)
        self.delete_cluster(cluster_info.id)

    def test_update_cluster_with_hwm(self):
        """Setting hwm_ip via update is reflected in the reply."""
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta1)
        hwm_meta = {"hwm_ip": "10.43.211.63"}
        cluster_update_info = self.update_cluster(cluster_info.id, **hwm_meta)
        self.assertEqual("10.43.211.63", cluster_update_info.hwm_ip,
                         "Update cluster with hwm_ip failed")
        self.delete_cluster(cluster_info.id)

    def test_update_cluster_with_networking_parameters_add_router(self):
        """An extra router can be appended through a cluster update."""
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta2)
        router = {'description': 'router3',
                  'external_logic_network': 'external1',
                  'name': 'router3',
                  'subnets': ['subnet3']}
        self.cluster_meta2['routers'].append(router)
        cluster_update_info = self.update_cluster(cluster_info.id, **self.cluster_meta2)
        self.assertEqual(self.cluster_meta2['name'], cluster_update_info.name, "update cluster with networking parameters is not correct")
        self.assertEqual(self.cluster_meta2['description'], cluster_update_info.description, "update cluster with networking parameters execute failed")
        # cluster = self.get_cluster(cluster_info.id)
        self.delete_cluster(cluster_info.id)

    def test_list_cluster(self):
        # NOTE(review): placeholder -- listing is covered by the
        # filter/sort tests below.
        # filter_cluster_meta = {}
        # self.list_clusters()
        pass

    def test_list_cluster_filter_by_name(self):
        """Filtering the listing by name returns the matching cluster."""
        self.add_cluster(**self.cluster_meta1)
        # cluster_info5 = self.add_cluster(**self.cluster_meta5)
        filter_cluster_meta = {'name': "test"}
        list_clusters = self.list_filter_clusters(**filter_cluster_meta)
        cluster_flag = False
        for query_cluster in list_clusters:
            if query_cluster.name == "test":
                cluster_flag = True
        self.assertTrue(cluster_flag, "test_list_cluster_filter_by_name error")

    def test_delete_cluster(self):
        """A deleted cluster no longer appears in the listing."""
        cluster_info1 = self.add_cluster(**self.cluster_meta1)
        cluster_info5 = self.add_cluster(**self.cluster_meta5)
        self.delete_cluster(cluster_info1.id)
        cluster_flag = True
        cluster_meta = {}
        list_cluster = self.list_clusters(**cluster_meta)
        for query_cluster in list_cluster:
            if query_cluster.id == cluster_info1.id:
                cluster_flag = False
        self.assertTrue(cluster_flag, "test_delete_cluster error")
        self.delete_cluster(cluster_info5.id)

    def test_list_cluster_by_sort_key(self):
        """Listing sorted by id comes back in descending id order.

        NOTE(review): assumes these three clusters are the only ones
        present when the test runs -- confirm isolation.
        """
        cluster_info5 = self.add_cluster(**self.cluster_meta5)
        cluster_info6 = self.add_cluster(**self.cluster_meta6)
        cluster_info7 = self.add_cluster(**self.cluster_meta7)
        cluster_id_sort = sorted([cluster_info5.id, cluster_info6.id, cluster_info7.id], reverse=True)
        cluster_meta = {'sort_key': "id"}
        list_cluster = self.list_clusters(**cluster_meta)
        query_cluster_id_list = [cluster_info.id for cluster_info in list_cluster]
        self.assertEqual(query_cluster_id_list, cluster_id_sort, "test_list_cluster_by_sort_key error")
        self.delete_cluster(cluster_info5.id)
        self.delete_cluster(cluster_info6.id)
        self.delete_cluster(cluster_info7.id)

    def test_list_cluster_by_sort_dir(self):
        """Listing sorted by name descending matches the expected order."""
        cluster_info5 = self.add_cluster(**self.cluster_meta5)
        cluster_info6 = self.add_cluster(**self.cluster_meta6)
        cluster_info7 = self.add_cluster(**self.cluster_meta7)
        cluster_name_sort = ['test_add_host7', 'test_add_host6', 'test_add_host5']
        cluster_meta = {'sort_dir': "desc", 'sort_key': "name"}
        list_cluster = self.list_clusters(**cluster_meta)
        query_cluster_name_list = [cluster_info.name for cluster_info in list_cluster]
        self.assertEqual(query_cluster_name_list, cluster_name_sort, "test_list_cluster_by_sort_dir error")
        self.delete_cluster(cluster_info5.id)
        self.delete_cluster(cluster_info6.id)
        self.delete_cluster(cluster_info7.id)

    def test_list_cluster_by_sort_limit(self):
        """page_size=1 with name-descending sort returns only the last one."""
        cluster_info5 = self.add_cluster(**self.cluster_meta5)
        cluster_info6 = self.add_cluster(**self.cluster_meta6)
        cluster_info7 = self.add_cluster(**self.cluster_meta7)
        cluster_meta = {'page_size': "1", 'sort_dir': "desc", 'sort_key': "name"}
        list_cluster = self.list_clusters(**cluster_meta)
        query_cluster_id_list = [cluster_info.id for cluster_info in list_cluster]
        self.assertEqual(query_cluster_id_list, [cluster_info7.id], "test_list_cluster_by_sort_key error")
        self.delete_cluster(cluster_info5.id)
        self.delete_cluster(cluster_info6.id)
        self.delete_cluster(cluster_info7.id)

    def test_add_cluster_with_neutron_parameters(self):
        """Routers, floating ranges and DNS servers survive the round trip.

        Only index [0] of the returned routers/subnets is inspected, so
        each flag accepts any of the possible orderings.
        """
        self.private_network_add()
        add_host = self.add_cluster(**self.cluster_meta2)
        cluster_detail = self.get_cluster(add_host.id)
        self.assertEqual(self.cluster_meta2['networking_parameters']['base_mac'], cluster_detail.base_mac, "cluster add networking_parameters failed")
        router_flag = False
        floating_ranges_flag = False
        dns_nameservers_flag = False
        if (cluster_detail.routers[0]['name'] == 'router1') or (cluster_detail.routers[0]['name'] == 'router2'):
            router_flag = True
        if (cluster_detail.logic_networks[0]['subnets'][0]['floating_ranges'] == [['192.168.4.130', '192.168.4.254']]) or \
                (cluster_detail.logic_networks[0]['subnets'][0]['floating_ranges'] == [['192.168.1.2', '192.168.1.200']]) or \
                (cluster_detail.logic_networks[0]['subnets'][0]['floating_ranges'] == [['172.16.1.130', '172.16.1.150']]) or \
                (cluster_detail.logic_networks[0]['subnets'][0]['floating_ranges'] == [['192.168.31.130', '192.168.31.254']]):
            floating_ranges_flag = True
        if cluster_detail.logic_networks[0]['subnets'][0]['dns_nameservers'] == ['8.8.8.8', '8.8.4.4'] or \
                cluster_detail.logic_networks[0]['subnets'][0]['dns_nameservers'] == ['8.8.4.4', '8.8.8.8']:
            dns_nameservers_flag = True
        self.assertTrue(router_flag, "cluster add floating_ranges failed")
        self.assertTrue(floating_ranges_flag, "cluster add floating_ranges failed")
        self.assertTrue(dns_nameservers_flag, "cluster add dns_nameservers failed")
        self.delete_cluster(add_host.id)

    def test_cluster_detail_info(self):
        """Detail view returns the core attributes of the full payload.

        The cluster itself is not deleted here; tearDown()'s
        _clean_all_cluster() removes it.
        """
        self.private_network_add()
        add_cluster = self.add_cluster(**self.cluster_meta2)
        cluster_detail = self.get_cluster(add_cluster.id)
        self.assertEqual(self.cluster_meta2['networking_parameters']['base_mac'], cluster_detail.base_mac, "cluster base_mac detail failed")
        self.assertEqual(self.cluster_meta2['name'], cluster_detail.name, "cluster name detail failed")
        self.assertEqual(self.cluster_meta2['description'], cluster_detail.description, "cluster description detail failed")
        self.assertEqual(self.cluster_meta2['networking_parameters']['public_vip'], cluster_detail.public_vip, "cluster public_vip detail failed")
        self.private_network_delete()

    def test_add_cluster_no_description(self):
        """STC-F-Daisy_Cluster-0013: empty description is accepted."""
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta4)
        if cluster_info.description is None:
            self.assertEqual(self.cluster_meta4['description'], cluster_info.description, "cluster add interface execute failed")
            # Python 2 print statements (this file targets py2).
            print "\n ===========cluster_description= %s ", cluster_info.description
            print "\n ===========STC-F-Daisy_Cluster-0013 run is over ==============="
        self.delete_cluster(cluster_info.id)

    def test_add_cluster_set_auto_scale(self):
        """STC-F-Daisy_Cluster-0020: auto_scale=1 round-trips."""
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta8)
        if cluster_info:
            self.assertEqual(self.cluster_meta8['auto_scale'], cluster_info.auto_scale, "cluster add set auto_scale=1 failed")
            print "\n ===========cluster auto_scale= %s ", cluster_info.auto_scale
            print "\n ===========STC-F-Daisy_Cluster-0020 run is over ==============="
        self.delete_cluster(cluster_info.id)

    def test_add_cluster_with_hwm(self):
        """Creating a cluster with hwm_ip set echoes the ip back."""
        self.private_network_add()
        cluster_info = self.add_cluster(**self.cluster_meta9)
        if cluster_info:
            self.assertEqual(self.cluster_meta9['hwm_ip'], cluster_info.hwm_ip,
                             "Add cluster with hwm_ip failed")
        self.delete_cluster(cluster_info.id)

    def tearDown(self):
        # 'nodes' is added by individual tests; strip it so the shared
        # payload is back to its baseline, then drop every cluster.
        if self.cluster_meta1.get('nodes', None):
            del self.cluster_meta1['nodes']
        self._clean_all_cluster()
        super(TecsClusterTest, self).tearDown()

View File

@ -0,0 +1,143 @@
from tempest.api.daisy import base
from tempest import config
CONF = config.CONF
class DaisyComponentTest(base.BaseDaisyTest):
    """Tempest API tests for Daisy component CRUD operations."""

    @classmethod
    def resource_setup(cls):
        # NOTE(review): host_meta, host_meta_interfaces and cluster_meta
        # are never referenced by the tests in this class -- candidates
        # for removal once confirmed unused.
        super(DaisyComponentTest, cls).resource_setup()
        cls.host_meta = {'name': 'test_add_host',
                         'description': 'test_tempest'}
        cls.host_meta_interfaces = {'type': 'ether',
                                    'name': 'eth1',
                                    'mac': 'fe80::f816:3eff',
                                    'ip': '10.43.177.121',
                                    'netmask': '255.255.254.0',
                                    'is_deployment': 'True',
                                    'assigned_networks': ['MANAGEMENT', 'DEPLOYMENT'],
                                    'slaves': 'eth1'}
        cls.cluster_meta = {'description': 'desc',
                            'logic_networks': [{'name': 'external1',
                                                'physnet_name': 'PRIVATE',
                                                'segmentation_id': 200,
                                                'segmentation_type': 'vlan',
                                                'shared': True,
                                                'subnets': [{'cidr': '192.168.1.0/24',
                                                             'dns_nameservers': ['8.8.4.4',
                                                                                 '8.8.8.8'],
                                                             'floating_ranges': [['192.168.1.2',
                                                                                  '192.168.1.200']],
                                                             'gateway': '192.168.1.1',
                                                             'name': 'subnet2'},
                                                            {'cidr': '172.16.1.0/24',
                                                             'dns_nameservers': ['8.8.4.4',
                                                                                 '8.8.8.8'],
                                                             'floating_ranges': [['172.16.1.130',
                                                                                  '172.16.1.150'],
                                                                                 ['172.16.1.151',
                                                                                  '172.16.1.254']],
                                                             'gateway': '172.16.1.1',
                                                             'name': 'subnet10'}],
                                                'type': 'external'},
                                               {'name': 'external2',
                                                'physnet_name': 'PUBLIC',
                                                'segmentation_id': 1023,
                                                'segmentation_type': 'vxlan',
                                                'shared': True,
                                                'subnets': [{'cidr': '192.168.2.0/24',
                                                             'dns_nameservers': ['8.8.4.4',
                                                                                 '8.8.8.8'],
                                                             'floating_ranges': [['192.168.2.130',
                                                                                  '192.168.2.254']],
                                                             'gateway': '192.168.2.1',
                                                             'name': 'subnet123'}],
                                                'type': 'external'},
                                               {'name': 'internal1',
                                                'physnet_name': 'PRIVATE',
                                                'segmentation_id': '777',
                                                'segmentation_type': 'vlan',
                                                'shared': False,
                                                'subnets': [{'cidr': '192.168.31.0/24',
                                                             'dns_nameservers': ['8.8.4.4',
                                                                                 '8.8.8.8'],
                                                             'floating_ranges': [['192.168.31.130',
                                                                                  '192.168.31.254']],
                                                             'gateway': '192.168.31.1',
                                                             'name': 'subnet3'},
                                                            {'cidr': '192.168.4.0/24',
                                                             'dns_nameservers': ['8.8.4.4',
                                                                                 '8.8.8.8'],
                                                             'floating_ranges': [['192.168.4.130',
                                                                                  '192.168.4.254']],
                                                             'gateway': '192.168.4.1',
                                                             'name': 'subnet4'}],
                                                'type': 'internal'}],
                            'name': 'test',
                            'networking_parameters': {'base_mac': 'fa:16:3e:00:00:00',
                                                      'gre_id_range': [2, 2000],
                                                      'net_l23_provider': 'ovs',
                                                      'public_vip': '172.16.0.3',
                                                      'segmentation_type': 'vlan,vxlan',
                                                      'vlan_range': [2, 4094],
                                                      'vni_range': [1000, 1030]},
                            'networks': [],
                            'nodes': [],
                            'routers': [{'description': 'router1',
                                         'external_logic_network': 'external1',
                                         'name': 'router1',
                                         'subnets': ['subnet4', 'subnet3']},
                                        {'description': 'router2',
                                         'external_logic_network': 'external2',
                                         'name': 'router2',
                                         'subnets': ['subnet2', 'subnet10']}]}
        cls.component_meta = {'name': 'test_component',
                              'description': 'test'}

    def test_list_component(self):
        """Every listed component name must belong to the known set."""
        component_meta = {}
        component_flag = True
        list_component = self.list_component(**component_meta)
        query_component_list = [component_info for component_info in list_component]
        component_list = ["camellia", "ha", "loadbalance", "amqp", "database",
                          "keystone", "ironic", "neutron",
                          "horizon", "ceilometer", "glance", "heat", "nova", "cinder"]
        for query_component in query_component_list:
            if query_component.name not in component_list:
                component_flag = False
        self.assertTrue(component_flag, "test_list_component error")

    def test_add_component(self):
        """A new component echoes back the requested name."""
        component = self.add_component(**self.component_meta)
        self.assertEqual("test_component", component.name, "test_add_component failed")
        self.delete_component(component.id)

    def test_component_delete(self):
        """A deleted component no longer appears in the listing."""
        component = self.add_component(**self.component_meta)
        self.delete_component(component.id)
        component_flag = True
        component_meta = {}
        list_component = self.list_component(**component_meta)
        query_component_list = [component_info for component_info in list_component]
        for query_component in query_component_list:
            if component.name == query_component.name:
                component_flag = False
        self.assertTrue(component_flag, "test_list_component error")

    def test_get_component_detail(self):
        """Fetching a component by id returns the stored name."""
        add_component_info = self.add_component(**self.component_meta)
        get_component = self.get_component(add_component_info.id)
        self.assertEqual('test_component', get_component.name)
        self.delete_component(get_component.id)

    def test_update_component(self):
        """Renaming a component is reflected in the reply."""
        add_component_info = self.add_component(**self.component_meta)
        update_component_meta = {'name': 'test_update_component',
                                 'description': 'test_tempest'}
        update_component_info = self.update_component(add_component_info.id, **update_component_meta)
        self.assertEqual("test_update_component", update_component_info.name, "test_update_component_with_cluster failed")
        self.delete_component(add_component_info.id)

View File

@ -0,0 +1,70 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.daisy import base
from tempest import config
CONF = config.CONF
class DaisyConfigFileTest(base.BaseDaisyTest):
    """Tempest API tests for Daisy config-file CRUD operations."""

    @classmethod
    def resource_setup(cls):
        super(DaisyConfigFileTest, cls).resource_setup()

    def test_add_config_file(self):
        """A newly created config file keeps the requested name."""
        payload = {'name': 'add_config_file',
                   'description': 'config_file_test'}
        created = self.add_config_file(**payload)
        self.assertEqual('add_config_file', created.name)

    def test_update_config_file(self):
        """Renaming an existing config file is reflected in the reply."""
        payload = {'name': 'add_config_file',
                   'description': 'config_file_test'}
        created = self.add_config_file(**payload)
        renamed = self.update_config_file(created.id,
                                          **{'name': 'update_config_file'})
        self.assertEqual('update_config_file', renamed.name)

    def test_get_config_file(self):
        """Fetching a config file by id returns the stored name."""
        payload = {'name': 'add_config_file',
                   'description': 'config_file_test'}
        created = self.add_config_file(**payload)
        fetched = self.get_config_file(created.id)
        self.assertEqual('add_config_file', fetched.name)

    def test_list_config_file(self):
        """Listing returns at least the config file created here."""
        payload = {'name': 'add_config_file',
                   'description': 'config_file_test'}
        self.add_config_file(**payload)
        found = [entry for entry in self.list_config_file()]
        self.assertTrue(bool(found), "test_list_config_file error")

    def test_delete_config_file(self):
        """A created config file can be deleted without error."""
        payload = {'name': 'add_config_file',
                   'description': 'config_file_test'}
        created = self.add_config_file(**payload)
        self.delete_config_file(created.id)

    def tearDown(self):
        # Remove every config file so each test starts from a clean slate.
        self._clean_all_config_file()
        super(DaisyConfigFileTest, self).tearDown()

View File

@ -0,0 +1,121 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.daisy import base
from tempest import config
import time
from daisyclient import exc as client_exc
from fake.logical_network_fake import FakeLogicNetwork as logical_fake
CONF = config.CONF
class DaisyDiscoverHostTest(base.BaseDaisyTest):
    """Tempest API tests for Daisy host discovery.

    Covers CRUD on discover-host records plus one end-to-end discovery
    run against the host that serves the daisy endpoint itself.
    """

    @classmethod
    def resource_setup(cls):
        super(DaisyDiscoverHostTest, cls).resource_setup()
        cls.fake = logical_fake()
        # Baseline payload; tearDown() strips any keys a test adds so
        # the next test starts from this state.
        cls.host_meta = {'ip': '127.0.0.1',
                         'passwd': 'ossdbg1'}

    def test_add_dicover_host(self):
        """A newly added discover-host starts in the 'init' state."""
        # NOTE(review): "dicover" typo kept -- renaming would change the
        # test id that CI job selection may depend on.
        host = self.add_discover_host(**self.host_meta)
        self.assertEqual("init", host.status, "add discover host failed")
        self.delete_discover_host(host.id)

    def test_delete_dicover_host(self):
        """An added discover-host record can be deleted without error."""
        host = self.add_discover_host(**self.host_meta)
        self.delete_discover_host(host.id)

    def test_list_discover_host(self):
        """Two added hosts must both appear in the listing."""
        host_meta = {'ip': '127.0.0.2', 'passwd': 'ossdbg2'}
        self.add_discover_host(**self.host_meta)
        self.add_discover_host(**host_meta)
        hosts = [host for host in self.list_discover_host()]
        self.assertEqual(2, len(hosts), "list discover host failed")

    def test_update_discover_host(self):
        """passwd and user can be changed on an existing record."""
        add_host_meta = {'ip': '127.0.0.2', 'passwd': 'ossdbg2',
                         'user': 'root'}
        host_1 = self.add_discover_host(**add_host_meta)
        self.assertEqual("root", host_1.user, "add discover host failed")
        update_host_meta = {'ip': '127.0.0.2', 'passwd': 'ossdbg1',
                            'user': 'root2'}
        update_host = self.update_discover_host(host_1.id,
                                                **update_host_meta)
        self.assertEqual("ossdbg1", update_host.passwd,
                         "update discover host failed")
        self.assertEqual("root2", update_host.user,
                         "update discover host failed")

    def test_get_discover_host_detail(self):
        """Detail view returns the stored user/passwd/ip."""
        add_host_meta = {'ip': '127.0.0.2', 'passwd': 'ossdbg2',
                         'user': 'root'}
        host_1 = self.add_discover_host(**add_host_meta)
        host_info = self.get_discover_host_detail(host_1.id)
        self.assertEqual("root", host_info.user, "get discover host failed")
        self.assertEqual("ossdbg2", host_info.passwd,
                         "get discover host failed")
        self.assertEqual("127.0.0.2", host_info.ip,
                         "get discover host failed")

    def test_add_discover_host_without_passwd(self):
        """Omitting passwd must be rejected with HTTP 400."""
        add_host_meta = {'ip': '127.0.0.2', 'user': 'root'}
        ex = self.assertRaises(client_exc.HTTPBadRequest,
                               self.add_discover_host, **add_host_meta)
        self.assertIn("PASSWD parameter can not be None.", str(ex))

    def test_add_discover_host_with_repeat_ip(self):
        # NOTE(review): duplicate-ip rejection is currently disabled on
        # the server side; re-enable this test once the API returns 403
        # for a repeated ip.
        pass

    def test_discover_host(self):
        """Discover the local daisy host and wait for it to succeed.

        The previous implementation polled in an unbounded ``while 1``
        loop, so a failed discovery hung the whole test run and the
        final assertion could never fail; poll a bounded number of
        times and fail cleanly instead.
        """
        daisy_endpoint = CONF.daisy.daisy_endpoint

        def get_middle_str(content, start_str, end_str):
            # Substring strictly between the two markers; str.index
            # raises ValueError if either marker is missing, so no
            # extra guard is needed.
            start = content.index(start_str) + len(start_str)
            return content[start:content.index(end_str)]

        local_ip = get_middle_str(daisy_endpoint, 'http://', ':19292')
        self.add_discover_host(ip=local_ip, passwd='ossdbg1')
        self.discover_host()
        discovered = False
        # Up to ~2 minutes; discovery installs an agent on the target,
        # which takes several seconds on its own.
        for _ in range(30):
            time.sleep(4)
            hosts = [host for host in self.list_discover_host()]
            if hosts and all(host.status == 'DISCOVERY_SUCCESSFUL'
                             for host in hosts):
                discovered = True
                break
        self.assertTrue(discovered, "discover host failed")

    def tearDown(self):
        # Strip per-test additions so host_meta is back to its baseline,
        # then remove every discover-host record.
        if self.host_meta.get('user', None):
            del self.host_meta['user']
        if self.host_meta.get('status', None):
            del self.host_meta['status']
        self._clean_all_discover_host()
        super(DaisyDiscoverHostTest, self).tearDown()

View File

@ -0,0 +1,61 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.daisy import base
from tempest import config
from fake.logical_network_fake import FakeLogicNetwork as logical_fake
CONF = config.CONF
class DaisyHwmTest(base.BaseDaisyTest):
    """Tempest API tests for Daisy HWM (hardware manager) records."""

    @classmethod
    def resource_setup(cls):
        super(DaisyHwmTest, cls).resource_setup()
        cls.fake = logical_fake()
        cls.hwm_meta = {'hwm_ip': '10.43.211.63',
                        'description': 'the first hwm'}

    def test_add_hwm(self):
        """Creating a HWM echoes back the requested ip."""
        created = self.add_hwm(**self.hwm_meta)
        self.assertEqual("10.43.211.63", created.hwm_ip, "add-hwm failed")

    def test_update_hwm(self):
        """Updating hwm_ip is reflected in the reply."""
        created = self.add_hwm(**self.hwm_meta)
        changed = self.update_hwm(created.id, **{'hwm_ip': '10.43.174.11'})
        self.assertEqual("10.43.174.11", changed.hwm_ip,
                         "update-hwm failed")

    def test_hwm_detail_info(self):
        """Fetching a HWM by id returns the stored ip."""
        created = self.add_hwm(**self.hwm_meta)
        detail = self.get_hwm_detail(created.id)
        self.assertEqual("10.43.211.63", detail.hwm_ip,
                         "test_hwm_detail_info failed")

    def test_hwm_list(self):
        """Every entry returned by list_hwm is a real object."""
        self.add_hwm(**self.hwm_meta)
        for entry in self.list_hwm():
            self.assertTrue(entry is not None)

    def test_hwm_delete(self):
        """A created HWM record can be deleted without error."""
        created = self.add_hwm(**self.hwm_meta)
        self.delete_hwm(created.id)

    def tearDown(self):
        # Remove every HWM record so tests stay independent.
        self._clean_all_hwm()
        super(DaisyHwmTest, self).tearDown()

View File

@ -0,0 +1,273 @@
import copy
from daisyclient import exc as client_exc
from tempest.api.daisy import base
from tempest import config
from fake.logical_network_fake import FakeLogicNetwork as logical_fake
CONF = config.CONF
class TecsLogicalNetworkTest(base.BaseDaisyTest):
LOGICAL_FILTER = ['name', 'physnet_name', 'segmentation_id',
'segmentation_type', 'shared', 'type']
SUBNET_FILTER = ['name', 'dns_nameservers', 'floating_ranges', 'gateway', 'cidr']
ROUTER_FILTER = ['name', 'description', 'external_logic_network', 'subnets']
    @classmethod
    def resource_setup(cls):
        # One shared fake-parameter factory for all tests in this class.
        super(TecsLogicalNetworkTest, cls).resource_setup()
        cls.fake = logical_fake()
    def _verify_logical_params(self, cluster_meta, fake_logical):
        """Project both logic-network lists down to the comparable keys.

        Mutates ``cluster_meta['logic_networks']`` in place so each dict
        keeps only the LOGICAL_FILTER keys, builds the same projection of
        ``fake_logical``, and returns that filtered copy for the caller's
        assertEqual.
        """
        cluster_meta['logic_networks'] = \
            [dict(filter(lambda paris: paris[0] in TecsLogicalNetworkTest.LOGICAL_FILTER, logic_network.items()))
             for logic_network in cluster_meta['logic_networks']]
        tmp_fake_logical = [dict(filter(lambda paris: paris[0] in TecsLogicalNetworkTest.LOGICAL_FILTER,
                                        logic_network.items()))
                            for logic_network in fake_logical]
        if cluster_meta['logic_networks'] != tmp_fake_logical:
            # Assumes a mismatch can only be reversed ordering of the
            # API result -- TODO(review) confirm against the server.
            cluster_meta['logic_networks'].reverse()
        return tmp_fake_logical
    def _verify_router_params(self, cluster_meta):
        """Normalize ``cluster_meta['routers']`` in place for comparison.

        Keeps only the ROUTER_FILTER keys of each router and removes
        duplicate entries from each router's subnet list.
        NOTE(review): ``list(set(...))`` does not preserve subnet order,
        so comparisons that depend on ordering may be flaky -- confirm.
        """
        cluster_meta['routers'] = \
            [dict(filter(lambda paris: paris[0] in TecsLogicalNetworkTest.ROUTER_FILTER, router.items()))
             for router in cluster_meta['routers']]
        for router in cluster_meta['routers']:
            router['subnets'] = copy.deepcopy(list(set(router['subnets'])))
def private_network_add(self):
# add network plane
private_network_params = self.fake.fake_private_network_parameters()
private_network_params = self.add_network(**private_network_params)
self.private_network_id = private_network_params.id
return copy.deepcopy(private_network_params)
def private_network_delete(self):
self.delete_network(self.private_network_id)
    # STC-F-Daisy_Logical_Network-0001
    def test_add_all_params(self):
        """Create a cluster with networking parameters, logic networks
        and routers all set, then verify each section round-trips."""
        private_network = self.private_network_add()
        fake_cluster = self.fake.fake_cluster_parameters(private_network)
        fake_logical = self.fake.fake_logical_parameters(private_network)
        fake_routers = self.fake.fake_router_parameters()
        fake_network = self.fake.fake_network_parameters()
        fake_cluster.update({'networking_parameters': fake_network,
                             'logic_networks': fake_logical,
                             'routers': fake_routers})
        cluster_info = self.add_cluster(**fake_cluster)
        cluster_meta = self.get_cluster(cluster_info.id).to_dict()
        self.assertEqual(cluster_meta.get('networking_parameters', None), fake_network)
        # _verify_logical_params/_verify_router_params normalize both
        # sides before comparison (key filtering, ordering, dedup).
        fake_logical = self._verify_logical_params(cluster_meta, fake_logical)
        self.assertEqual(cluster_meta.get('logic_networks', None), fake_logical)
        self._verify_router_params(cluster_meta)
        self.assertEqual(cluster_meta.get('routers', None), fake_routers)
        self.delete_cluster(cluster_info.id)
    # STC-A-Daisy_Logical_Network-0004
    def test_add_without_logical_parameters_exc(self):
        """Routers referencing absent logic networks are rejected (400)."""
        fake_cluster = self.fake.fake_cluster_parameters()
        fake_routers = self.fake.fake_router_parameters()
        fake_network = self.fake.fake_network_parameters()
        fake_cluster.update({'networking_parameters': fake_network,
                             'routers': fake_routers})
        # The message text must match the server response exactly.
        self.assertRaisesMessage(
            client_exc.HTTPBadRequest,
            "400 Bad Request: Logic_network flat1 is not valid range. (HTTP 400)",
            self.add_cluster, **fake_cluster)
# STC-F-Daisy_Logical_Network-0002
def test_add_network_params_only(self):
fake_cluster = self.fake.fake_cluster_parameters()
fake_network = self.fake.fake_network_parameters()
fake_cluster.update({'networking_parameters': fake_network})
cluster_info = self.add_cluster(**fake_cluster)
cluster_meta = self.get_cluster(cluster_info.id).to_dict()
self.assertEqual(cluster_meta.get('networking_parameters', None), fake_network)
self.delete_cluster(cluster_info.id)
    # STC-F-Daisy_Logical_Network-0003
    def test_add_network_and_logical_params(self):
        """Cluster add with networking plus logic_networks (no routers)."""
        private_network = self.private_network_add()
        fake_cluster = self.fake.fake_cluster_parameters(private_network)
        fake_logical = self.fake.fake_logical_parameters(private_network)
        fake_network = self.fake.fake_network_parameters()
        fake_cluster.update({'networking_parameters': fake_network,
                             'logic_networks': fake_logical})
        cluster_info = self.add_cluster(**fake_cluster)
        cluster_meta = self.get_cluster(cluster_info.id).to_dict()
        self.assertEqual(cluster_meta.get('networking_parameters', None), fake_network)
        # presumably reconciles fake_logical with server-side defaults
        fake_logical = self._verify_logical_params(cluster_meta, fake_logical)
        self.assertEqual(cluster_meta.get('logic_networks', None), fake_logical)
        self.delete_cluster(cluster_info.id)
    # STC-A-Daisy_Logical_Network-0007
    def test_routers_params_valid_check_exc(self):
        """Each flavor of invalid router data must be rejected with 400."""
        private_network = self.private_network_add()
        fake_cluster = self.fake.fake_cluster_parameters(private_network)
        fake_logical = self.fake.fake_logical_parameters(private_network)
        fake_network = self.fake.fake_network_parameters()
        fake_router = self.fake.fake_router_parameters2()
        fake_cluster.update({'networking_parameters': fake_network,
                             'logic_networks': fake_logical,
                             'routers': fake_router})
        # Case 1: all subnets already bound to a router.
        self.assertRaisesMessage(
            client_exc.HTTPBadRequest,
            "400 Bad Request: Logic network's subnets is all related with a router, it's not allowed. (HTTP 400)",
            self.add_cluster, **fake_cluster)
        # Case 2: same data with a different router name fails identically.
        tmp_fake_router1 = copy.deepcopy(fake_router)
        tmp_fake_router1[0]['name'] = "test"
        fake_cluster.update({'routers': tmp_fake_router1})
        self.assertRaisesMessage(
            client_exc.HTTPBadRequest,
            "400 Bad Request: Logic network's subnets is all related with a router, it's not allowed. (HTTP 400)",
            self.add_cluster, **fake_cluster)
        # Case 3: external_logic_network that does not exist.
        tmp_fake_router2 = copy.deepcopy(fake_router)
        tmp_fake_router2[0]['external_logic_network'] = "test"
        fake_cluster.update({'routers': tmp_fake_router2})
        self.assertRaisesMessage(
            client_exc.HTTPBadRequest,
            "400 Bad Request: Logic_network test is not valid range. (HTTP 400)",
            self.add_cluster, **fake_cluster)
        # Case 4: subnet name that does not exist.
        tmp_fake_router3 = copy.deepcopy(fake_router)
        tmp_fake_router3[0]['subnets'] = ['test']
        fake_cluster.update({'routers': tmp_fake_router3})
        self.assertRaisesMessage(
            client_exc.HTTPBadRequest,
            "400 Bad Request: Subnet test is not valid range. (HTTP 400)",
            self.add_cluster, **fake_cluster)
        self.private_network_delete()
    # TODO:name
    # STC-A-Daisy_Logical_Network-0008
    def test_subnets_params_valid_check_exc(self):
        """Each flavor of invalid subnet data must be rejected with 400."""
        private_network = self.private_network_add()
        fake_cluster = self.fake.fake_cluster_parameters(private_network)
        fake_logical = self.fake.fake_logical_parameters(private_network)
        fake_network = self.fake.fake_network_parameters()
        # Case 1: overlapping floating ip ranges.
        tmp_fake_logical1 = copy.deepcopy(fake_logical)
        tmp_fake_logical1[0]['subnets'] = self.fake.fake_subnet_parameters2()
        fake_cluster.update({'networking_parameters': fake_network,
                             'logic_networks': tmp_fake_logical1})
        self.assertRaisesMessage(
            client_exc.HTTPBadRequest,
            "400 Bad Request: Between floating ip range can not be overlap. (HTTP 400)",
            self.add_cluster, **fake_cluster)
        # Case 2: duplicated subnet names; floating ranges are cleared --
        # presumably so the overlap error from case 1 cannot mask the
        # duplicate-name error.
        tmp_fake_logical2 = copy.deepcopy(self.fake.fake_logical_parameters2())
        tmp_fake_logical2[0].update({'subnets': self.fake.fake_subnet_parameters2()})
        tmp_fake_logical2[0]['subnets'][0].update({'floating_ranges': []})
        tmp_fake_logical2[0]['subnets'][1].update({'floating_ranges': []})
        fake_cluster.update({'logic_networks': tmp_fake_logical2})
        self.assertRaisesMessage(
            client_exc.HTTPBadRequest,
            "400 Bad Request: Subnet name segment is repetition. (HTTP 400)",
            self.add_cluster, **fake_cluster)
        self.private_network_delete()
    # STC-A-Daisy_Logical_Network-0009
    def test_update_all_params(self):
        """Updating a cluster with a full parameter set round-trips."""
        private_network = self.private_network_add()
        fake_cluster = self.fake.fake_cluster_parameters(private_network)
        fake_network = self.fake.fake_network_parameters()
        fake_logical = self.fake.fake_logical_parameters(private_network)
        # add
        fake_cluster.update({'networking_parameters': fake_network,
                             'logic_networks': fake_logical,
                             'routers': self.fake.fake_router_parameters()})
        cluster_id1 = self.add_cluster(**fake_cluster).id
        fake_cluster.update({'networking_parameters': fake_network,
                             'logic_networks': fake_logical,
                             'routers': self.fake.fake_router_parameters()})
        # update
        cluster_id2 = self.update_cluster(cluster_id1, **fake_cluster)
        cluster_meta = self.get_cluster(cluster_id2).to_dict()
        # check
        self.assertEqual(cluster_meta.get('networking_parameters', None), fake_network)
        tmp_fake_logical = self._verify_logical_params(cluster_meta, fake_logical)
        self.assertEqual(cluster_meta.get('logic_networks', None), tmp_fake_logical)
        self._verify_router_params(cluster_meta)
        self.assertEqual(cluster_meta.get('routers', None), self.fake.fake_router_parameters())
        self.delete_cluster(cluster_id2)
        # NOTE(review): the private network plane is not deleted here --
        # confirm whether cleanup is handled elsewhere.
    # STC-A-Daisy_Logical_Network-0010
    def test_get_all_params(self):
        """get_cluster returns every network structure sent at creation."""
        private_network = self.private_network_add()
        fake_cluster = self.fake.fake_cluster_parameters(private_network)
        fake_logical = self.fake.fake_logical_parameters(private_network)
        fake_routers = self.fake.fake_router_parameters()
        fake_network = self.fake.fake_network_parameters()
        fake_cluster.update({'networking_parameters': fake_network,
                             'logic_networks': fake_logical,
                             'routers': fake_routers})
        cluster_info = self.add_cluster(**fake_cluster)
        cluster_meta = self.get_cluster(cluster_info.id).to_dict()
        self.assertEqual(cluster_meta.get('networking_parameters', None), fake_network)
        # presumably reconciles fake_logical with server-side defaults
        fake_logical = self._verify_logical_params(cluster_meta, fake_logical)
        self.assertEqual(cluster_meta.get('logic_networks', None), fake_logical)
        self._verify_router_params(cluster_meta)
        self.assertEqual(cluster_meta.get('routers', None), fake_routers)
        self.delete_cluster(cluster_info.id)
    # STC-A-Daisy_Logical_Network-0011
    def test_delete_all_params(self):
        """A cluster created without network data reports empty defaults."""
        fake_cluster = self.fake.fake_cluster_parameters()
        cluster_info = self.add_cluster(**fake_cluster)
        cluster_meta = self.get_cluster(cluster_info.id).to_dict()
        # Expected server-side defaults when no networking data is supplied.
        default_networking_parameters = {u'base_mac': None,
                                         u'gre_id_range': [None, None],
                                         u'net_l23_provider': None,
                                         u'public_vip': None,
                                         u'segmentation_type': None,
                                         u'vlan_range': [None, None],
                                         u'vni_range': [None, None]}
        self.assertEqual(default_networking_parameters, cluster_meta.get('networking_parameters', None))
        self.assertEqual([], cluster_meta.get('logic_networks', None))
        self.assertEqual([], cluster_meta.get('routers', None))
        self.delete_cluster(cluster_info.id)
    def tearDown(self):
        # No per-test cleanup beyond the base class teardown.
        super(TecsLogicalNetworkTest, self).tearDown()

View File

@ -0,0 +1,152 @@
from tempest.api.daisy import base
from tempest import config
CONF = config.CONF
class DaisyServiceTest(base.BaseDaisyTest):
    """Tempest tests for the daisy service (component) API.

    Covers listing the built-in component services and the
    add/get/update/delete lifecycle of a user-defined service record.
    """

    @classmethod
    def resource_setup(cls):
        """Prepare the shared fake host/cluster/service metadata."""
        super(DaisyServiceTest, cls).resource_setup()
        cls.host_meta = {'name': 'test_add_host',
                         'description': 'test_tempest'}
        cls.host_meta_interfaces = {'type': 'ether',
                                    'name': 'eth1',
                                    'mac': 'fe80::f816:3eff',
                                    'ip': '10.43.177.121',
                                    'netmask': '255.255.254.0',
                                    'is_deployment': 'True',
                                    'assigned_networks': ['MANAGEMENT', 'DEPLOYMENT'],
                                    'slaves': 'eth1'}
        cls.cluster_meta = {'description': 'desc',
                            'logic_networks': [{'name': 'external1',
                                                'physnet_name': 'PRIVATE',
                                                'segmentation_id': 200,
                                                'segmentation_type': 'vlan',
                                                'shared': True,
                                                'subnets': [{'cidr': '192.168.1.0/24',
                                                             'dns_nameservers': ['8.8.4.4',
                                                                                 '8.8.8.8'],
                                                             'floating_ranges': [['192.168.1.2',
                                                                                  '192.168.1.200']],
                                                             'gateway': '192.168.1.1',
                                                             'name': 'subnet2'},
                                                            {'cidr': '172.16.1.0/24',
                                                             'dns_nameservers': ['8.8.4.4',
                                                                                 '8.8.8.8'],
                                                             'floating_ranges': [['172.16.1.130',
                                                                                  '172.16.1.150'],
                                                                                 ['172.16.1.151',
                                                                                  '172.16.1.254']],
                                                             'gateway': '172.16.1.1',
                                                             'name': 'subnet10'}],
                                                'type': 'external'},
                                               {'name': 'external2',
                                                'physnet_name': 'PUBLIC',
                                                'segmentation_id': 1023,
                                                'segmentation_type': 'vxlan',
                                                'shared': True,
                                                'subnets': [{'cidr': '192.168.2.0/24',
                                                             'dns_nameservers': ['8.8.4.4',
                                                                                 '8.8.8.8'],
                                                             'floating_ranges': [['192.168.2.130',
                                                                                  '192.168.2.254']],
                                                             'gateway': '192.168.2.1',
                                                             'name': 'subnet123'}],
                                                'type': 'external'},
                                               {'name': 'internal1',
                                                'physnet_name': 'PRIVATE',
                                                'segmentation_id': '777',
                                                'segmentation_type': 'vlan',
                                                'shared': False,
                                                'subnets': [{'cidr': '192.168.31.0/24',
                                                             'dns_nameservers': ['8.8.4.4',
                                                                                 '8.8.8.8'],
                                                             'floating_ranges': [['192.168.31.130',
                                                                                  '192.168.31.254']],
                                                             'gateway': '192.168.31.1',
                                                             'name': 'subnet3'},
                                                            {'cidr': '192.168.4.0/24',
                                                             'dns_nameservers': ['8.8.4.4',
                                                                                 '8.8.8.8'],
                                                             'floating_ranges': [['192.168.4.130',
                                                                                  '192.168.4.254']],
                                                             'gateway': '192.168.4.1',
                                                             'name': 'subnet4'}],
                                                'type': 'internal'}],
                            'name': 'test',
                            'networking_parameters': {'base_mac': 'fa:16:3e:00:00:00',
                                                      'gre_id_range': [2, 2000],
                                                      'net_l23_provider': 'ovs',
                                                      'public_vip': '172.16.0.3',
                                                      'segmentation_type': 'vlan,vxlan',
                                                      'vlan_range': [2, 4094],
                                                      'vni_range': [1000, 1030]},
                            'networks': [],
                            'nodes': [],
                            'routers': [{'description': 'router1',
                                         'external_logic_network': 'external1',
                                         'name': 'router1',
                                         'subnets': ['subnet4', 'subnet3']},
                                        {'description': 'router2',
                                         'external_logic_network': 'external2',
                                         'name': 'router2',
                                         'subnets': ['subnet2', 'subnet10']}]}
        cls.service_meta = {'name': 'test_service', 'description': 'test'}

    def test_list_service(self):
        """Every built-in component service must be present in the listing."""
        listed_names = [service_info.name
                        for service_info in self.list_service()]
        expected_services = ["lb", "ha", "mariadb", "amqp",
                             "ceilometer-central", "ceilometer-alarm",
                             "ceilometer-notification", "ceilometer-collector",
                             "heat-engine", "ceilometer-api", "heat-api-cfn",
                             "heat-api", "horizon", "neutron-metadata",
                             "neutron-dhcp", "neutron-server", "neutron-l3",
                             "keystone", "cinder-volume", "cinder-api",
                             "cinder-scheduler", "glance", "ironic", "compute",
                             "nova-cert", "nova-sched", "nova-vncproxy",
                             "nova-conductor", "nova-api"]
        missing = [name for name in expected_services
                   if name not in listed_names]
        # Include the missing names so a failure is actionable.
        self.assertTrue(not missing,
                        "test_list_service error: missing %s" % missing)

    def test_add_service(self):
        """A newly added service is returned with the requested name."""
        service = self.add_service(**self.service_meta)
        self.assertEqual("test_service", service.name,
                         "test_add_service failed")
        self.delete_service(service.id)

    def test_service_delete(self):
        """A deleted service no longer shows up in the service listing."""
        service = self.add_service(**self.service_meta)
        self.delete_service(service.id)
        remaining_names = [service_info.name
                           for service_info in self.list_service()]
        # Fixed: the original failure message said "test_list_service error".
        self.assertTrue(service.name not in remaining_names,
                        "test_service_delete error")

    def test_get_service_detail(self):
        """get_service returns the record created by add_service."""
        add_service_info = self.add_service(**self.service_meta)
        get_service = self.get_service(add_service_info.id)
        self.assertEqual('test_service', get_service.name)
        self.delete_service(get_service.id)

    def test_update_service(self):
        """update_service changes the stored name and description."""
        add_service_info = self.add_service(**self.service_meta)
        update_service_meta = {'name': 'test_update_service',
                               'description': 'test_tempest'}
        update_service_info = self.update_service(add_service_info.id,
                                                  **update_service_meta)
        # Fixed: the original failure message referred to a different test
        # ("test_update_service_with_cluster failed").
        self.assertEqual("test_update_service", update_service_info.name,
                         "test_update_service failed")
        self.delete_service(add_service_info.id)

1436
test/tempest/tempest/config.py Executable file

File diff suppressed because it is too large Load Diff

768
test/tempest/tempest/test.py Executable file
View File

@ -0,0 +1,768 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import functools
import json
import os
import re
import sys
import time
import urllib
import uuid
import fixtures
from oslo_log import log as logging
from oslo_utils import importutils
import six
import testscenarios
import testtools
from tempest import clients
from tempest.common import credentials
from tempest.common import fixed_network
import tempest.common.generator.valid_generator as valid
from tempest import config
from tempest import exceptions
from tempest_lib.common.utils import misc
LOG = logging.getLogger(__name__)
CONF = config.CONF
def attr(*args, **kwargs):
    """A decorator which applies the testtools attr decorator

    This decorator applies the testtools.testcase.attr if it is in the list of
    attributes to testtools we want to apply.
    """
    def decorator(f):
        attr_type = kwargs.get('type')
        if isinstance(attr_type, str):
            return testtools.testcase.attr(attr_type)(f)
        if isinstance(attr_type, list):
            for single_attr in attr_type:
                f = testtools.testcase.attr(single_attr)(f)
        return f
    return decorator
def idempotent_id(id):
    """Stub for metadata decorator"""
    if not isinstance(id, six.string_types):
        raise TypeError('Test idempotent_id must be string not %s'
                        '' % type(id).__name__)
    # Raises ValueError early when the id is not a valid UUID string.
    uuid.UUID(id)

    def decorator(f):
        f = testtools.testcase.attr('id-%s' % id)(f)
        tag = 'Test idempotent id: %s' % id
        f.__doc__ = '%s\n%s' % (tag, f.__doc__) if f.__doc__ else tag
        return f
    return decorator
def get_service_list():
    """Map each known service tag to its availability flag from CONF."""
    service_list = {
        'compute': CONF.service_available.nova,
        'image': CONF.service_available.glance,
        'baremetal': CONF.service_available.ironic,
        'volume': CONF.service_available.cinder,
        'orchestration': CONF.service_available.heat,
        # NOTE(mtreinish) nova-network will provide networking functionality
        # if neutron isn't available, so always set to True.
        'network': True,
        'identity': True,
        'object_storage': CONF.service_available.swift,
        'dashboard': CONF.service_available.horizon,
        'telemetry': CONF.service_available.ceilometer,
        'data_processing': CONF.service_available.sahara,
        'database': CONF.service_available.trove
    }
    return service_list
def services(*args, **kwargs):
    """A decorator used to set an attr for each service used in a test case

    This decorator applies a testtools attr for each service that gets
    exercised by a test case.
    """
    valid_services = ('compute', 'image', 'baremetal', 'volume',
                      'orchestration', 'network', 'identity',
                      'object_storage', 'dashboard', 'telemetry',
                      'data_processing', 'database')

    def decorator(f):
        for service in args:
            if service not in valid_services:
                raise exceptions.InvalidServiceTag('%s is not a valid '
                                                   'service' % service)
        attr(type=list(args))(f)

        @functools.wraps(f)
        def wrapper(self, *func_args, **func_kwargs):
            available = get_service_list()
            for service in args:
                if not available[service]:
                    msg = 'Skipped because the %s service is not available' % (
                        service)
                    raise testtools.TestCase.skipException(msg)
            return f(self, *func_args, **func_kwargs)
        return wrapper
    return decorator
def stresstest(*args, **kwargs):
    """Add stress test decorator

    For all functions with this decorator a attr stress will be
    set automatically.

    @param class_setup_per: allowed values are application, process, action
           ``application``: once in the stress job lifetime
           ``process``: once in the worker process lifetime
           ``action``: on each action
    @param allow_inheritance: allows inheritance of this attribute
    """
    def decorator(f):
        # kwargs.get with a default replaces the original if/else pairs.
        setattr(f, "st_class_setup_per",
                kwargs.get('class_setup_per', 'process'))
        setattr(f, "st_allow_inheritance",
                kwargs.get('allow_inheritance', False))
        attr(type='stress')(f)
        return f
    return decorator
def requires_ext(*args, **kwargs):
    """A decorator to skip tests if an extension is not enabled

    @param extension
    @param service
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*func_args, **func_kwargs):
            enabled = is_extension_enabled(kwargs['extension'],
                                           kwargs['service'])
            if enabled:
                return func(*func_args, **func_kwargs)
            msg = "Skipped because %s extension: %s is not enabled" % (
                kwargs['service'], kwargs['extension'])
            raise testtools.TestCase.skipException(msg)
        return wrapper
    return decorator
def is_extension_enabled(extension_name, service):
    """A function that will check the list of enabled extensions from config

    """
    config_dict = {
        'compute': CONF.compute_feature_enabled.api_extensions,
        'volume': CONF.volume_feature_enabled.api_extensions,
        'network': CONF.network_feature_enabled.api_extensions,
        'object': CONF.object_storage_feature_enabled.discoverable_apis,
    }
    enabled_extensions = config_dict[service]
    if not enabled_extensions:
        return False
    # 'all' as the first entry enables every extension for the service.
    return (enabled_extensions[0] == 'all' or
            extension_name in enabled_extensions)
# Classes whose setUp ran but which tearDownClass has not yet discarded;
# anything still here at interpreter exit broke the tearDownClass chain.
at_exit_set = set()
def validate_tearDownClass():
    # Runs at interpreter exit (registered below via atexit).
    if at_exit_set:
        LOG.error(
            "tearDownClass does not call the super's "
            "tearDownClass in these classes: \n" +
            str(at_exit_set))
atexit.register(validate_tearDownClass)
class BaseTestCase(testtools.testcase.WithAttributes,
                   testtools.TestCase):
    """The test base class defines Tempest framework for class level fixtures.

    `setUpClass` and `tearDownClass` are defined here and cannot be overwritten
    by subclasses (enforced via hacking rule T105).

    Set-up is split in a series of steps (setup stages), which can be
    overwritten by test classes. Set-up stages are:
    - skip_checks
    - setup_credentials
    - setup_clients
    - resource_setup

    Tear-down is also split in a series of steps (teardown stages), which are
    stacked for execution only if the corresponding setup stage had been
    reached during the setup phase. Tear-down stages are:
    - clear_isolated_creds (defined in the base test class)
    - resource_cleanup
    """

    setUpClassCalled = False
    _service = None

    # NOTE(andreaf) credentials holds a list of the credentials to be allocated
    # at class setup time. Credential types can be 'primary', 'alt' or 'admin'
    credentials = []
    network_resources = {}

    # NOTE(sdague): log_format is defined inline here instead of using the oslo
    # default because going through the config path recouples config to the
    # stress tests too early, and depending on testr order will fail unit tests
    log_format = ('%(asctime)s %(process)d %(levelname)-8s '
                  '[%(name)s] %(message)s')

    @classmethod
    def setUpClass(cls):
        # It should never be overridden by descendants
        if hasattr(super(BaseTestCase, cls), 'setUpClass'):
            super(BaseTestCase, cls).setUpClass()
        cls.setUpClassCalled = True
        # Stack of (name, callable) to be invoked in reverse order at teardown
        cls.teardowns = []
        # All the configuration checks that may generate a skip
        cls.skip_checks()
        try:
            # Allocation of all required credentials and client managers
            cls.teardowns.append(('credentials', cls.clear_isolated_creds))
            cls.setup_credentials()
            # Shortcuts to clients
            cls.setup_clients()
            # Additional class-wide test resources
            cls.teardowns.append(('resources', cls.resource_cleanup))
            cls.resource_setup()
        except Exception:
            etype, value, trace = sys.exc_info()
            LOG.info("%s raised in %s.setUpClass. Invoking tearDownClass." % (
                etype, cls.__name__))
            cls.tearDownClass()
            try:
                # six.reraise preserves the original traceback and is valid
                # on both Python 2 and 3; the previous
                # ``raise etype, value, trace`` form is Python-2-only syntax.
                six.reraise(etype, value, trace)
            finally:
                del trace  # to avoid circular refs

    @classmethod
    def tearDownClass(cls):
        at_exit_set.discard(cls)
        # It should never be overridden by descendants
        if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
            super(BaseTestCase, cls).tearDownClass()
        # Save any existing exception, we always want to re-raise the original
        # exception only
        etype, value, trace = sys.exc_info()
        # If there was no exception during setup we shall re-raise the first
        # exception in teardown
        re_raise = (etype is None)
        while cls.teardowns:
            name, teardown = cls.teardowns.pop()
            # Catch any exception in tearDown so we can re-raise the original
            # exception at the end
            try:
                teardown()
            except Exception as te:
                sys_exec_info = sys.exc_info()
                tetype = sys_exec_info[0]
                # TODO(andreaf): Till we have the ability to cleanup only
                # resources that were successfully setup in resource_cleanup,
                # log AttributeError as info instead of exception.
                if tetype is AttributeError and name == 'resources':
                    LOG.info("tearDownClass of %s failed: %s" % (name, te))
                else:
                    LOG.exception("teardown of %s failed: %s" % (name, te))
                if not etype:
                    etype, value, trace = sys_exec_info
        # If exceptions were raised during teardown, an not before, re-raise
        # the first one
        if re_raise and etype is not None:
            try:
                # Python 2/3 compatible re-raise (see setUpClass).
                six.reraise(etype, value, trace)
            finally:
                del trace  # to avoid circular refs

    @classmethod
    def sdn_skip_check(cls):
        # Cache the probe result on the base class so it runs only once.
        if not hasattr(cls, 'is_sdn'):
            BaseTestCase.is_sdn = misc.sdn_skip_check()
        if BaseTestCase.is_sdn:
            raise testtools.TestCase.skipException(
                "skip : SDN not support this function")

    @classmethod
    def dvs_skip_check(cls):
        # Cache the probe result on the base class so it runs only once.
        if not hasattr(cls, 'is_dvs'):
            BaseTestCase.is_dvs = misc.dvs_skip_check()
        if BaseTestCase.is_dvs:
            raise testtools.TestCase.skipException(
                "skip : DVS not support this function")

    @classmethod
    def lvm_skip_check(cls):
        # Cache the probe result on the base class so it runs only once.
        if not hasattr(cls, 'is_lvm'):
            BaseTestCase.is_lvm = misc.lvm_skip_check()
        if BaseTestCase.is_lvm:
            raise testtools.TestCase.skipException("skip : LVM " +
                                                   " not support this function")

    @classmethod
    def connectvm_skip_check(cls):
        # Skip when neither direct tenant-network reachability nor floating
        # ips are available for reaching a VM over ssh.
        if not hasattr(cls, 'can_connect_vm'):
            BaseTestCase.can_connect_vm =\
                (CONF.network.tenant_networks_reachable or
                 CONF.compute.use_floatingip_for_ssh)
        if not BaseTestCase.can_connect_vm:
            raise testtools.TestCase.skipException("skip : no network "
                                                   "reachable to ssh vm")

    @classmethod
    def skip_checks(cls):
        """Class level skip checks. Subclasses verify in here all
        conditions that might prevent the execution of the entire test class.
        Checks implemented here may not make use API calls, and should rely on
        configuration alone.
        In general skip checks that require an API call are discouraged.
        If one is really needed it may be implemented either in the
        resource_setup or at test level.
        """
        if 'admin' in cls.credentials and not credentials.is_admin_available():
            msg = "Missing Identity Admin API credentials in configuration."
            raise cls.skipException(msg)
        # Fixed: the original used ``'alt' is cls.credentials`` -- an identity
        # comparison between a string and a list that is always False, so the
        # alt-credentials check never ran.
        if 'alt' in cls.credentials and not credentials.is_alt_available():
            msg = "Missing a 2nd set of API credentials in configuration."
            raise cls.skipException(msg)

    @classmethod
    def setup_credentials(cls):
        """Allocate credentials and the client managers from them.
        A test class that requires network resources must override
        setup_credentials and defined the required resources before super
        is invoked.
        """
        for credentials_type in cls.credentials:
            # This may raise an exception in case credentials are not available
            # In that case we want to let the exception through and the test
            # fail accordingly
            manager = cls.get_client_manager(
                credential_type=credentials_type)
            setattr(cls, 'os_%s' % credentials_type, manager)
            # Setup some common aliases
            # TODO(andreaf) The aliases below are a temporary hack
            # to avoid changing too much code in one patch. They should
            # be removed eventually
            if credentials_type == 'primary':
                cls.os = cls.manager = cls.os_primary
            if credentials_type == 'admin':
                cls.os_adm = cls.admin_manager = cls.os_admin
            if credentials_type == 'alt':
                cls.alt_manager = cls.os_alt

    @classmethod
    def setup_clients(cls):
        """Create links to the clients into the test object."""
        # TODO(andreaf) There is a fair amount of code that could me moved from
        # base / test classes in here. Ideally tests should be able to only
        # specify which client is `client` and nothing else.
        pass

    @classmethod
    def resource_setup(cls):
        """Class level resource setup for test cases.
        """
        pass

    @classmethod
    def resource_cleanup(cls):
        """Class level resource cleanup for test cases.
        Resource cleanup must be able to handle the case of partially setup
        resources, in case a failure during `resource_setup` should happen.
        """
        pass

    def setUp(self):
        super(BaseTestCase, self).setUp()
        if not self.setUpClassCalled:
            raise RuntimeError("setUpClass does not calls the super's"
                               "setUpClass in the " +
                               self.__class__.__name__)
        at_exit_set.add(self.__class__)
        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))

        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
                os.environ.get('OS_STDERR_CAPTURE') == '1'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        if (os.environ.get('OS_LOG_CAPTURE') != 'False' and
                os.environ.get('OS_LOG_CAPTURE') != '0'):
            self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
                                                   format=self.log_format,
                                                   level=None))

    @classmethod
    def get_client_manager(cls, identity_version=None,
                           credential_type='primary'):
        """
        Returns an OpenStack client manager
        """
        force_tenant_isolation = getattr(cls, 'force_tenant_isolation', None)
        identity_version = identity_version or CONF.identity.auth_version

        if (not hasattr(cls, 'isolated_creds') or
                not cls.isolated_creds.name == cls.__name__):
            cls.isolated_creds = credentials.get_isolated_credentials(
                name=cls.__name__, network_resources=cls.network_resources,
                force_tenant_isolation=force_tenant_isolation,
                identity_version=identity_version
            )

        credentials_method = 'get_%s_creds' % credential_type
        if hasattr(cls.isolated_creds, credentials_method):
            creds = getattr(cls.isolated_creds, credentials_method)()
        else:
            raise exceptions.InvalidCredentials(
                "Invalid credentials type %s" % credential_type)
        os = clients.Manager(credentials=creds, service=cls._service)
        return os

    @classmethod
    def clear_isolated_creds(cls):
        """
        Clears isolated creds if set
        """
        if hasattr(cls, 'isolated_creds'):
            cls.isolated_creds.clear_isolated_creds()

    @classmethod
    def set_network_resources(cls, network=False, router=False, subnet=False,
                              dhcp=False):
        """Specify which network resources should be created
        @param network
        @param router
        @param subnet
        @param dhcp
        """
        # network resources should be set only once from callers
        # in order to ensure that even if it's called multiple times in
        # a chain of overloaded methods, the attribute is set only
        # in the leaf class
        if not cls.network_resources:
            cls.network_resources = {
                'network': network,
                'router': router,
                'subnet': subnet,
                'dhcp': dhcp}

    @classmethod
    def get_tenant_network(cls):
        """Get the network to be used in testing
        :return: network dict including 'id' and 'name'
        """
        # Make sure isolated_creds exists and get a network client
        networks_client = cls.get_client_manager().networks_client
        isolated_creds = getattr(cls, 'isolated_creds', None)
        # In case of nova network, isolated tenants are not able to list the
        # network configured in fixed_network_name, even if the can use it
        # for their servers, so using an admin network client to validate
        # the network name
        if (not CONF.service_available.neutron and
                credentials.is_admin_available()):
            admin_creds = isolated_creds.get_admin_creds()
            networks_client = clients.Manager(admin_creds).networks_client
        return fixed_network.get_tenant_network(isolated_creds,
                                                networks_client)

    def assertEmpty(self, list, msg=None):
        self.assertTrue(len(list) == 0, msg)

    def assertNotEmpty(self, list, msg=None):
        self.assertTrue(len(list) > 0, msg)

    def assertRaisesMessage(self, exc, msg, func, *args, **kwargs):
        """Assert func raises exc with str(exception) equal to msg."""
        try:
            func(*args, **kwargs)
        # Fixed: ``except Exception, e`` is Python-2-only syntax.
        except Exception as e:
            self.assertEqual(msg, str(e))
            self.assertTrue(isinstance(e, exc),
                            "Expected %s, got %s" % (exc, type(e)))
        else:
            # Fixed: previously the assertion silently passed when the call
            # raised nothing at all.
            self.fail("%s not raised" % exc)
class NegativeAutoTest(BaseTestCase):
    """Base class for schema-driven negative API tests.

    Subclasses attach a ``_schema`` description to their test methods;
    ``load_tests``/``generate_scenario`` expand it into testscenarios
    scenarios, and ``execute`` drives each scenario against the negative
    REST client expecting a 4xx response.
    """
    # Registry of valid resource ids shared by all negative tests,
    # populated via set_resource().
    _resources = {}
    @classmethod
    def setUpClass(cls):
        super(NegativeAutoTest, cls).setUpClass()
        os = cls.get_client_manager(credential_type='primary')
        cls.client = os.negative_client
    @staticmethod
    def load_tests(*args):
        """
        Wrapper for testscenarios to set the mandatory scenarios variable
        only in case a real test loader is in place. Will be automatically
        called in case the variable "load_tests" is set.
        """
        # Argument order differs between the unittest protocol
        # (loader, tests, pattern) and the direct call form.
        if getattr(args[0], 'suiteClass', None) is not None:
            loader, standard_tests, pattern = args
        else:
            standard_tests, module, loader = args
        for test in testtools.iterate_tests(standard_tests):
            schema = getattr(test, '_schema', None)
            if schema is not None:
                setattr(test, 'scenarios',
                        NegativeAutoTest.generate_scenario(schema))
        return testscenarios.load_tests_apply_scenarios(*args)
    @staticmethod
    def generate_scenario(description):
        """
        Generates the test scenario list for a given description.
        :param description: A file or dictionary with the following entries:
            name (required) name for the api
            http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
            url (required) the url to be appended to the catalog url with '%s'
                for each resource mentioned
            resources: (optional) A list of resource names such as "server",
                "flavor", etc. with an element for each '%s' in the url. This
                method will call self.get_resource for each element when
                constructing the positive test case template so negative
                subclasses are expected to return valid resource ids when
                appropriate.
            json-schema (optional) A valid json schema that will be used to
                create invalid data for the api calls. For "GET" and "HEAD",
                the data is used to generate query strings appended to the url,
                otherwise for the body of the http call.
        """
        LOG.debug(description)
        generator = importutils.import_class(
            CONF.negative.test_generator)()
        generator.validate_schema(description)
        schema = description.get("json-schema", None)
        resources = description.get("resources", [])
        scenario_list = []
        expected_result = None
        # One scenario per resource with a random (invalid) uuid.
        for resource in resources:
            if isinstance(resource, dict):
                expected_result = resource['expected_result']
                resource = resource['name']
            LOG.debug("Add resource to test %s" % resource)
            scn_name = "inv_res_%s" % (resource)
            scenario_list.append((scn_name, {"resource": (resource,
                                                          str(uuid.uuid4())),
                                             "expected_result": expected_result
                                             }))
        # Plus one scenario per invalid payload the generator produces.
        if schema is not None:
            for scenario in generator.generate_scenarios(schema):
                scenario_list.append((scenario['_negtest_name'],
                                      scenario))
        LOG.debug(scenario_list)
        return scenario_list
    def execute(self, description):
        """
        Execute a http call on an api that are expected to
        result in client errors. First it uses invalid resources that are part
        of the url, and then invalid data for queries and http request bodies.
        :param description: A json file or dictionary with the following
        entries:
            name (required) name for the api
            http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
            url (required) the url to be appended to the catalog url with '%s'
                for each resource mentioned
            resources: (optional) A list of resource names such as "server",
                "flavor", etc. with an element for each '%s' in the url. This
                method will call self.get_resource for each element when
                constructing the positive test case template so negative
                subclasses are expected to return valid resource ids when
                appropriate.
            json-schema (optional) A valid json schema that will be used to
                create invalid data for the api calls. For "GET" and "HEAD",
                the data is used to generate query strings appended to the url,
                otherwise for the body of the http call.
        """
        LOG.info("Executing %s" % description["name"])
        LOG.debug(description)
        generator = importutils.import_class(
            CONF.negative.test_generator)()
        schema = description.get("json-schema", None)
        method = description["http-method"]
        url = description["url"]
        expected_result = None
        if "default_result_code" in description:
            expected_result = description["default_result_code"]
        resources = [self.get_resource(r) for
                     r in description.get("resources", [])]
        if hasattr(self, "resource"):
            # Note(mkoderer): The resources list already contains an invalid
            # entry (see get_resource).
            # We just send a valid json-schema with it
            valid_schema = None
            if schema:
                valid_schema = \
                    valid.ValidTestGenerator().generate_valid(schema)
            new_url, body = self._http_arguments(valid_schema, url, method)
        elif hasattr(self, "_negtest_name"):
            schema_under_test = \
                valid.ValidTestGenerator().generate_valid(schema)
            local_expected_result = \
                generator.generate_payload(self, schema_under_test)
            if local_expected_result is not None:
                expected_result = local_expected_result
            new_url, body = \
                self._http_arguments(schema_under_test, url, method)
        else:
            raise Exception("testscenarios are not active. Please make sure "
                            "that your test runner supports the load_tests "
                            "mechanism")
        if "admin_client" in description and description["admin_client"]:
            if not credentials.is_admin_available():
                msg = ("Missing Identity Admin API credentials in"
                       "configuration.")
                raise self.skipException(msg)
            creds = self.isolated_creds.get_admin_creds()
            os_adm = clients.Manager(credentials=creds)
            client = os_adm.negative_client
        else:
            client = self.client
        resp, resp_body = client.send_request(method, new_url,
                                              resources, body=body)
        self._check_negative_response(expected_result, resp.status, resp_body)
    def _http_arguments(self, json_dict, url, method):
        # Turn generated data into a query string (GET/HEAD/PUT/DELETE)
        # or a json request body (other methods).
        LOG.debug("dict: %s url: %s method: %s" % (json_dict, url, method))
        if not json_dict:
            return url, None
        elif method in ["GET", "HEAD", "PUT", "DELETE"]:
            # NOTE(review): urllib.urlencode is Python 2 only; on Python 3
            # this would need urllib.parse.urlencode.
            return "%s?%s" % (url, urllib.urlencode(json_dict)), None
        else:
            return url, json.dumps(json_dict)
    def _check_negative_response(self, expected_result, result, body):
        # Any 4xx except 413 counts as the expected client error.
        self.assertTrue(result >= 400 and result < 500 and result != 413,
                        "Expected client error, got %s:%s" %
                        (result, body))
        self.assertTrue(expected_result is None or expected_result == result,
                        "Expected %s, got %s:%s" %
                        (expected_result, result, body))
    @classmethod
    def set_resource(cls, name, resource):
        """
        This function can be used in setUpClass context to register a resoruce
        for a test.
        :param name: The name of the kind of resource such as "flavor", "role",
            etc.
        :resource: The id of the resource
        """
        cls._resources[name] = resource
    def get_resource(self, name):
        """
        Return a valid uuid for a type of resource. If a real resource is
        needed as part of a url then this method should return one. Otherwise
        it can return None.
        :param name: The name of the kind of resource such as "flavor", "role",
            etc.
        """
        if isinstance(name, dict):
            name = name['name']
        if hasattr(self, "resource") and self.resource[0] == name:
            LOG.debug("Return invalid resource (%s) value: %s" %
                      (self.resource[0], self.resource[1]))
            return self.resource[1]
        if name in self._resources:
            return self._resources[name]
        return None
def SimpleNegativeAutoTest(klass):
    """Class decorator that registers a generic negative test method.

    The method name is derived from the class name: 'JSON' and 'Test'
    are stripped and the remaining CamelCase is converted to snake_case.
    """
    @attr(type=['negative'])
    def generic_test(self):
        if hasattr(self, '_schema'):
            self.execute(self._schema)

    base_name = klass.__name__.replace('JSON', '').replace('Test', '')
    # Insert '_' before every interior uppercase letter, then lowercase.
    snake_name = re.sub('(?<!^)(?=[A-Z])', '_', base_name).lower()
    setattr(klass, 'test_%s' % snake_name, generic_test)
    return klass
def call_until_true(func, duration, sleep_for):
    """Repeatedly invoke *func* until it succeeds or time runs out.

    :param func: A zero argument callable that returns True on success.
    :param duration: The number of seconds for which to attempt a
                     successful call of the function.
    :param sleep_for: The number of seconds to sleep after an unsuccessful
                      invocation of the function.
    :return: True as soon as func() returns True, False once *duration*
             seconds have elapsed without a success.
    """
    deadline = time.time() + duration
    while time.time() < deadline:
        if func():
            return True
        time.sleep(sleep_for)
    return False

View File

@ -0,0 +1,208 @@
#!/bin/bash
#********
# This script sets up a tempest environment.
#
# 1. Copy it into the module you want to test,
#    e.g.: cp tempest_envir_conf_install.sh tempest-master/
# 2. Run the script; the tempest environment will be deployed.
#
# NOTE: this script only supports CGSLV5.
#*****
log_path=logs
mkdir -p $log_path
testday=`date +"%Y-%m-%d-%H-%M-%S"`
errfile=$log_path/err-$testday.txt

# Only a 3.10 kernel (CGSLV5) is supported.
Install_version=`uname -a`
Right_version="3.10"
result=$(echo $Install_version | grep "${Right_version}")
if [[ "$result" == "" ]]
then
    echo "only support CGSLV5,please change your version first...">>$log_path/install_venv.err
    exit 1
fi

# Point yum at the internal opencos repository.
rm -rf /etc/yum.repos.d/opencos.repo
opencos_repo=/etc/yum.repos.d/opencos.repo
echo "Create $opencos_repo ..."
echo "[opencos]">>$opencos_repo
echo "name=opencos">>$opencos_repo
echo "baseurl=http://10.43.177.17/pypi/">>$opencos_repo
echo "enabled=1">>$opencos_repo
echo "gpgcheck=0">>$opencos_repo

# Point pip at the internal mirror.
rm -rf ~/.pip/pip.conf
pip_config=~/.pip/pip.conf
echo "Create $pip_config ..."
if [ ! -d `dirname $pip_config` ]; then
    mkdir -p `dirname $pip_config`
fi
echo "[global]">$pip_config
echo "find-links = http://10.43.177.17/pypi">>$pip_config
echo "no-index = true">>$pip_config

# Point easy_install at the internal mirror.
rm -rf ~/.pydistutils.cfg
pydistutils_cfg=~/.pydistutils.cfg
echo "Create $pydistutils_cfg ..."
echo "[easy_install]">$pydistutils_cfg
echo "index_url = http://10.43.177.17/pypi">>$pydistutils_cfg

modules=(virtualenv mariadb-devel testtools testrepository testresources fixtures python-subunit testscenarios postgresql-devel oslo.serialization oslo.utils libffi-devel
cyrus-sasl-devel sqlite-devel libxslt-devel openldap-devel)
yum clean all 1>/dev/null 2>/dev/null
# The virtual environment demands pip >= 1.6, so (re)install pip whether
# or not it is present.
# BUGFIX: $mod was used here before the loop below defined it, sending
# the output to "$log_path/.log"; use dedicated pip log files instead.
yum --disablerepo=* --enablerepo=opencos install -y pip extras 1>$log_path/pip.log 2>$log_path/pip.err
# install modules
echo "install modules">>$log_path/install_venv.log
for mod in ${modules[@]}; do
    echo -n "yum install $mod ... "
    already_install=`rpm -qa | grep $mod`
    if [ "$already_install" == "" ]; then
        yum --disablerepo=* --enablerepo=opencos install -y $mod 1>$log_path/$mod.log 2>$log_path/$mod.err
        # yum itself always exits 0 from the repo setup above, so treat any
        # stderr output as an installation failure.
        if [ -s $log_path/$mod.err ]; then
            echo "fail!"
            echo "Install $mod fail! Please install the package manually with yum, command is \"yum install $mod\"">>$log_path/install_venv.err
#            exit 1
        else
            echo "ok(install finish)"
        fi
    else
        echo "ok(already exist)"
    fi
done

# Deliberately disabled: building the python virtualenv via tools/install_venv.py.
#echo "install venv ... ">>$log_path/install_venv.log
#chmod +x tools/*
#python tools/install_venv.py 1>$log_path/install_venv.log 2>$log_path/install_venv.err
#if grep "development environment setup is complete." $log_path/install_venv.log
#   then
#   echo "development environment setup is complete...">>$log_path/install_venv.log
#else
#   echo "development environment setup is fail,please check $log_path/install_venv.err"
#   cat $log_path/install_venv.err
##   exit 1
#fi

echo "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
echo "copy tempest.conf.sample to tempest.conf....."
tempestconf=etc/tempest.conf
# NOTE: only this copy reports into err.txt, which is what the final
# success check below inspects; openstack-config errors go to $errfile.
if [ ! -e $tempestconf ];then
    cp etc/tempest.conf.sample etc/tempest.conf 2>>err.txt
fi
source /root/keystonerc_admin
#######Tempest CONF#######
#######[DEFAULT]#######
echo "config tempest.conf DEFAULT lock_path /tmp"
openstack-config --set $tempestconf DEFAULT lock_path /tmp 2>>$errfile
echo "config tempest.conf DEFAULT log_file tempest.log"
openstack-config --set $tempestconf DEFAULT log_file tempest.log 2>>$errfile
########[identity]########
# Create the Member role and the demo/alt_demo tenants and users if missing.
if [ ! -n "`keystone user-list 2>>$errfile | grep -w Member`" ]; then
    keystone user-create --name Member 2>>$errfile
fi
if [ ! -n "`keystone role-list 2>>$errfile | grep -w Member`" ]; then
    keystone role-create --name Member 2>>$errfile
fi
if [ ! -n "`keystone tenant-list 2>>$errfile |grep -w demo`" ]; then
    keystone tenant-create --name demo --enabled true 2>>$errfile
fi
if [ ! -n "`keystone user-list 2>>$errfile |grep -w demo`" ]; then
    keystone user-create --name demo --tenant demo --pass secret --enabled true 2>>$errfile
fi
if [ ! -n "`keystone tenant-list 2>>$errfile |grep -w alt_demo`" ]; then
    keystone tenant-create --name alt_demo --enabled true 2>>$errfile
fi
if [ ! -n "`keystone user-list 2>>$errfile |grep -w alt_demo`" ]; then
    keystone user-create --name alt_demo --tenant alt_demo --pass secret --enabled true 2>>$errfile
fi
openstack-config --set $tempestconf identity admin_username admin 2>>$errfile
openstack-config --set $tempestconf identity admin_role admin 2>>$errfile
openstack-config --set $tempestconf identity admin_tenant_name admin 2>>$errfile
openstack-config --set $tempestconf identity admin_password keystone 2>>$errfile
openstack-config --set $tempestconf identity alt_tenant_name alt_demo 2>>$errfile
openstack-config --set $tempestconf identity alt_username alt_demo 2>>$errfile
openstack-config --set $tempestconf identity alt_password secret 2>>$errfile
openstack-config --set $tempestconf identity tenant_name demo 2>>$errfile
openstack-config --set $tempestconf identity username demo 2>>$errfile
openstack-config --set $tempestconf identity password secret 2>>$errfile
openstack-config --set $tempestconf identity auth_version v2 2>>$errfile
openstack-config --set $tempestconf identity catalog_type identity 2>>$errfile
openstack-config --set $tempestconf identity endpoint_type publicURL 2>>$errfile
openstack-config --set $tempestconf identity region RegionOne 2>>$errfile
openstack-config --set $tempestconf identity uri http://127.0.0.1:5000/v2.0/ 2>>$errfile
openstack-config --set $tempestconf identity uri_v3 http://127.0.0.1:5000/v3/ 2>>$errfile
#######[cli]#######
openstack-config --set $tempestconf cli cli_dir /usr/bin 2>>$errfile
#######[compute]#######
openstack-config --set $tempestconf compute build_timeout 300 2>>$errfile
openstack-config --set $tempestconf compute run_ssh true 2>>$errfile
openstack-config --set $tempestconf compute ssh_auth_method adminpass 2>>$errfile
openstack-config --set $tempestconf compute ssh_user cirros 2>>$errfile
openstack-config --set $tempestconf compute image_ssh_user cirros 2>>$errfile
openstack-config --set $tempestconf compute image_ssh_password cubswin:\) 2>>$errfile
# Upload the two cirros test images if they are not already registered.
if [ ! -n "`glance image-list 2>>$errfile |grep -w cirros_icehouse_test |awk '{print $2}'`" ]; then
    glance image-create --name cirros_icehouse_test --is-public true --disk-format qcow2 --copy-from http://10.43.175.61:8081/files/linux/cirros-0.3.0-x86_64-disk.img 2>>$errfile
fi
if [ ! -n "`glance image-list 2>>$errfile |grep -w cirros_icehouse_test_alt |awk '{print $2}'`" ]; then
    glance image-create --name cirros_icehouse_test_alt --is-public true --disk-format qcow2 --copy-from http://10.43.175.61:8081/files/linux/cirros-0.3.2-x86_64-disk.img 2>>$errfile
fi
IMAGE=`glance image-list 2>>$errfile |grep -w cirros_icehouse_test |awk -F " " '{print $2}'`
IMAGE_ALT=`glance image-list 2>>$errfile |grep -w cirros_icehouse_test_alt |awk -F " " '{print $2}'`
openstack-config --set $tempestconf compute image_ref $IMAGE 2>>$errfile
openstack-config --set $tempestconf compute image_ref_alt $IMAGE_ALT 2>>$errfile
#CONF.compute.flavor_ref
FLAVORNAME=m1.tiny
FLAVORALT=m1.small
FLAVORID=`nova flavor-list 2>>$errfile |grep -w $FLAVORNAME |awk '{print $2}'`
FLAVORALTID=`nova flavor-list 2>>$errfile |grep -w $FLAVORALT |awk '{print $2}'`
openstack-config --set $tempestconf compute flavor_ref $FLAVORID 2>>$errfile
openstack-config --set $tempestconf compute flavor_ref_alt $FLAVORALTID 2>>$errfile
#######[dashboard]#######
openstack-config --set $tempestconf dashboard dashboard_url http://localhost/dashboard/ 2>>$errfile
openstack-config --set $tempestconf dashboard login_url http://localhost/dashboard/auth/login/ 2>>$errfile
#######[service_available]#######
openstack-config --set $tempestconf service_available ceilometer false 2>>$errfile
openstack-config --set $tempestconf service_available cinder true 2>>$errfile
openstack-config --set $tempestconf service_available glance true 2>>$errfile
openstack-config --set $tempestconf service_available heat false 2>>$errfile
openstack-config --set $tempestconf service_available horizon true 2>>$errfile
openstack-config --set $tempestconf service_available ironic false 2>>$errfile
openstack-config --set $tempestconf service_available neutron true 2>>$errfile
openstack-config --set $tempestconf service_available nova true 2>>$errfile
openstack-config --set $tempestconf service_available sahara false 2>>$errfile
openstack-config --set $tempestconf service_available swift false 2>>$errfile
openstack-config --set $tempestconf service_available trove false 2>>$errfile
openstack-config --set $tempestconf service_available zaqar false 2>>$errfile
if [ -s err.txt ];then
    cat err.txt
    exit 1
fi
echo "tempest environment and tempest.conf config successful..."
exit 0

View File

@ -0,0 +1,13 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
hacking<0.11,>=0.10.0
# needed for doc build
sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
python-subunit>=0.0.18
oslosphinx>=2.5.0,<2.6.0 # Apache-2.0
mox>=0.5.3
mock>=1.0
coverage>=3.6
oslotest>=1.5.1,<1.6.0 # Apache-2.0
stevedore>=1.3.0,<1.4.0 # Apache-2.0

133
test/tempest/tox.ini Normal file
View File

@ -0,0 +1,133 @@
[tox]
envlist = pep8,py27
minversion = 1.6
skipsdist = True
[tempestenv]
sitepackages = False
setenv = VIRTUAL_ENV={envdir}
OS_TEST_PATH=./tempest/test_discover
deps = setuptools
-r{toxinidir}/requirements.txt
[testenv]
setenv = VIRTUAL_ENV={envdir}
OS_TEST_PATH=./tempest/tests
usedevelop = True
install_command = pip install -U {opts} {packages}
whitelist_externals = *
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
find . -type f -name "*.pyc" -delete
bash tools/pretty_tox.sh '{posargs}'
[testenv:genconfig]
commands = oslo-config-generator --config-file tools/config/config-generator.tempest.conf
[testenv:cover]
setenv = OS_TEST_PATH=./tempest/tests
commands = python setup.py testr --coverage --testr-arg='tempest\.tests {posargs}'
[testenv:all]
sitepackages = {[tempestenv]sitepackages}
# 'all' includes slow tests
setenv = {[tempestenv]setenv}
OS_TEST_TIMEOUT=1200
deps = {[tempestenv]deps}
commands =
find . -type f -name "*.pyc" -delete
bash tools/pretty_tox.sh '{posargs}'
[testenv:full]
sitepackages = {[tempestenv]sitepackages}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select which tests to run and exclude the slow tag:
# See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
commands =
find . -type f -name "*.pyc" -delete
bash tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
[testenv:full-serial]
sitepackages = {[tempestenv]sitepackages}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select which tests to run and exclude the slow tag:
# See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
commands =
find . -type f -name "*.pyc" -delete
bash tools/pretty_tox_serial.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
[testenv:heat-slow]
sitepackages = {[tempestenv]sitepackages}
setenv = {[tempestenv]setenv}
OS_TEST_TIMEOUT=1200
deps = {[tempestenv]deps}
# The regex below is used to select heat api/scenario tests tagged as slow.
commands =
find . -type f -name "*.pyc" -delete
bash tools/pretty_tox.sh '(?=.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)\.orchestration) {posargs}'
[testenv:large-ops]
sitepackages = {[tempestenv]sitepackages}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
commands =
find . -type f -name "*.pyc" -delete
python setup.py testr --slowest --testr-args='tempest.scenario.test_large_ops {posargs}'
[testenv:smoke]
sitepackages = {[tempestenv]sitepackages}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
commands =
find . -type f -name "*.pyc" -delete
bash tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])((smoke)|(^tempest\.scenario)) {posargs}'
[testenv:smoke-serial]
sitepackages = {[tempestenv]sitepackages}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# This is still serial because neutron doesn't work with parallel. See:
# https://bugs.launchpad.net/tempest/+bug/1216076 so the neutron smoke
# job would fail if we moved it to parallel.
commands =
find . -type f -name "*.pyc" -delete
bash tools/pretty_tox_serial.sh '(?!.*\[.*\bslow\b.*\])((smoke)|(^tempest\.scenario)) {posargs}'
[testenv:stress]
sitepackages = {[tempestenv]sitepackages}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
commands =
run-tempest-stress {posargs}
[testenv:venv]
commands = {posargs}
[testenv:docs]
commands = python setup.py build_sphinx {posargs}
[testenv:pep8]
commands =
flake8 {posargs}
{toxinidir}/tools/config/check_uptodate.sh
python tools/check_uuid.py
[testenv:uuidgen]
commands =
python tools/check_uuid.py --fix
[hacking]
local-check-factory = tempest.hacking.checks.factory
import_exceptions = tempest.services
[flake8]
# E125 is a won't fix until https://github.com/jcrocholl/pep8/issues/126 is resolved. For further detail see https://review.openstack.org/#/c/36788/
# E123 skipped because it is ignored by default in the default pep8
# E129 skipped because it is too limiting when combined with other rules
# Skipped because of new hacking 0.9: H405
ignore = E125,E123,E129,H404,H405,E501
show-source = True
exclude = .git,.venv,.tox,dist,doc,openstack,*egg