From b2754090066ff1d15aa9d2f1b6d3428c620ddbb9 Mon Sep 17 00:00:00 2001 From: KongWei Date: Tue, 12 Jul 2016 00:26:43 +0000 Subject: [PATCH] Add daisy tempest. Change-Id: I5831348eaf8a427afb50836089b246a8275cd57e --- test/tempest/clients.py | 428 +++++ test/tempest/etc/tempest.conf | 1234 ++++++++++++++ test/tempest/openstack-common.conf | 8 + test/tempest/requirements.txt | 26 + test/tempest/run_tempest.sh | 146 ++ test/tempest/run_tests.sh | 150 ++ test/tempest/setup.cfg | 36 + test/tempest/setup.py | 30 + test/tempest/tempest/__init__.py | 0 test/tempest/tempest/api/README.rst | 52 + test/tempest/tempest/api/__init__.py | 0 test/tempest/tempest/api/daisy/__init__.py | 0 test/tempest/tempest/api/daisy/base.py | 562 +++++++ test/tempest/tempest/api/daisy/v1/__init__.py | 0 .../tempest/api/daisy/v1/getnodeinfo.sh | 138 ++ test/tempest/tempest/api/daisy/v1/mergeLog.py | 38 + .../api/daisy/v1/test_cinder_volume.py | 292 ++++ .../tempest/api/daisy/v1/test_cluster.py | 371 +++++ .../tempest/api/daisy/v1/test_component.py | 143 ++ .../tempest/api/daisy/v1/test_config_file.py | 70 + .../api/daisy/v1/test_discover_host.py | 121 ++ test/tempest/tempest/api/daisy/v1/test_hwm.py | 61 + .../api/daisy/v1/test_logical_network.py | 273 ++++ .../tempest/api/daisy/v1/test_service.py | 152 ++ test/tempest/tempest/config.py | 1436 +++++++++++++++++ test/tempest/tempest/test.py | 768 +++++++++ test/tempest/tempest_envir_conf_install.sh | 208 +++ test/tempest/test-requirements.txt | 13 + test/tempest/tox.ini | 133 ++ 29 files changed, 6889 insertions(+) create mode 100644 test/tempest/clients.py create mode 100755 test/tempest/etc/tempest.conf create mode 100644 test/tempest/openstack-common.conf create mode 100644 test/tempest/requirements.txt create mode 100644 test/tempest/run_tempest.sh create mode 100644 test/tempest/run_tests.sh create mode 100644 test/tempest/setup.cfg create mode 100644 test/tempest/setup.py create mode 100644 test/tempest/tempest/__init__.py create mode 100644 
test/tempest/tempest/api/README.rst create mode 100644 test/tempest/tempest/api/__init__.py create mode 100644 test/tempest/tempest/api/daisy/__init__.py create mode 100644 test/tempest/tempest/api/daisy/base.py create mode 100644 test/tempest/tempest/api/daisy/v1/__init__.py create mode 100755 test/tempest/tempest/api/daisy/v1/getnodeinfo.sh create mode 100755 test/tempest/tempest/api/daisy/v1/mergeLog.py create mode 100755 test/tempest/tempest/api/daisy/v1/test_cinder_volume.py create mode 100755 test/tempest/tempest/api/daisy/v1/test_cluster.py create mode 100755 test/tempest/tempest/api/daisy/v1/test_component.py create mode 100755 test/tempest/tempest/api/daisy/v1/test_config_file.py create mode 100755 test/tempest/tempest/api/daisy/v1/test_discover_host.py create mode 100755 test/tempest/tempest/api/daisy/v1/test_hwm.py create mode 100755 test/tempest/tempest/api/daisy/v1/test_logical_network.py create mode 100755 test/tempest/tempest/api/daisy/v1/test_service.py create mode 100755 test/tempest/tempest/config.py create mode 100755 test/tempest/tempest/test.py create mode 100755 test/tempest/tempest_envir_conf_install.sh create mode 100644 test/tempest/test-requirements.txt create mode 100644 test/tempest/tox.ini diff --git a/test/tempest/clients.py b/test/tempest/clients.py new file mode 100644 index 00000000..e1b6eab1 --- /dev/null +++ b/test/tempest/clients.py @@ -0,0 +1,428 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from oslo_log import log as logging + +from tempest.common import cred_provider +from tempest.common import negative_rest_client +from tempest import config +from tempest import manager +from tempest.services.baremetal.v1.json.baremetal_client import \ + BaremetalClientJSON +from tempest.services import botoclients +from tempest.services.compute.json.agents_client import \ + AgentsClientJSON +from tempest.services.compute.json.aggregates_client import \ + AggregatesClientJSON +from tempest.services.compute.json.availability_zone_client import \ + AvailabilityZoneClientJSON +from tempest.services.compute.json.baremetal_nodes_client import \ + BaremetalNodesClientJSON +from tempest.services.compute.json.certificates_client import \ + CertificatesClientJSON +from tempest.services.compute.json.extensions_client import \ + ExtensionsClientJSON +from tempest.services.compute.json.fixed_ips_client import FixedIPsClientJSON +from tempest.services.compute.json.flavors_client import FlavorsClientJSON +from tempest.services.compute.json.floating_ips_client import \ + FloatingIPsClientJSON +from tempest.services.compute.json.hosts_client import HostsClientJSON +from tempest.services.compute.json.hypervisor_client import \ + HypervisorClientJSON +from tempest.services.compute.json.images_client import ImagesClientJSON +from tempest.services.compute.json.instance_usage_audit_log_client import \ + InstanceUsagesAuditLogClientJSON +from tempest.services.compute.json.interfaces_client import \ + InterfacesClientJSON +from tempest.services.compute.json.keypairs_client import KeyPairsClientJSON +from tempest.services.compute.json.limits_client import LimitsClientJSON +from tempest.services.compute.json.migrations_client import \ + MigrationsClientJSON +from tempest.services.compute.json.networks_client import NetworksClientJSON +from 
tempest.services.compute.json.quotas_client import QuotaClassesClientJSON +from tempest.services.compute.json.quotas_client import QuotasClientJSON +from tempest.services.compute.json.security_group_default_rules_client import \ + SecurityGroupDefaultRulesClientJSON +from tempest.services.compute.json.security_groups_client import \ + SecurityGroupsClientJSON +from tempest.services.compute.json.servers_client import ServersClientJSON +from tempest.services.compute.json.services_client import ServicesClientJSON +from tempest.services.compute.json.tenant_networks_client import \ + TenantNetworksClientJSON +from tempest.services.compute.json.tenant_usages_client import \ + TenantUsagesClientJSON +from tempest.services.compute.json.volumes_extensions_client import \ + VolumesExtensionsClientJSON +from tempest.services.data_processing.v1_1.data_processing_client import \ + DataProcessingClient +from tempest.services.database.json.flavors_client import \ + DatabaseFlavorsClientJSON +from tempest.services.database.json.limits_client import \ + DatabaseLimitsClientJSON +from tempest.services.database.json.versions_client import \ + DatabaseVersionsClientJSON +from tempest.services.identity.v2.json.identity_client import \ + IdentityClientJSON +from tempest.services.identity.v2.json.token_client import TokenClientJSON +from tempest.services.identity.v3.json.credentials_client import \ + CredentialsClientJSON +from tempest.services.identity.v3.json.endpoints_client import \ + EndPointClientJSON +from tempest.services.identity.v3.json.identity_client import \ + IdentityV3ClientJSON +from tempest.services.identity.v3.json.policy_client import PolicyClientJSON +from tempest.services.identity.v3.json.region_client import RegionClientJSON +from tempest.services.identity.v3.json.service_client import \ + ServiceClientJSON +from tempest.services.identity.v3.json.token_client import V3TokenClientJSON +from tempest.services.image.v1.json.image_client import ImageClientJSON +from 
tempest.services.image.v2.json.image_client import ImageClientV2JSON +from tempest.services.messaging.json.messaging_client import \ + MessagingClientJSON +from tempest.services.network.json.network_client import NetworkClientJSON +from tempest.services.object_storage.account_client import AccountClient +from tempest.services.object_storage.container_client import ContainerClient +from tempest.services.object_storage.object_client import ObjectClient +from tempest.services.orchestration.json.orchestration_client import \ + OrchestrationClient +from tempest.services.telemetry.json.telemetry_client import \ + TelemetryClientJSON +from tempest.services.volume.json.admin.volume_hosts_client import \ + VolumeHostsClientJSON +from tempest.services.volume.json.admin.volume_quotas_client import \ + VolumeQuotasClientJSON +from tempest.services.volume.json.admin.volume_services_client import \ + VolumesServicesClientJSON +from tempest.services.volume.json.admin.volume_types_client import \ + VolumeTypesClientJSON +from tempest.services.volume.json.availability_zone_client import \ + VolumeAvailabilityZoneClientJSON +from tempest.services.volume.json.backups_client import BackupsClientJSON +from tempest.services.volume.json.extensions_client import \ + ExtensionsClientJSON as VolumeExtensionClientJSON +from tempest.services.volume.json.qos_client import QosSpecsClientJSON +from tempest.services.volume.json.snapshots_client import SnapshotsClientJSON +from tempest.services.volume.json.volumes_client import VolumesClientJSON +from tempest.services.volume.v2.json.admin.volume_hosts_client import \ + VolumeHostsV2ClientJSON +from tempest.services.volume.v2.json.admin.volume_quotas_client import \ + VolumeQuotasV2Client +from tempest.services.volume.v2.json.admin.volume_services_client import \ + VolumesServicesV2ClientJSON +from tempest.services.volume.v2.json.admin.volume_types_client import \ + VolumeTypesV2ClientJSON +from 
tempest.services.volume.v2.json.availability_zone_client import \ + VolumeV2AvailabilityZoneClientJSON +from tempest.services.volume.v2.json.backups_client import BackupsClientV2JSON +from tempest.services.volume.v2.json.extensions_client import \ + ExtensionsV2ClientJSON as VolumeV2ExtensionClientJSON +from tempest.services.volume.v2.json.qos_client import QosSpecsV2ClientJSON +from tempest.services.volume.v2.json.snapshots_client import \ + SnapshotsV2ClientJSON +from tempest.services.volume.v2.json.volumes_client import VolumesV2ClientJSON + +CONF = config.CONF +LOG = logging.getLogger(__name__) + + +class Manager(manager.Manager): + + """ + Top level manager for OpenStack tempest clients + """ + + default_params = { + 'disable_ssl_certificate_validation': + CONF.identity.disable_ssl_certificate_validation, + 'ca_certs': CONF.identity.ca_certificates_file, + 'trace_requests': CONF.debug.trace_requests + } + + # NOTE: Tempest uses timeout values of compute API if project specific + # timeout values don't exist. 
+ default_params_with_timeout_values = { + 'build_interval': CONF.compute.build_interval, + 'build_timeout': CONF.compute.build_timeout + } + default_params_with_timeout_values.update(default_params) + + def __init__(self, credentials=None, service=None): + super(Manager, self).__init__(credentials=credentials) + + self._set_compute_clients() + self._set_database_clients() + self._set_identity_clients() + self._set_volume_clients() + self._set_object_storage_clients() + + self.baremetal_client = BaremetalClientJSON( + self.auth_provider, + CONF.baremetal.catalog_type, + CONF.identity.region, + endpoint_type=CONF.baremetal.endpoint_type, + **self.default_params_with_timeout_values) + self.network_client = NetworkClientJSON( + self.auth_provider, + CONF.network.catalog_type, + CONF.network.region or CONF.identity.region, + endpoint_type=CONF.network.endpoint_type, + build_interval=CONF.network.build_interval, + build_timeout=CONF.network.build_timeout, + **self.default_params) + self.messaging_client = MessagingClientJSON( + self.auth_provider, + CONF.messaging.catalog_type, + CONF.identity.region, + **self.default_params_with_timeout_values) + if CONF.service_available.ceilometer: + self.telemetry_client = TelemetryClientJSON( + self.auth_provider, + CONF.telemetry.catalog_type, + CONF.identity.region, + endpoint_type=CONF.telemetry.endpoint_type, + **self.default_params_with_timeout_values) + if CONF.service_available.glance: + self.image_client = ImageClientJSON( + self.auth_provider, + CONF.image.catalog_type, + CONF.image.region or CONF.identity.region, + endpoint_type=CONF.image.endpoint_type, + build_interval=CONF.image.build_interval, + build_timeout=CONF.image.build_timeout, + **self.default_params) + self.image_client_v2 = ImageClientV2JSON( + self.auth_provider, + CONF.image.catalog_type, + CONF.image.region or CONF.identity.region, + endpoint_type=CONF.image.endpoint_type, + build_interval=CONF.image.build_interval, + 
build_timeout=CONF.image.build_timeout, + **self.default_params) + self.orchestration_client = OrchestrationClient( + self.auth_provider, + CONF.orchestration.catalog_type, + CONF.orchestration.region or CONF.identity.region, + endpoint_type=CONF.orchestration.endpoint_type, + build_interval=CONF.orchestration.build_interval, + build_timeout=CONF.orchestration.build_timeout, + **self.default_params) + self.data_processing_client = DataProcessingClient( + self.auth_provider, + CONF.data_processing.catalog_type, + CONF.identity.region, + endpoint_type=CONF.data_processing.endpoint_type, + **self.default_params_with_timeout_values) + self.negative_client = negative_rest_client.NegativeRestClient( + self.auth_provider, service, **self.default_params) + + # Generating EC2 credentials in tempest is only supported + # with identity v2 + if CONF.identity_feature_enabled.api_v2 and \ + CONF.identity.auth_version == 'v2': + # EC2 and S3 clients, if used, will check configured AWS credentials + # and generate new ones if needed + self.ec2api_client = botoclients.APIClientEC2(self.identity_client) + self.s3_client = botoclients.ObjectClientS3(self.identity_client) + + def _set_compute_clients(self): + params = { + 'service': CONF.compute.catalog_type, + 'region': CONF.compute.region or CONF.identity.region, + 'endpoint_type': CONF.compute.endpoint_type, + 'build_interval': CONF.compute.build_interval, + 'build_timeout': CONF.compute.build_timeout + } + params.update(self.default_params) + + self.agents_client = AgentsClientJSON(self.auth_provider, **params) + self.networks_client = NetworksClientJSON(self.auth_provider, **params) + self.migrations_client = MigrationsClientJSON(self.auth_provider, + **params) + self.security_group_default_rules_client = ( + SecurityGroupDefaultRulesClientJSON(self.auth_provider, **params)) + self.certificates_client = CertificatesClientJSON(self.auth_provider, + **params) + self.servers_client = ServersClientJSON( + self.auth_provider, + 
enable_instance_password=CONF.compute_feature_enabled + .enable_instance_password, + **params) + self.limits_client = LimitsClientJSON(self.auth_provider, **params) + self.images_client = ImagesClientJSON(self.auth_provider, **params) + self.keypairs_client = KeyPairsClientJSON(self.auth_provider, **params) + self.quotas_client = QuotasClientJSON(self.auth_provider, **params) + self.quota_classes_client = QuotaClassesClientJSON(self.auth_provider, + **params) + self.flavors_client = FlavorsClientJSON(self.auth_provider, **params) + self.extensions_client = ExtensionsClientJSON(self.auth_provider, + **params) + self.floating_ips_client = FloatingIPsClientJSON(self.auth_provider, + **params) + self.security_groups_client = SecurityGroupsClientJSON( + self.auth_provider, **params) + self.interfaces_client = InterfacesClientJSON(self.auth_provider, + **params) + self.fixed_ips_client = FixedIPsClientJSON(self.auth_provider, + **params) + self.availability_zone_client = AvailabilityZoneClientJSON( + self.auth_provider, **params) + self.aggregates_client = AggregatesClientJSON(self.auth_provider, + **params) + self.services_client = ServicesClientJSON(self.auth_provider, **params) + self.tenant_usages_client = TenantUsagesClientJSON(self.auth_provider, + **params) + self.hosts_client = HostsClientJSON(self.auth_provider, **params) + self.hypervisor_client = HypervisorClientJSON(self.auth_provider, + **params) + self.instance_usages_audit_log_client = \ + InstanceUsagesAuditLogClientJSON(self.auth_provider, **params) + self.tenant_networks_client = \ + TenantNetworksClientJSON(self.auth_provider, **params) + self.baremetal_nodes_client = BaremetalNodesClientJSON( + self.auth_provider, **params) + + # NOTE: The following client needs special timeout values because + # the API is a proxy for the other component. 
+ params_volume = copy.deepcopy(params) + params_volume.update({ + 'build_interval': CONF.volume.build_interval, + 'build_timeout': CONF.volume.build_timeout + }) + self.volumes_extensions_client = VolumesExtensionsClientJSON( + self.auth_provider, default_volume_size=CONF.volume.volume_size, + **params_volume) + + def _set_database_clients(self): + self.database_flavors_client = DatabaseFlavorsClientJSON( + self.auth_provider, + CONF.database.catalog_type, + CONF.identity.region, + **self.default_params_with_timeout_values) + self.database_limits_client = DatabaseLimitsClientJSON( + self.auth_provider, + CONF.database.catalog_type, + CONF.identity.region, + **self.default_params_with_timeout_values) + self.database_versions_client = DatabaseVersionsClientJSON( + self.auth_provider, + CONF.database.catalog_type, + CONF.identity.region, + **self.default_params_with_timeout_values) + + def _set_identity_clients(self): + params = { + 'service': CONF.identity.catalog_type, + 'region': CONF.identity.region, + 'endpoint_type': 'adminURL' + } + params.update(self.default_params_with_timeout_values) + + self.identity_client = IdentityClientJSON(self.auth_provider, + **params) + self.identity_v3_client = IdentityV3ClientJSON(self.auth_provider, + **params) + self.endpoints_client = EndPointClientJSON(self.auth_provider, + **params) + self.service_client = ServiceClientJSON(self.auth_provider, **params) + self.policy_client = PolicyClientJSON(self.auth_provider, **params) + self.region_client = RegionClientJSON(self.auth_provider, **params) + self.credentials_client = CredentialsClientJSON(self.auth_provider, + **params) + # Token clients do not use the catalog. They only need default_params. 
+ self.token_client = TokenClientJSON(CONF.identity.uri, + **self.default_params) + if CONF.identity_feature_enabled.api_v3: + self.token_v3_client = V3TokenClientJSON(CONF.identity.uri_v3, + **self.default_params) + + def _set_volume_clients(self): + params = { + 'service': CONF.volume.catalog_type, + 'region': CONF.volume.region or CONF.identity.region, + 'endpoint_type': CONF.volume.endpoint_type, + 'build_interval': CONF.volume.build_interval, + 'build_timeout': CONF.volume.build_timeout + } + params.update(self.default_params) + + self.volume_qos_client = QosSpecsClientJSON(self.auth_provider, + **params) + self.volume_qos_v2_client = QosSpecsV2ClientJSON( + self.auth_provider, **params) + self.volume_services_v2_client = VolumesServicesV2ClientJSON( + self.auth_provider, **params) + self.backups_client = BackupsClientJSON(self.auth_provider, **params) + self.backups_v2_client = BackupsClientV2JSON(self.auth_provider, + **params) + self.snapshots_client = SnapshotsClientJSON(self.auth_provider, + **params) + self.snapshots_v2_client = SnapshotsV2ClientJSON(self.auth_provider, + **params) + self.volumes_client = VolumesClientJSON( + self.auth_provider, default_volume_size=CONF.volume.volume_size, + **params) + self.volumes_v2_client = VolumesV2ClientJSON( + self.auth_provider, default_volume_size=CONF.volume.volume_size, + **params) + self.volume_types_client = VolumeTypesClientJSON(self.auth_provider, + **params) + self.volume_services_client = VolumesServicesClientJSON( + self.auth_provider, **params) + self.volume_hosts_client = VolumeHostsClientJSON(self.auth_provider, + **params) + self.volume_hosts_v2_client = VolumeHostsV2ClientJSON( + self.auth_provider, **params) + self.volume_quotas_client = VolumeQuotasClientJSON(self.auth_provider, + **params) + self.volume_quotas_v2_client = VolumeQuotasV2Client(self.auth_provider, + **params) + self.volumes_extension_client = VolumeExtensionClientJSON( + self.auth_provider, **params) + 
self.volumes_v2_extension_client = VolumeV2ExtensionClientJSON( + self.auth_provider, **params) + self.volume_availability_zone_client = \ + VolumeAvailabilityZoneClientJSON(self.auth_provider, **params) + self.volume_v2_availability_zone_client = \ + VolumeV2AvailabilityZoneClientJSON(self.auth_provider, **params) + self.volume_types_v2_client = VolumeTypesV2ClientJSON( + self.auth_provider, **params) + + def _set_object_storage_clients(self): + params = { + 'service': CONF.object_storage.catalog_type, + 'region': CONF.object_storage.region or CONF.identity.region, + 'endpoint_type': CONF.object_storage.endpoint_type + } + params.update(self.default_params_with_timeout_values) + + self.account_client = AccountClient(self.auth_provider, **params) + self.container_client = ContainerClient(self.auth_provider, **params) + self.object_client = ObjectClient(self.auth_provider, **params) + + +class AdminManager(Manager): + + """ + Manager object that uses the admin credentials for its + managed client objects + """ + + def __init__(self, service=None): + super(AdminManager, self).__init__( + credentials=cred_provider.get_configured_credentials( + 'identity_admin'), + service=service) diff --git a/test/tempest/etc/tempest.conf b/test/tempest/etc/tempest.conf new file mode 100755 index 00000000..ec565da4 --- /dev/null +++ b/test/tempest/etc/tempest.conf @@ -0,0 +1,1234 @@ +[oslo_concurrency] +lock_path = /tmp + +[DEFAULT] +#lock_path = /tmp +log_file = tempest.log +lock_path = /tmp + +# +# From tempest.config +# + +# Whether to disable inter-process locks (boolean value) +#disable_process_locking = false + +# Directory to use for lock files. (string value) +#lock_path = + +# +# From tempest.config +# + +# Print debugging output (set logging level to DEBUG instead of +# default WARNING level). (boolean value) +#debug = false + +# Print more verbose output (set logging level to INFO instead of +# default WARNING level). 
(boolean value) +#verbose = false + +# +# From tempest.config +# + +# The name of a logging configuration file. This file is appended to +# any existing logging configuration files. For details about logging +# configuration files, see the Python logging module documentation. +# (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append = + +# Format string for %%(asctime)s in log records. Default: %(default)s +# . (string value) +#log_date_format = %Y-%m-%d %H:%M:%S + +# (Optional) The base directory used for relative --log-file paths. +# (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir = + +# (Optional) Name of log file to output to. If no default is set, +# logging will go to stdout. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file = + +# DEPRECATED. A logging.Formatter log message format string which may +# use any of the available logging.LogRecord attributes. This option +# is deprecated. Please use logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format = + +# Syslog facility to receive log lines. (string value) +#syslog_log_facility = LOG_USER + +# Use syslog for logging. Existing syslog format is DEPRECATED during +# I, and will change in J to honor RFC5424. (boolean value) +#use_syslog = false + +# (Optional) Enables or disables syslog rfc5424 format for logging. If +# enabled, prefixes the MSG part of the syslog message with APP-NAME +# (RFC5424). The format without the APP-NAME is deprecated in I, and +# will be removed in J. (boolean value) +#use_syslog_rfc_format = false + +# +# From tempest.config +# + +# Log output to standard error. (boolean value) +#use_stderr = true + +# +# From tempest.config +# + +# List of logger=LEVEL pairs. 
(list value) +#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN + +# Enables or disables fatal status of deprecations. (boolean value) +#fatal_deprecations = false + +# The format for an instance that is passed with the log message. +# (string value) +#instance_format = "[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log message. +# (string value) +#instance_uuid_format = "[instance: %(uuid)s] " + +# Format string to use for log messages with context. (string value) +#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Data to append to log format when level is DEBUG. (string value) +#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d + +# Format string to use for log messages without context. (string +# value) +#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Prefix each line of exception output with this format. (string +# value) +#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s + +# Enables or disables publication of error events. (boolean value) +#publish_errors = false + + +[auth] + +# +# From tempest.config +# + +# Allows test cases to create/destroy tenants and users. This option +# requires that OpenStack Identity API admin credentials are known. 
If +# false, isolated test cases and parallel execution, can still be +# achieved configuring a list of test accounts (boolean value) +# Deprecated group/name - [compute]/allow_tenant_isolation +# Deprecated group/name - [orchestration]/allow_tenant_isolation +#allow_tenant_isolation = false + +# If set to True it enables the Accounts provider, which locks +# credentials to allow for parallel execution with pre-provisioned +# accounts. It can only be used to run tests that ensure credentials +# cleanup happens. It requires at least `2 * CONC` distinct accounts +# configured in `test_accounts_file`, with CONC == the number of +# concurrent test processes. (boolean value) +#locking_credentials_provider = false + +# Path to the yaml file that contains the list of credentials to use +# for running tests (string value) +#test_accounts_file = etc/accounts.yaml + + +[baremetal] + +# +# From tempest.config +# + +# Timeout for Ironic node to completely provision (integer value) +#active_timeout = 300 + +# Timeout for association of Nova instance and Ironic node (integer +# value) +#association_timeout = 30 + +# Catalog type of the baremetal provisioning service (string value) +#catalog_type = baremetal + +# Driver name which Ironic uses (string value) +#driver = fake + +# Whether the Ironic nova-compute driver is enabled (boolean value) +#driver_enabled = false + +# The endpoint type to use for the baremetal provisioning service +# (string value) +#endpoint_type = publicURL + +# Timeout for Ironic power transitions. (integer value) +#power_timeout = 60 + +# Timeout for unprovisioning an Ironic node. 
(integer value) +#unprovision_timeout = 60 + + +[boto] + +# +# From tempest.config +# + +# AKI Kernel Image manifest (string value) +#aki_manifest = cirros-0.3.0-x86_64-vmlinuz.manifest.xml + +# AMI Machine Image manifest (string value) +#ami_manifest = cirros-0.3.0-x86_64-blank.img.manifest.xml + +# ARI Ramdisk Image manifest (string value) +#ari_manifest = cirros-0.3.0-x86_64-initrd.manifest.xml + +# AWS Access Key (string value) +#aws_access = + +# AWS Secret Key (string value) +#aws_secret = + +# AWS Zone for EC2 tests (string value) +#aws_zone = nova + +# Status Change Test Interval (integer value) +#build_interval = 1 + +# Status Change Timeout (integer value) +#build_timeout = 60 + +# EC2 URL (string value) +#ec2_url = http://localhost:8773/services/Cloud + +# boto Http socket timeout (integer value) +#http_socket_timeout = 3 + +# Instance type (string value) +#instance_type = m1.tiny + +# boto num_retries on error (integer value) +#num_retries = 1 + +# S3 Materials Path (string value) +#s3_materials_path = /opt/stack/devstack/files/images/s3-materials/cirros-0.3.0 + +# S3 URL (string value) +#s3_url = http://localhost:8080 + + +[cli] +cli_dir = /usr/bin + +# +# From tempest.config +# + +# directory where python client binaries are located (string value) +#cli_dir = /usr/local/bin + +# enable cli tests (boolean value) +#enabled = true + +# Whether the tempest run location has access to the *-manage +# commands. In a pure blackbox environment it will not. 
(boolean +# value) +#has_manage = true + +# Number of seconds to wait on a CLI timeout (integer value) +#timeout = 15 + + +[compute] +build_timeout = 300 +run_ssh = true +ssh_auth_method = adminpass +ssh_user = cirros +image_ssh_user = cirros +image_ssh_password = cubswin:) +image_ref = +image_ref_alt = +flavor_ref = +flavor_ref_alt = +use_floatingip_for_ssh = true +fixed_network_name = net1 +fixed_network_name_external = big-net + +#VMB_V05.01.10_1128Patch_MGW_20150309_serial +image_ref_vmb = fc9e4f80-6133-4b25-b81b-3eafcea621fc +image_ssh_vmb_user = zte +image_ssh_vmb_pwd = zte + +#winxp +image_ref_xp = 1e48ce15-f4db-46bf-9a30-01074a81f666 +image_ssh_xp_user = openstack +image_ssh_xp_pwd = 1234 + +#linux-cgslv5 cgslv5_x86_64 +image_ref_linux = 9c6e73d5-99ed-4b19-a9f3-f79f6272d2b9 +image_ssh_linux_user = root +image_ssh_linux_pwd = ossdbg1 + +#suse +image_ref_suse = e9f6f3e7-cca5-4cb7-b955-4eb49cf147cf +image_ssh_suse_user = root +image_ssh_suse_pwd = it@123456 + +#centos7 +image_ref_centos7 = 64fe48eb-5179-4dcf-a08a-797370f130d7 +image_ssh_centos7_user = root +image_ssh_centos7_pwd = ossdbg1 + +#is sdn , default is False +use_sdn = False + +# +# From tempest.config +# + +# Time in seconds between build status checks. (integer value) +#build_interval = 1 + +# Timeout in seconds to wait for an instance to build. Other services +# that do not define build_timeout will inherit this value, for +# example the image service. (integer value) +#build_timeout = 300 + +# Catalog type of the Compute service. (string value) +#catalog_type = compute + +# The endpoint type to use for the compute service. (string value) +#endpoint_type = publicURL + +# Name of the fixed network that is visible to all test tenants. +# (string value) +#fixed_network_name = private + +# Valid primary flavor to use in tests. (string value) +#flavor_ref = 1 + +# Valid secondary flavor to be used in tests. 
(string value) +#flavor_ref_alt = 2 + +# Unallocated floating IP range, which will be used to test the +# floating IP bulk feature for CRUD operation. This block must not +# overlap an existing floating IP pool. (string value) +#floating_ip_range = 10.0.0.0/29 + +# Password used to authenticate to an instance using the alternate +# image. (string value) +#image_alt_ssh_password = password + +# User name used to authenticate to an instance using the alternate +# image. (string value) +#image_alt_ssh_user = root + +# Valid primary image reference to be used in tests. This is a +# required option (string value) +#image_ref = + +# Valid secondary image reference to be used in tests. This is a +# required option, but if only one image is available duplicate the +# value of image_ref above (string value) +#image_ref_alt = + +# Password used to authenticate to an instance. (string value) +#image_ssh_password = password + +# User name used to authenticate to an instance. (string value) +#image_ssh_user = root + +# IP version used for SSH connections. (integer value) +#ip_version_for_ssh = 4 + +# Network used for SSH connections. Ignored if +# use_floatingip_for_ssh=true or run_ssh=false. (string value) +#network_for_ssh = public + +# Path to a private key file for SSH access to remote hosts (string +# value) +#path_to_private_key = + +# Timeout in seconds to wait for ping to succeed. (integer value) +#ping_timeout = 120 + +# Additional wait time for clean state, when there is no OS-EXT-STS +# extension available (integer value) +#ready_wait = 0 + +# The compute region name to use. If empty, the value of +# identity.region is used instead. If no such region is found in the +# service catalog, the first found one is used. (string value) +#region = + +# Should the tests ssh to instances? (boolean value) +#run_ssh = false + +# Time in seconds before a shelved instance is eligible for removing +# from a host. -1 never offload, 0 offload when shelved. 
This time +# should be the same as the time of nova.conf, and some tests will run +# for as long as the time. (integer value) +#shelved_offload_time = 0 + +# Auth method used for authenticate to the instance. Valid choices +# are: keypair, configured, adminpass. keypair: start the servers with +# an ssh keypair. configured: use the configured user and password. +# adminpass: use the injected adminPass. disabled: avoid using ssh +# when it is an option. (string value) +#ssh_auth_method = keypair + +# Timeout in seconds to wait for output from ssh channel. (integer +# value) +#ssh_channel_timeout = 60 + +# How to connect to the instance? fixed: using the first ip belongs +# the fixed network floating: creating and using a floating ip (string +# value) +#ssh_connect_method = fixed + +# Timeout in seconds to wait for authentication to succeed. (integer +# value) +#ssh_timeout = 300 + +# User name used to authenticate to an instance. (string value) +#ssh_user = root + +# Does SSH use Floating IPs? (boolean value) +#use_floatingip_for_ssh = true + +# Expected device name when a volume is attached to an instance +# (string value) +#volume_device_name = vdb + + +[compute-feature-enabled] +resize = true +live_migration = false +vnc_console = true +api_v3 = false +interface_attach = false + +# +# From tempest.config +# + +# A list of enabled compute extensions with a special entry all which +# indicates every extension is enabled. Each extension should be +# specified with alias name. Empty list indicates all extensions are +# disabled (list value) +#api_extensions = all + +# Does the test environment block migration support cinder iSCSI +# volumes (boolean value) +#block_migrate_cinder_iscsi = false + +# Does the test environment use block devices for live migration +# (boolean value) +#block_migration_for_live_migration = false + +# Does the test environment support changing the admin password? 
+# (boolean value) +#change_password = false + +# Does the test environment support obtaining instance serial console +# output? (boolean value) +#console_output = true + +# If false, skip disk config tests (boolean value) +#disk_config = true + +# Enables returning of the instance password by the relevant server +# API calls such as create, rebuild or rescue. (boolean value) +#enable_instance_password = true + +# Does the test environment support dynamic network interface +# attachment? (boolean value) +#interface_attach = true + +# Does the test environment support live migration available? (boolean +# value) +#live_migration = true + +# Does the test environment support pausing? (boolean value) +#pause = true + +# Enable RDP console. This configuration value should be same as +# [nova.rdp]->enabled in nova.conf (boolean value) +#rdp_console = false + +# Does the test environment support instance rescue mode? (boolean +# value) +#rescue = true + +# Does the test environment support resizing? (boolean value) +#resize = false + +# Does the test environment support shelving/unshelving? (boolean +# value) +#shelve = true + +# Does the test environment support creating snapshot images of +# running instances? (boolean value) +#snapshot = true + +# Enable Spice console. This configuration value should be same as +# [nova.spice]->enabled in nova.conf (boolean value) +#spice_console = false + +# Does the test environment support suspend/resume? (boolean value) +#suspend = true + +# Enable VNC console. 
This configuration value should be same as +# [nova.vnc]->vnc_enabled in nova.conf (boolean value) +#vnc_console = false + + +[dashboard] +dashboard_url = http://localhost/dashboard/ +login_url = http://localhost/dashboard/auth/login/ + +# +# From tempest.config +# + +# Where the dashboard can be found (string value) +#dashboard_url = http://localhost/ + +# Login page for the dashboard (string value) +#login_url = http://localhost/auth/login/ + + +[data_processing] + +# +# From tempest.config +# + +# Catalog type of the data processing service. (string value) +#catalog_type = data_processing + +# The endpoint type to use for the data processing service. (string +# value) +#endpoint_type = publicURL + + +[database] + +# +# From tempest.config +# + +# Catalog type of the Database service. (string value) +#catalog_type = database + +# Current database version to use in database tests. (string value) +#db_current_version = v1.0 + +# Valid primary flavor to use in database tests. (string value) +#db_flavor_ref = 1 + + +[debug] + +# +# From tempest.config +# + +# A regex to determine which requests should be traced. This is a +# regex to match the caller for rest client requests to be able to +# selectively trace calls out of specific classes and methods. It +# largely exists for test development, and is not expected to be used +# in a real deploy of tempest. This will be matched against the +# discovered ClassName:method in the test environment. Expected +# values for this field are: * ClassName:test_method_name - traces +# one test_method * ClassName:setUp(Class) - traces specific setup +# functions * ClassName:tearDown(Class) - traces specific teardown +# functions * ClassName:_run_cleanups - traces the cleanup functions +# If nothing is specified, this feature is not enabled. To trace +# everything specify .* as the regex. 
(string value) +#trace_requests = + + +[identity] +admin_username = admin +admin_role = admin +admin_tenant_name = admin +admin_password = keystone +alt_tenant_name = alt_demo +alt_username = alt_demo +alt_password = secret +tenant_name = demo +username = demo +password = secret +auth_version = v2 +catalog_type = identity +endpoint_type = publicURL +region = RegionOne +uri = http://127.0.0.1:5000/v2.0/ +uri_v3 = http://127.0.0.1:5000/v3/ + +# +# From tempest.config +# + +# Admin domain name for authentication (Keystone V3).The same domain +# applies to user and project (string value) +#admin_domain_name = + +# API key to use when authenticating as admin. (string value) +#admin_password = + +# Role required to administrate keystone. (string value) +#admin_role = admin + +# Administrative Tenant name to use for Keystone API requests. (string +# value) +#admin_tenant_name = + +# Administrative Username to use for Keystone API requests. (string +# value) +#admin_username = + +# Alternate domain name for authentication (Keystone V3).The same +# domain applies to user and project (string value) +#alt_domain_name = + +# API key to use when authenticating as alternate user. (string value) +#alt_password = + +# Alternate user's Tenant name to use for Nova API requests. (string +# value) +#alt_tenant_name = + +# Username of alternate user to use for Nova API requests. (string +# value) +#alt_username = + +# Identity API version to be used for authentication for API tests. +# (string value) +#auth_version = v2 + +# Specify a CA bundle file to use in verifying a TLS (https) server +# certificate. (string value) +#ca_certificates_file = + +# Catalog type of the Identity service. (string value) +#catalog_type = identity + +# Set to True if using self-signed SSL certificates. 
(boolean value) +#disable_ssl_certificate_validation = false + +# Domain name for authentication (Keystone V3).The same domain applies +# to user and project (string value) +#domain_name = + +# The endpoint type to use for the identity service. (string value) +#endpoint_type = publicURL + +# API key to use when authenticating. (string value) +#password = + +# The identity region name to use. Also used as the other services' +# region name unless they are set explicitly. If no such region is +# found in the service catalog, the first found one is used. (string +# value) +#region = RegionOne + +# Tenant name to use for Nova API requests. (string value) +#tenant_name = + +# Full URI of the OpenStack Identity API (Keystone), v2 (string value) +#uri = + +# Full URI of the OpenStack Identity API (Keystone), v3 (string value) +#uri_v3 = + +# Username to use for Nova API requests. (string value) +#username = + + +[identity-feature-enabled] + +# +# From tempest.config +# + +# Is the v2 identity API enabled (boolean value) +#api_v2 = true + +# Is the v3 identity API enabled (boolean value) +#api_v3 = true + +# Does the identity service have delegation and impersonation enabled +# (boolean value) +#trust = true + + +[image] +http_image = http://10.43.211.171/images/cirros-0.3.1-x86_64-disk.img +build_interval = 1 +build_timeout = 180 + +# +# From tempest.config +# + +# Catalog type of the Image service. (string value) +#catalog_type = image + +# The endpoint type to use for the image service. (string value) +#endpoint_type = publicURL + +# http accessible image (string value) +#http_image = http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz + +# The image region name to use. If empty, the value of identity.region +# is used instead. If no such region is found in the service catalog, +# the first found one is used. 
(string value) +#region = + + +[image-feature-enabled] + +# +# From tempest.config +# + +# Is the v1 image API enabled (boolean value) +#api_v1 = true + +# Is the v2 image API enabled (boolean value) +#api_v2 = true + + +[input-scenario] + +# +# From tempest.config +# + +# Matching flavors become parameters for scenario tests (string value) +#flavor_regex = ^m1.nano$ + +# Matching images become parameters for scenario tests (string value) +#image_regex = ^cirros-0.3.1-x86_64-uec$ + +# SSH verification in tests is skippedfor matching images (string +# value) +#non_ssh_image_regex = ^.*[Ww]in.*$ + +# List of user mapped to regex to matching image names. (string value) +#ssh_user_regex = [["^.*[Cc]irros.*$", "root"]] + + +[messaging] + +# +# From tempest.config +# + +# Catalog type of the Messaging service. (string value) +#catalog_type = messaging + +# The maximum grace period for a claim (integer value) +#max_claim_grace = 43200 + +# The maximum ttl for a claim (integer value) +#max_claim_ttl = 43200 + +# The maximum size of a message body (integer value) +#max_message_size = 262144 + +# The maximum ttl for a message (integer value) +#max_message_ttl = 1209600 + +# The maximum number of messages per claim (integer value) +#max_messages_per_claim = 20 + +# The maximum number of queue message per page when listing (or) +# posting messages (integer value) +#max_messages_per_page = 20 + +# The maximum metadata size for a queue (integer value) +#max_queue_metadata = 65536 + +# The maximum number of queue records per page when listing queues +# (integer value) +#max_queues_per_page = 20 + + +[negative] + +# +# From tempest.config +# + +# Test generator class for all negative tests (string value) +#test_generator = tempest.common.generator.negative_generator.NegativeTestGenerator + + +[network] +internal_network_id = 84b96c34-de5f-462e-96cb-ab36a07a3d18 +public_network_id = f968caee-ba4d-44ab-a9b3-2de9496ca98d +tenant_network_cidr = 192.168.0.0/24 +tenant_network_mask_bits 
= 28 +tenant_network_v6_cidr = 2003::/48 +tenant_network_v6_mask_bits = 50 +tenant_networks_reachable = false +public_router_id = 0cb7dad9-5db3-47ea-8d7e-f165332b5309 + +# +# From tempest.config +# + +# Time in seconds between network operation status checks. (integer +# value) +#build_interval = 1 + +# Timeout in seconds to wait for network operation to complete. +# (integer value) +#build_timeout = 300 + +# Catalog type of the Neutron service. (string value) +#catalog_type = network + +# List of dns servers which should be used for subnet creation (list +# value) +#dns_servers = 8.8.8.8,8.8.4.4 + +# The endpoint type to use for the network service. (string value) +#endpoint_type = publicURL + +# Id of the public network that provides external connectivity (string +# value) +#public_network_id = + +# Id of the public router that provides external connectivity. This +# should only be used when Neutron's 'allow_overlapping_ips' is set to +# 'False' in neutron.conf. usually not needed past 'Grizzly' release +# (string value) +#public_router_id = + +# The network region name to use. If empty, the value of +# identity.region is used instead. If no such region is found in the +# service catalog, the first found one is used. 
(string value) +#region = + +# The cidr block to allocate tenant ipv4 subnets from (string value) +#tenant_network_cidr = 10.100.0.0/16 + +# The mask bits for tenant ipv4 subnets (integer value) +#tenant_network_mask_bits = 28 + +# The cidr block to allocate tenant ipv6 subnets from (string value) +#tenant_network_v6_cidr = 2003::/48 + +# The mask bits for tenant ipv6 subnets (integer value) +#tenant_network_v6_mask_bits = 64 + +# Whether tenant network connectivity should be evaluated directly +# (boolean value) +#tenant_networks_reachable = false + + +[network-feature-enabled] +ipv6 = false +ipv6_subnet_attributes = false + +# +# From tempest.config +# + +# A list of enabled network extensions with a special entry all which +# indicates every extension is enabled. Empty list indicates all +# extensions are disabled (list value) +#api_extensions = all + +# Allow the execution of IPv6 tests (boolean value) +#ipv6 = true + +# Allow the execution of IPv6 subnet tests that use the extended IPv6 +# attributes ipv6_ra_mode and ipv6_address_mode (boolean value) +#ipv6_subnet_attributes = false + + +[object-storage] + +# +# From tempest.config +# + +# Catalog type of the Object-Storage service. (string value) +#catalog_type = object-store + +# Number of seconds to wait while looping to check the status of a +# container to container synchronization (integer value) +#container_sync_interval = 5 + +# Number of seconds to time on waiting for a container to container +# synchronization complete. (integer value) +#container_sync_timeout = 120 + +# The endpoint type to use for the object-store service. (string +# value) +#endpoint_type = publicURL + +# Role to add to users created for swift tests to enable creating +# containers (string value) +#operator_role = Member + +# The object-storage region name to use. If empty, the value of +# identity.region is used instead. If no such region is found in the +# service catalog, the first found one is used. 
(string value) +#region = + +# User role that has reseller admin (string value) +#reseller_admin_role = ResellerAdmin + + +[object-storage-feature-enabled] + +# +# From tempest.config +# + +# Execute (old style) container-sync tests (boolean value) +#container_sync = true + +# Execute discoverability tests (boolean value) +#discoverability = true + +# A list of the enabled optional discoverable apis. A single entry, +# all, indicates that all of these features are expected to be enabled +# (list value) +#discoverable_apis = all + +# Execute object-versioning tests (boolean value) +#object_versioning = true + + +[orchestration] + +# +# From tempest.config +# + +# Time in seconds between build status checks. (integer value) +#build_interval = 1 + +# Timeout in seconds to wait for a stack to build. (integer value) +#build_timeout = 1200 + +# Catalog type of the Orchestration service. (string value) +#catalog_type = orchestration + +# The endpoint type to use for the orchestration service. (string +# value) +#endpoint_type = publicURL + +# Name of heat-cfntools enabled image to use when launching test +# instances. (string value) +#image_ref = + +# Instance type for tests. Needs to be big enough for a full OS plus +# the test workload (string value) +#instance_type = m1.micro + +# Name of existing keypair to launch servers with. (string value) +#keypair_name = + +# Value must match heat configuration of the same name. (integer +# value) +#max_resources_per_stack = 1000 + +# Value must match heat configuration of the same name. (integer +# value) +#max_template_size = 524288 + +# The orchestration region name to use. If empty, the value of +# identity.region is used instead. If no such region is found in the +# service catalog, the first found one is used. 
(string value) +#region = + + +[scenario] +img_dir = /home/image +img_file = cirros-0.3.1-x86_64-disk.img +qcow2_img_file = cirros-0.3.1-x86_64-disk.img +qcow2_img_file_2 = xp-ssh.img +large_ops_number = 1 +ssh_user = cirros + +# +# From tempest.config +# + +# AKI image file name (string value) +#aki_img_file = cirros-0.3.1-x86_64-vmlinuz + +# AMI image file name (string value) +#ami_img_file = cirros-0.3.1-x86_64-blank.img + +# ARI image file name (string value) +#ari_img_file = cirros-0.3.1-x86_64-initrd + +# Image container format (string value) +#img_container_format = bare + +# Directory containing image files (string value) +#img_dir = /opt/stack/new/devstack/files/images/cirros-0.3.1-x86_64-uec + +# Image disk format (string value) +#img_disk_format = qcow2 + +# Image file name (string value) +# Deprecated group/name - [DEFAULT]/qcow2_img_file +#img_file = cirros-0.3.1-x86_64-disk.img + +# specifies how many resources to request at once. Used for large +# operations testing. (integer value) +#large_ops_number = 0 + +# ssh username for the image file (string value) +#ssh_user = cirros + + +[service_available] +ceilometer = false +cinder = true +glance = true +heat = false +horizon = true +ironic = false +neutron = true +nova = true +sahara = false +swift = false +trove = false +zaqar = false + +# +# From tempest.config +# + +# Whether or not Ceilometer is expected to be available (boolean +# value) +#ceilometer = true + +# Whether or not cinder is expected to be available (boolean value) +#cinder = true + +# Whether or not glance is expected to be available (boolean value) +#glance = true + +# Whether or not Heat is expected to be available (boolean value) +#heat = false + +# Whether or not Horizon is expected to be available (boolean value) +#horizon = true + +# Whether or not Ironic is expected to be available (boolean value) +#ironic = false + +# Whether or not neutron is expected to be available (boolean value) +#neutron = false + +# Whether or not nova 
is expected to be available (boolean value) +#nova = true + +# Whether or not Sahara is expected to be available (boolean value) +#sahara = false + +# Whether or not swift is expected to be available (boolean value) +#swift = true + +# Whether or not Trove is expected to be available (boolean value) +#trove = false + +# Whether or not Zaqar is expected to be available (boolean value) +#zaqar = false + + +[stress] + +# +# From tempest.config +# + +# Controller host. (string value) +#controller = + +# The number of threads created while stress test. (integer value) +#default_thread_number_per_action = 4 + +# Allows a full cleaning process after a stress test. Caution : this +# cleanup will remove every objects of every tenant. (boolean value) +#full_clean_stack = false + +# Prevent the cleaning (tearDownClass()) between each stress test run +# if an exception occurs during this run. (boolean value) +#leave_dirty_stack = false + +# time (in seconds) between log file error checks. (integer value) +#log_check_interval = 60 + +# Maximum number of instances to create during test. (integer value) +#max_instances = 16 + +# Directory containing log files on the compute nodes (string value) +#nova_logdir = + +# Controller host. (string value) +#target_controller = + +# regexp for list of log files. (string value) +#target_logfiles = + +# Path to private key. (string value) +#target_private_key_path = + +# ssh user. (string value) +#target_ssh_user = + + +[telemetry] + +# +# From tempest.config +# + +# Catalog type of the Telemetry service. (string value) +#catalog_type = metering + +# The endpoint type to use for the telemetry service. 
(string value) +#endpoint_type = publicURL + +# This variable is used as flag to enable notification tests (boolean +# value) +#too_slow_to_test = true + + +[volume] +build_timeout = 300 + +# +# From tempest.config +# + +# Name of the backend1 (must be declared in cinder.conf) (string +# value) +#backend1_name = BACKEND_1 + +# Name of the backend2 (must be declared in cinder.conf) (string +# value) +#backend2_name = BACKEND_2 + +# Time in seconds between volume availability checks. (integer value) +#build_interval = 1 + +# Timeout in seconds to wait for a volume to become available. +# (integer value) +#build_timeout = 300 + +# Catalog type of the Volume Service (string value) +#catalog_type = volume + +# Disk format to use when copying a volume to image (string value) +#disk_format = raw + +# The endpoint type to use for the volume service. (string value) +#endpoint_type = publicURL + +# The volume region name to use. If empty, the value of +# identity.region is used instead. If no such region is found in the +# service catalog, the first found one is used. (string value) +#region = + +# Backend protocol to target when creating volume types (string value) +#storage_protocol = iSCSI + +# Backend vendor to target when creating volume types (string value) +#vendor_name = Open Source + +# Default size in GB for volumes created by volumes tests (integer +# value) +#volume_size = 1 + + +[volume-feature-enabled] + +# +# From tempest.config +# + +# A list of enabled volume extensions with a special entry all which +# indicates every extension is enabled. 
Empty list indicates all +# extensions are disabled (list value) +#api_extensions = all + +# Is the v1 volume API enabled (boolean value) +#api_v1 = true + +# Is the v2 volume API enabled (boolean value) +#api_v2 = true + +# Runs Cinder volumes backup test (boolean value) +#backup = true + +# Runs Cinder multi-backend test (requires 2 backends) (boolean value) +#multi_backend = false + +# Runs Cinder volume snapshot test (boolean value) +#snapshot = true + + +[tecs] +host_ip = 10.43.211.177 +host_username = root +host_password = ossdbg1 + + +v4_flavor_id = 51a6f3f3-816d-4bc6-a762-797fd4e78a2a +v4_omcnet_id = 3d409993-f4cb-4acd-8ab9-5258feab0f01 +v4_basenet_id = 974588bb-b552-46d0-ae83-ffe5ac7e4205 +v4_fabricnet_id = +v4_outnet_id = + +#added for network test +network_image_id = be99d4c9-5c01-47ba-a075-9c0102b6fb44 +network_flavor_id = c79328e9-71dc-4870-bafe-a77f484d5d0e +network_ssh_user = root +network_ssh_pwd = ossdbg1 +network_server_ipaddr = 10.43.20.231 +network_server_ssh_user = root +network_server_ssh_pwd = q1w2e3 +network_server_ftp_user = vr +network_server_ftp_pwd = vr +network_lb_provider= haproxy + +[daisy] +#°Ñip»»³É°²×°daisyµÄʵ¼Êip +daisy_endpoint=http://127.0.0.1:19292 + +#жÔصļ¯Èºid£¬Èç¹û²»Ìîд¾Í´ÓÊý¾Ý¿âÖжÁÈ¡×îºóÒ»¸ö +cluster_id= + +#°²×°tecs(Ò»¶ÔhaµÄÁ½¸ö¿ØÖƽڵã)µÄÄ¿±ê»úµÄÍø¿Ú£¬ÒÔ¼°¸ÃÍø¿ÚÉϵÄip£¬macµØÖ·£¬¸¡¶¯ip +#ha°²×°Ê±Á½¸ö¿ØÖƽڵãµÄÍø¿ÚÃû±ØÐëÒ»Ñù£¬¸¡¶¯ip£¨vip£©±ØÐ뱣֤û±»Õ¼Óà +#Ò»¶Ô¿ØÖƽڵãµÄÁ½¸öipºÍmacµØÖ·£¬Ê¹ÓöººÅ·Ö¿ª¡£¸ñʽÀýÈç(10.43.177.127,10.43.177.128) +install_ha_eth_name=eth1 +install_ha_ip=192.0.1.149,192.0.2.19 +install_ha_netmask=255.0.0.0,255.0.0.0 +install_ha_mac=fa:16:3e:3a:51:4e,fa:16:3e:fc:05:3d +install_ha_vip=192.0.1.254 + +#°²×°tecsµÄ³¬Ê±Ê±¼ä(Ãë) +time_out=10800 + diff --git a/test/tempest/openstack-common.conf b/test/tempest/openstack-common.conf new file mode 100644 index 00000000..19202957 --- /dev/null +++ b/test/tempest/openstack-common.conf @@ -0,0 +1,8 @@ +[DEFAULT] + +# The list of modules to copy from openstack-common 
+module=install_venv_common +module=versionutils + +# The base module to hold the copy of openstack.common +base=tempest diff --git a/test/tempest/requirements.txt b/test/tempest/requirements.txt new file mode 100644 index 00000000..0d7fc0dc --- /dev/null +++ b/test/tempest/requirements.txt @@ -0,0 +1,26 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. +pbr>=0.6,!=0.7,<1.0 +anyjson>=0.3.3 +httplib2>=0.7.5 +jsonschema>=2.0.0,<3.0.0 +testtools>=0.9.36,!=1.2.0 +boto>=2.32.1 +paramiko>=1.13.0 +netaddr>=0.7.12 +python-glanceclient>=0.15.0 +python-cinderclient>=1.1.0 +python-heatclient>=0.3.0 +testrepository>=0.0.18 +oslo.concurrency>=1.8.0,<1.9.0 # Apache-2.0 +oslo.config>=1.9.3,<1.10.0 # Apache-2.0 +oslo.i18n>=1.5.0,<1.6.0 # Apache-2.0 +oslo.log>=1.0.0,<1.1.0 # Apache-2.0 +oslo.serialization>=1.4.0,<1.5.0 # Apache-2.0 +oslo.utils>=1.4.0,<1.5.0 # Apache-2.0 +six>=1.9.0 +iso8601>=0.1.9 +fixtures>=0.3.14 +testscenarios>=0.4 +tempest-lib>=0.4.0 diff --git a/test/tempest/run_tempest.sh b/test/tempest/run_tempest.sh new file mode 100644 index 00000000..5a9b7425 --- /dev/null +++ b/test/tempest/run_tempest.sh @@ -0,0 +1,146 @@ +#!/usr/bin/env bash + +function usage { + echo "Usage: $0 [OPTION]..." + echo "Run Tempest test suite" + echo "" + echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" + echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" + echo " -n, --no-site-packages Isolate the virtualenv from the global Python environment" + echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." 
+ echo " -u, --update Update the virtual environment with any newer package versions" + echo " -s, --smoke Only run smoke tests" + echo " -t, --serial Run testr serially" + echo " -C, --config Config file location" + echo " -h, --help Print this usage message" + echo " -d, --debug Run tests with testtools instead of testr. This allows you to use PDB" + echo " -l, --logging Enable logging" + echo " -L, --logging-config Logging config file location. Default is etc/logging.conf" + echo " -- [TESTROPTIONS] After the first '--' you can pass arbitrary arguments to testr " +} + +testrargs="" +venv=.venv +with_venv=tools/with_venv.sh +serial=0 +always_venv=0 +never_venv=0 +no_site_packages=0 +debug=0 +force=0 +wrapper="" +config_file="" +update=0 +logging=0 +logging_config=etc/logging.conf + +if ! options=$(getopt -o VNnfusthdC:lL: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,serial,help,debug,config:,logging,logging-config: -- "$@") +then + # parse error + usage + exit 1 +fi + +eval set -- $options +first_uu=yes +while [ $# -gt 0 ]; do + case "$1" in + -h|--help) usage; exit;; + -V|--virtual-env) always_venv=1; never_venv=0;; + -N|--no-virtual-env) always_venv=0; never_venv=1;; + -n|--no-site-packages) no_site_packages=1;; + -f|--force) force=1;; + -u|--update) update=1;; + -d|--debug) debug=1;; + -C|--config) config_file=$2; shift;; + -s|--smoke) testrargs+="smoke";; + -t|--serial) serial=1;; + -l|--logging) logging=1;; + -L|--logging-config) logging_config=$2; shift;; + --) [ "yes" == "$first_uu" ] || testrargs="$testrargs $1"; first_uu=no ;; + *) testrargs="$testrargs $1";; + esac + shift +done + +if [ -n "$config_file" ]; then + config_file=`readlink -f "$config_file"` + export TEMPEST_CONFIG_DIR=`dirname "$config_file"` + export TEMPEST_CONFIG=`basename "$config_file"` +fi + +if [ $logging -eq 1 ]; then + if [ ! 
-f "$logging_config" ]; then + echo "No such logging config file: $logging_config" + exit 1 + fi + logging_config=`readlink -f "$logging_config"` + export TEMPEST_LOG_CONFIG_DIR=`dirname "$logging_config"` + export TEMPEST_LOG_CONFIG=`basename "$logging_config"` +fi + +cd `dirname "$0"` + +if [ $no_site_packages -eq 1 ]; then + installvenvopts="--no-site-packages" +fi + +function testr_init { + if [ ! -d .testrepository ]; then + ${wrapper} testr init + fi +} + +function run_tests { + testr_init + ${wrapper} find . -type f -name "*.pyc" -delete + export OS_TEST_PATH=./tempest/test_discover + if [ $debug -eq 1 ]; then + if [ "$testrargs" = "" ]; then + testrargs="discover ./tempest/test_discover" + fi + ${wrapper} python -m testtools.run $testrargs + return $? + fi + + if [ $serial -eq 1 ]; then + ${wrapper} testr run --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py + else + ${wrapper} testr run --parallel --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py + fi +} + +if [ $never_venv -eq 0 ] +then + # Remove the virtual environment if --force used + if [ $force -eq 1 ]; then + echo "Cleaning virtualenv..." + rm -rf ${venv} + fi + if [ $update -eq 1 ]; then + echo "Updating virtualenv..." + python tools/install_venv.py $installvenvopts + fi + if [ -e ${venv} ]; then + wrapper="${with_venv}" + else + if [ $always_venv -eq 1 ]; then + # Automatically install the virtualenv + python tools/install_venv.py $installvenvopts + wrapper="${with_venv}" + else + echo -e "No virtual environment found...create one? (Y/n) \c" + read use_ve + if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then + # Install the virtualenv and run the test suite in it + python tools/install_venv.py $installvenvopts + wrapper=${with_venv} + fi + fi + fi +fi + +run_tests +retval=$? 
+ +exit $retval diff --git a/test/tempest/run_tests.sh b/test/tempest/run_tests.sh new file mode 100644 index 00000000..971f89bd --- /dev/null +++ b/test/tempest/run_tests.sh @@ -0,0 +1,150 @@ +#!/usr/bin/env bash + +function usage { + echo "Usage: $0 [OPTION]..." + echo "Run Tempest unit tests" + echo "" + echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" + echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" + echo " -n, --no-site-packages Isolate the virtualenv from the global Python environment" + echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." + echo " -u, --update Update the virtual environment with any newer package versions" + echo " -t, --serial Run testr serially" + echo " -p, --pep8 Just run pep8" + echo " -c, --coverage Generate coverage report" + echo " -h, --help Print this usage message" + echo " -d, --debug Run tests with testtools instead of testr. This allows you to use PDB" + echo " -- [TESTROPTIONS] After the first '--' you can pass arbitrary arguments to testr " +} + +testrargs="" +just_pep8=0 +venv=.venv +with_venv=tools/with_venv.sh +serial=0 +always_venv=0 +never_venv=0 +no_site_packages=0 +debug=0 +force=0 +coverage=0 +wrapper="" +config_file="" +update=0 + +if ! 
options=$(getopt -o VNnfuctphd -l virtual-env,no-virtual-env,no-site-packages,force,update,serial,coverage,pep8,help,debug -- "$@") +then + # parse error + usage + exit 1 +fi + +eval set -- $options +first_uu=yes +while [ $# -gt 0 ]; do + case "$1" in + -h|--help) usage; exit;; + -V|--virtual-env) always_venv=1; never_venv=0;; + -N|--no-virtual-env) always_venv=0; never_venv=1;; + -n|--no-site-packages) no_site_packages=1;; + -f|--force) force=1;; + -u|--update) update=1;; + -d|--debug) debug=1;; + -p|--pep8) let just_pep8=1;; + -c|--coverage) coverage=1;; + -t|--serial) serial=1;; + --) [ "yes" == "$first_uu" ] || testrargs="$testrargs $1"; first_uu=no ;; + *) testrargs="$testrargs $1";; + esac + shift +done + + +cd `dirname "$0"` + +if [ $no_site_packages -eq 1 ]; then + installvenvopts="--no-site-packages" +fi + +function testr_init { + if [ ! -d .testrepository ]; then + ${wrapper} testr init + fi +} + +function run_tests { + testr_init + ${wrapper} find . -type f -name "*.pyc" -delete + export OS_TEST_PATH=./tempest/tests + if [ $debug -eq 1 ]; then + if [ "$testrargs" = "" ]; then + testrargs="discover ./tempest/tests" + fi + ${wrapper} python -m testtools.run $testrargs + return $? + fi + + if [ $coverage -eq 1 ]; then + ${wrapper} python setup.py test --coverage + return $? + fi + + if [ $serial -eq 1 ]; then + ${wrapper} testr run --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py + else + ${wrapper} testr run --parallel --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py + fi +} + +function run_pep8 { + echo "Running flake8 ..." + if [ $never_venv -eq 1 ]; then + echo "**WARNING**:" >&2 + echo "Running flake8 without virtual env may miss OpenStack HACKING detection" >&2 + fi + ${wrapper} flake8 +} + +if [ $never_venv -eq 0 ] +then + # Remove the virtual environment if --force used + if [ $force -eq 1 ]; then + echo "Cleaning virtualenv..." 
+ rm -rf ${venv} + fi + if [ $update -eq 1 ]; then + echo "Updating virtualenv..." + python tools/install_venv.py $installvenvopts + fi + if [ -e ${venv} ]; then + wrapper="${with_venv}" + else + if [ $always_venv -eq 1 ]; then + # Automatically install the virtualenv + python tools/install_venv.py $installvenvopts + wrapper="${with_venv}" + else + echo -e "No virtual environment found...create one? (Y/n) \c" + read use_ve + if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then + # Install the virtualenv and run the test suite in it + python tools/install_venv.py $installvenvopts + wrapper=${with_venv} + fi + fi + fi +fi + +if [ $just_pep8 -eq 1 ]; then + run_pep8 + exit +fi + +run_tests +retval=$? + +if [ -z "$testrargs" ]; then + run_pep8 +fi + +exit $retval diff --git a/test/tempest/setup.cfg b/test/tempest/setup.cfg new file mode 100644 index 00000000..1e7cc2be --- /dev/null +++ b/test/tempest/setup.cfg @@ -0,0 +1,36 @@ +[metadata] +name = tempest +version = 4 +summary = OpenStack Integration Testing +description-file = + README.rst +author = OpenStack +author-email = openstack-dev@lists.openstack.org +home-page = http://www.openstack.org/ +classifier = + Intended Audience :: Information Technology + Intended Audience :: System Administrators + Intended Audience :: Developers + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python + Programming Language :: Python :: 2 + Programming Language :: Python :: 2.7 + +[entry_points] +console_scripts = + verify-tempest-config = tempest.cmd.verify_tempest_config:main + javelin2 = tempest.cmd.javelin:main + run-tempest-stress = tempest.cmd.run_stress:main + tempest-cleanup = tempest.cmd.cleanup:main + +oslo.config.opts = + tempest.config = tempest.config:list_opts + +[build_sphinx] +all_files = 1 +build-dir = doc/build +source-dir = doc/source + +[wheel] +universal = 1 diff --git a/test/tempest/setup.py b/test/tempest/setup.py new file 
mode 100644 index 00000000..73637574 --- /dev/null +++ b/test/tempest/setup.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT +import setuptools + +# In python < 2.7.4, a lazy loading of package `pbr` will break +# setuptools if some other modules registered functions in `atexit`. +# solution from: http://bugs.python.org/issue15881#msg170215 +try: + import multiprocessing # noqa +except ImportError: + pass + +setuptools.setup( + setup_requires=['pbr'], + pbr=True) diff --git a/test/tempest/tempest/__init__.py b/test/tempest/tempest/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/tempest/tempest/api/README.rst b/test/tempest/tempest/api/README.rst new file mode 100644 index 00000000..91e6ad6b --- /dev/null +++ b/test/tempest/tempest/api/README.rst @@ -0,0 +1,52 @@ +.. _api_field_guide: + +Tempest Field Guide to API tests +================================ + + +What are these tests? +--------------------- + +One of Tempest's prime function is to ensure that your OpenStack cloud +works with the OpenStack API as documented. The current largest +portion of Tempest code is devoted to test cases that do exactly this. 
+ +It's also important to test not only the expected positive path on +APIs, but also to provide them with invalid data to ensure they fail +in expected and documented ways. Over the course of the OpenStack +project Tempest has discovered many fundamental bugs by doing just +this. + +In order for some APIs to return meaningful results, there must be +enough data in the system. This means these tests might start by +spinning up a server, image, etc, then operating on it. + + +Why are these tests in tempest? +------------------------------- + +This is one of the core missions for the Tempest project, and where it +started. Many people use this bit of function in Tempest to ensure +their clouds haven't broken the OpenStack API. + +It could be argued that some of the negative testing could be done +back in the projects themselves, and we might evolve there over time, +but currently in the OpenStack gate this is a fundamentally important +place to keep things. + + +Scope of these tests +-------------------- + +API tests should always use the Tempest implementation of the +OpenStack API, as we want to ensure that bugs aren't hidden by the +official clients. + +They should test specific API calls, and can build up complex state if +it's needed for the API call to be meaningful. + +They should send not only good data, but bad data at the API and look +for error codes. + +They should all be able to be run on their own, not depending on the +state created by a previous test. 
diff --git a/test/tempest/tempest/api/__init__.py b/test/tempest/tempest/api/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/tempest/tempest/api/daisy/__init__.py b/test/tempest/tempest/api/daisy/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/tempest/tempest/api/daisy/base.py b/test/tempest/tempest/api/daisy/base.py new file mode 100644 index 00000000..95f988a8 --- /dev/null +++ b/test/tempest/tempest/api/daisy/base.py @@ -0,0 +1,562 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
from oslo_log import log as logging
from tempest import config
import tempest.test
from daisyclient.v1 import client as daisy_client
from ironicclient import client as ironic_client

CONF = config.CONF

LOG = logging.getLogger(__name__)


class BaseDaisyTest(tempest.test.BaseTestCase):
    """Base class for daisy API test cases.

    Provides one thin wrapper per daisyclient resource call (clusters,
    hwm, hosts, discovered hosts, networks, roles, configs, config
    sets/files, services, components, disk arrays, templates) plus
    ``_clean_all_*`` helpers that purge every object of a kind, so the
    individual tests never talk to the client directly.

    NOTE(review): most wrappers are ``@classmethod`` yet name their first
    parameter ``self``; that naming is preserved from the original code so
    no call site changes.
    """

    @classmethod
    def skip_checks(cls):
        super(BaseDaisyTest, cls).skip_checks()

    @classmethod
    def resource_setup(cls):
        """Create the daisy and ironic clients shared by the test class."""
        super(BaseDaisyTest, cls).resource_setup()
        cls.daisy_version = 1.0
        cls.daisy_endpoint = CONF.daisy.daisy_endpoint
        cls.daisy_client = daisy_client.Client(version=cls.daisy_version,
                                               endpoint=cls.daisy_endpoint)
        # Local ironic/discoverd endpoint; the token is not validated there.
        cls.ironic_client = ironic_client.get_client(
            1, os_auth_token='fake', ironic_url='http://127.0.0.1:6385/v1')

    @classmethod
    def resource_cleanup(cls):
        super(BaseDaisyTest, cls).resource_cleanup()

    # -- clusters ----------------------------------------------------------

    @classmethod
    def add_cluster(self, **cluster_meta):
        return self.daisy_client.clusters.add(**cluster_meta)

    @classmethod
    def update_cluster(self, cluster_id, **cluster_meta):
        return self.daisy_client.clusters.update(cluster_id, **cluster_meta)

    @classmethod
    def list_clusters(self, **cluster_meta):
        return self.daisy_client.clusters.list(**cluster_meta)

    @classmethod
    def list_filter_clusters(self, **cluster_meta):
        # NOTE(review): the kwargs dict is inserted into itself under the
        # 'filters' key (original behaviour; presumably what daisyclient
        # expects) -- confirm against the client before changing.
        cluster_meta['filters'] = cluster_meta
        return self.daisy_client.clusters.list(**cluster_meta)

    @classmethod
    def get_cluster(self, cluster_id):
        return self.daisy_client.clusters.get(cluster_id)

    @classmethod
    def _clean_all_cluster(self):
        # Materialize the listing before deleting so we never delete while
        # the server-side generator is still being consumed.
        for cluster in list(self.daisy_client.clusters.list()):
            self.delete_cluster(cluster)

    # -- hwm ---------------------------------------------------------------

    @classmethod
    def add_hwm(self, **hwm_meta):
        return self.daisy_client.hwm.add(**hwm_meta)

    @classmethod
    def delete_hwm(self, hwm_meta):
        self.daisy_client.hwm.delete(hwm_meta)

    @classmethod
    def update_hwm(self, hwm_id, **hwm_meta):
        return self.daisy_client.hwm.update(hwm_id, **hwm_meta)

    @classmethod
    def _clean_all_hwm(self):
        for hwm in list(self.daisy_client.hwm.list()):
            self.delete_hwm(hwm)

    @classmethod
    def list_hwm(self, **hwm_meta):
        # Same self-referential 'filters' convention as list_filter_clusters.
        hwm_meta['filters'] = hwm_meta
        return self.daisy_client.hwm.list(**hwm_meta)

    @classmethod
    def get_hwm_detail(self, hwm_meta):
        return self.daisy_client.hwm.get(hwm_meta)

    # -- hosts -------------------------------------------------------------

    @classmethod
    def add_host(self, **host_meta):
        return self.daisy_client.hosts.add(**host_meta)

    @classmethod
    def delete_host(self, host_meta):
        self.daisy_client.hosts.delete(host_meta)

    @classmethod
    def update_host(self, host_id, **host_meta):
        return self.daisy_client.hosts.update(host_id, **host_meta)

    @classmethod
    def _clean_all_host(self):
        for host in list(self.daisy_client.hosts.list()):
            self.delete_host(host)

    @classmethod
    def list_host(self, **host_meta):
        host_meta['filters'] = host_meta
        return self.daisy_client.hosts.list(**host_meta)

    @classmethod
    def get_host_detail(self, host_meta):
        return self.daisy_client.hosts.get(host_meta)

    # -- discovered hosts --------------------------------------------------

    @classmethod
    def add_discover_host(self, **host_meta):
        return self.daisy_client.hosts.add_discover_host(**host_meta)

    @classmethod
    def update_discover_host(self, host_id, **host_meta):
        return self.daisy_client.hosts.update_discover_host(
            host_id, **host_meta)

    @classmethod
    def delete_discover_host(self, host_meta):
        self.daisy_client.hosts.delete_discover_host(host_meta)

    @classmethod
    def list_discover_host(self, **host_meta):
        host_meta['filters'] = host_meta
        return self.daisy_client.hosts.list_discover_host(**host_meta)

    @classmethod
    def get_discover_host_detail(self, host_meta):
        return self.daisy_client.hosts.get_discover_host_detail(host_meta)

    @classmethod
    def discover_host(self, **host_meta):
        return self.daisy_client.hosts.discover_host(**host_meta)

    @classmethod
    def _clean_all_discover_host(self):
        for host in list(self.daisy_client.hosts.list_discover_host()):
            self.delete_discover_host(host)

    # -- networks ----------------------------------------------------------

    @classmethod
    def add_network(self, **network_meta):
        return self.daisy_client.networks.add(**network_meta)

    @classmethod
    def get_network(self, network_id):
        return self.daisy_client.networks.get(network_id)

    @classmethod
    def list_network(self, **network_meta):
        network = {'sort_key': 'name',
                   'sort_dir': 'asc',
                   'filters': network_meta}
        return self.daisy_client.networks.list(**network)

    @classmethod
    def update_network(self, network_id, **network_meta):
        return self.daisy_client.networks.update(network_id, **network_meta)

    @classmethod
    def delete_network(self, network_id):
        self.daisy_client.networks.delete(network_id)

    # -- roles -------------------------------------------------------------

    @classmethod
    def list_roles(self, **role_meta):
        return self.daisy_client.roles.list(**role_meta)

    @classmethod
    def add_role(self, **role_meta):
        return self.daisy_client.roles.add(**role_meta)

    @classmethod
    def get_role(self, role_id):
        return self.daisy_client.roles.get(role_id)

    @classmethod
    def delete_role(self, role_id):
        self.daisy_client.roles.delete(role_id)

    @classmethod
    def update_role(self, role_id, **role_meta):
        return self.daisy_client.roles.update(role_id, **role_meta)

    # -- install / uninstall / update lifecycle ----------------------------

    @classmethod
    def install(self, **install_meta):
        return self.daisy_client.install.install(**install_meta)

    @classmethod
    def get_cluster_id(self, cluster_meta):
        """Return ``{'cluster_id': ...}`` for *cluster_meta*.

        When *cluster_meta* is falsy, fall back to listing clusters.
        NOTE(review): in that fallback only the *last* listed cluster
        survives the loop, and an empty listing raises UnboundLocalError;
        both behaviours are preserved from the original code.
        """
        if not cluster_meta:
            for cluster in self.daisy_client.clusters.list():
                cluster_id = {'cluster_id': cluster.id}
        else:
            cluster_id = {'cluster_id': cluster_meta}
        return cluster_id

    @classmethod
    def get_uninstall_status(self, **cluster_id):
        return self.daisy_client.uninstall.query_progress(**cluster_id)

    @classmethod
    def delete_cluster(self, cluster_meta):
        self.daisy_client.clusters.delete(cluster_meta)

    @classmethod
    def uninstall(self, **cluster_id):
        self.daisy_client.uninstall.uninstall(**cluster_id)

    @classmethod
    def update(self, **cluster_id):
        self.daisy_client.update.update(**cluster_id)

    @classmethod
    def get_update_status(self, **cluster_id):
        return self.daisy_client.update.query_progress(**cluster_id)

    @classmethod
    def list_components(self, **component_meta):
        # NOTE(review): duplicate of list_component below; both kept so no
        # caller breaks.
        return self.daisy_client.components.list(**component_meta)

    # -- configs -----------------------------------------------------------

    @classmethod
    def add_config(self, **config_meta):
        return self.daisy_client.configs.add(**config_meta)

    @classmethod
    def get_config(self, config_id):
        return self.daisy_client.configs.get(config_id)

    @classmethod
    def delete_config(self, config_id):
        # The delete API expects {'config': [<id>, ...]}.
        self.daisy_client.configs.delete(**{'config': [config_id]})

    @classmethod
    def _clean_all_config(self):
        # BUG FIX: the original built {'config': [config.id]} inside the
        # loop, referencing the imported ``config`` module instead of the
        # loop variable, so cleanup raised AttributeError.
        for _config in list(self.daisy_client.configs.list()):
            self.daisy_client.configs.delete(**{'config': [_config.id]})

    @classmethod
    def list_config(self):
        return self.daisy_client.configs.list()

    # -- config sets -------------------------------------------------------

    @classmethod
    def cluster_config_set_update(self, **config_set):
        return self.daisy_client.config_sets.cluster_config_set_update(
            **config_set)

    @classmethod
    def cluster_config_set_progress(self, **config_set):
        return self.daisy_client.config_sets.cluster_config_set_progress(
            **config_set)

    @classmethod
    def add_config_set(self, **config_set):
        return self.daisy_client.config_sets.add(**config_set)

    @classmethod
    def update_config_set(self, config_set_id, **config_set):
        return self.daisy_client.config_sets.update(
            config_set_id, **config_set)

    @classmethod
    def get_config_set(self, config_set_id):
        return self.daisy_client.config_sets.get(config_set_id)

    @classmethod
    def list_config_set(self):
        return self.daisy_client.config_sets.list()

    @classmethod
    def delete_config_set(self, config_set_id):
        self.daisy_client.config_sets.delete(config_set_id)

    @classmethod
    def _clean_all_config_set(self):
        for config_set in list(self.daisy_client.config_sets.list()):
            self.daisy_client.config_sets.delete(config_set.id)

    # -- config files ------------------------------------------------------

    @classmethod
    def add_config_file(self, **config_file):
        return self.daisy_client.config_files.add(**config_file)

    @classmethod
    def update_config_file(self, config_file_id, **config_file):
        return self.daisy_client.config_files.update(
            config_file_id, **config_file)

    @classmethod
    def get_config_file(self, config_file_id):
        return self.daisy_client.config_files.get(config_file_id)

    @classmethod
    def list_config_file(self):
        return self.daisy_client.config_files.list()

    @classmethod
    def delete_config_file(self, config_file_id):
        self.daisy_client.config_files.delete(config_file_id)

    @classmethod
    def _clean_all_config_file(self):
        for config_file in list(self.daisy_client.config_files.list()):
            self.daisy_client.config_files.delete(config_file.id)

    # -- services ----------------------------------------------------------

    @classmethod
    def list_service(self, **service_meta):
        return self.daisy_client.services.list(**service_meta)

    @classmethod
    def add_service(self, **service_meta):
        return self.daisy_client.services.add(**service_meta)

    @classmethod
    def get_service(self, service_id):
        return self.daisy_client.services.get(service_id)

    @classmethod
    def delete_service(self, service_id):
        self.daisy_client.services.delete(service_id)

    @classmethod
    def update_service(self, service_id, **service_meta):
        return self.daisy_client.services.update(service_id, **service_meta)

    # -- components --------------------------------------------------------

    @classmethod
    def list_component(self, **component_meta):
        return self.daisy_client.components.list(**component_meta)

    @classmethod
    def add_component(self, **component_meta):
        return self.daisy_client.components.add(**component_meta)

    @classmethod
    def get_component(self, component_id):
        return self.daisy_client.components.get(component_id)

    @classmethod
    def delete_component(self, component_id):
        self.daisy_client.components.delete(component_id)

    @classmethod
    def update_component(self, component_id, **component_meta):
        return self.daisy_client.components.update(
            component_id, **component_meta)

    # -- disk arrays: cinder volumes and service disks ---------------------

    @classmethod
    def add_cinder_volume(self, **cinder_volume_meta):
        return self.daisy_client.disk_array.cinder_volume_add(
            **cinder_volume_meta)

    @classmethod
    def update_cinder_volume(self, cinder_volume_id, **cinder_volume_meta):
        return self.daisy_client.disk_array.cinder_volume_update(
            cinder_volume_id, **cinder_volume_meta)

    @classmethod
    def delete_cinder_volume(self, cinder_volume_id):
        self.daisy_client.disk_array.cinder_volume_delete(cinder_volume_id)

    @classmethod
    def list_cinder_volume(self, **cinder_volume_meta):
        cinder_volume_meta['filters'] = cinder_volume_meta
        return self.daisy_client.disk_array.cinder_volume_list(
            **cinder_volume_meta)

    @classmethod
    def get_cinder_volume_detail(self, cinder_volume_id):
        return self.daisy_client.disk_array.cinder_volume_detail(
            cinder_volume_id)

    @classmethod
    def add_service_disk(self, **service_disk_meta):
        return self.daisy_client.disk_array.service_disk_add(
            **service_disk_meta)

    @classmethod
    def update_service_disk(self, service_disk_id, **service_disk_meta):
        return self.daisy_client.disk_array.service_disk_update(
            service_disk_id, **service_disk_meta)

    @classmethod
    def delete_service_disk(self, service_disk_id):
        self.daisy_client.disk_array.service_disk_delete(service_disk_id)

    @classmethod
    def list_service_disk(self, **service_disk_meta):
        service_disk_meta['filters'] = service_disk_meta
        return self.daisy_client.disk_array.service_disk_list(
            **service_disk_meta)

    @classmethod
    def get_service_disk_detail(self, service_disk_id):
        return self.daisy_client.disk_array.service_disk_detail(
            service_disk_id)

    # -- ironic physical nodes ---------------------------------------------

    @classmethod
    def _clean_all_physical_node(self):
        for physical_node in list(self.ironic_client.physical_node.list()):
            self.ironic_client.physical_node.delete(physical_node.uuid)

    # -- templates ---------------------------------------------------------

    @classmethod
    def template_add(self, **template):
        return self.daisy_client.template.add(**template)

    @classmethod
    def template_update(self, template_id, **template):
        return self.daisy_client.template.update(template_id, **template)

    @classmethod
    def template_detail(self, template_id):
        return self.daisy_client.template.get(template_id)

    @classmethod
    def template_list(self, **kwargs):
        return self.daisy_client.template.list(**kwargs)

    @classmethod
    def template_delete(self, template_id):
        return self.daisy_client.template.delete(template_id)

    @classmethod
    def export_db_to_json(self, **kwargs):
        return self.daisy_client.template.export_db_to_json(**kwargs)

    @classmethod
    def import_json_to_template(self, **kwargs):
        return self.daisy_client.template.import_json_to_template(**kwargs)

    @classmethod
    def import_template_to_db(self, **kwargs):
        return self.daisy_client.template.import_template_to_db(**kwargs)

    @classmethod
    def _clean_all_template(self):
        for template in list(self.daisy_client.template.list()):
            self.template_delete(template.id)

    @classmethod
    def host_to_template(self, **kwargs):
        return self.daisy_client.template.host_to_template(**kwargs)

    @classmethod
    def template_to_host(self, **kwargs):
        return self.daisy_client.template.template_to_host(**kwargs)

    @classmethod
    def host_template_list(self, **kwargs):
        return self.daisy_client.template.host_template_list(**kwargs)

    @classmethod
    def delete_host_template(self, **kwargs):
        return self.daisy_client.template.delete_host_template(**kwargs)
#!/bin/bash
# getnodeinfo.sh -- collect hardware facts (system, cpu, memory, nics,
# disks) into data.json with jq and POST the result to the discoverd
# "continue" endpoint.  The prologue (update/get_system_info) is included
# here so the script is complete and self-contained.

DISCOVERD_URL="http://127.0.0.1:5050/v1/continue"

# Apply a jq filter in place on data.json.
function update() {
    jq "$1" data.json > temp.json || echo "Error: update $1 to json failed"
    mv temp.json data.json
}

# DMI-level system identity (dmidecode requires root).
function get_system_info(){
    PRODUCT=$(dmidecode -s system-product-name)
    FAMILY=$(dmidecode -t system|grep "Family"|cut -d ":" -f2)
    VERSION=$(dmidecode -s system-version)
    SERIAL=$(dmidecode -s system-serial-number)
    MANUFACTURER=$(dmidecode -s system-manufacturer)
    UUID=$(dmidecode -s system-uuid)
    # Hostname is deliberately a fixed placeholder, not `hostname -f`.
    FQDN='Hostname'
    echo '{"system":{}}' > data.json
    update ".system[\"product\"] = \"$PRODUCT\""
    update ".system[\"family\"] = \"$FAMILY\""
    update ".system[\"fqdn\"] = \"$FQDN\""
    update ".system[\"version\"] = \"$VERSION\""
    update ".system[\"serial\"] = \"$SERIAL\""
    update ".system[\"manufacturer\"] = \"$MANUFACTURER\""
    update ".system[\"uuid\"] = \"$UUID\""
}

# Socket count (distinct physical ids) and logical processor count,
# plus model/frequency per logical cpu.
function get_cpu_info(){
    REAL=$(cat /proc/cpuinfo |grep "physical id"|sort |uniq|wc -l)
    TOTAL=$(cat /proc/cpuinfo |grep "processor"|wc -l)
    update ".cpu[\"real\"] = $REAL"
    update ".cpu[\"total\"] = $TOTAL"

    for i in $(seq $TOTAL)
    do
        if [ ! -z "$i" ]; then
            SPEC_MODEL=$(cat /proc/cpuinfo | grep name | cut -f2 -d:|sed -n $i"p")
            SPEC_FRE=$(cat /proc/cpuinfo | grep MHz | cut -f2 -d:|sed -n $i"p")
            update ".cpu[\"spec_$i\"] = {model:\"$SPEC_MODEL\", frequency:$SPEC_FRE}"
        fi
    done
}

# Physical memory arrays, their slot counts/max capacity, and each DIMM.
function get_memory_info(){
    PHY_NUM=$(dmidecode -t memory|grep "Physical Memory Array"|wc -l)
    TOTAL_MEM=$(cat /proc/meminfo |grep MemTotal |cut -d ":" -f2)
    update ".memory[\"total\"] = \"$TOTAL_MEM\""
    for num in $(seq $PHY_NUM)
    do
        SLOTS=$(dmidecode -t memory |grep "Number Of Devices" |cut -d ":" -f2|sed -n $num"p")
        MAX_CAP=$(dmidecode -t memory |grep "Maximum Capacity" |cut -d ":" -f2|sed -n $num"p")
        update ".memory[\"phy_memory_$num\"] = {slots:\"$SLOTS\", maximum_capacity:\"$MAX_CAP\"}"

        for i in $(seq $SLOTS)
        do
            if [ ! -z "$i" ]; then
                DEVICE_FRE=$(dmidecode -t memory |grep "Speed" |cut -d ":" -f2|sed -n $i"p")
                DEVICE_TYPE=$(dmidecode -t memory |grep 'Type:' |grep -v "Error Correction Type"|cut -d ":" -f2|sed -n $i"p")
                DEVICE_SIZE=$(dmidecode -t memory |grep Size |cut -d ":" -f2|sed -n $i"p")
                update ".memory[\"phy_memory_$num\"][\"devices_$i\"] = {frequency:\"$DEVICE_FRE\", type:\"$DEVICE_TYPE\", size:\"$DEVICE_SIZE\"}"
            fi
        done
    done
}

# Every non-loopback interface: mac, ip, netmask, pci address, link state
# and current/max speed as reported by ethtool.
function get_net_info(){
    for iface in $(ls /sys/class/net/ | grep -v lo)
    do
        NAME=$iface
        MAC=$(ip link show $iface | awk '/ether/ {print $2}')
        IP=$(ip addr show $iface | awk '/inet / { sub(/\/.*/, "", $2); print $2 }')
        NETMASK=$(ifconfig $iface | grep netmask | awk '{print $4}')
        STATE=$(ip link show $iface | awk '/mtu/ {print $3}')
        PCI=$(ethtool -i $iface|grep "bus-info"|cut -d " " -f2)
        CURRENT_SPEED=$(ethtool $iface |grep Speed |awk -F " " '{print $2}')
        # "Supported link modes" may span several lines; when the pause-frame
        # line directly follows it, the one-line form is used instead.
        LINE=$(ethtool $iface|grep -n "Supported pause frame use"|awk -F ":" '{print $1}')
        LINE_SPEED=$(ethtool $iface|grep -n "Supported link modes"|awk -F ":" '{print $1}')
        if [ $LINE -eq $LINE_SPEED ]; then
            MAX_SPEED=$(ethtool $iface|grep "Supported link modes"|cut -d ":" -f2)
        else
            MAX_SPEED=$(ethtool $iface |sed -n $LINE"p"|awk -F " " '{print $1}')
        fi

        UP="UP"
        if [[ "$STATE" =~ "$UP" ]]; then
            STATE="up"
        else
            STATE="down"
        fi
        if [ ! -z "$MAC" ]; then
            update ".interfaces[\"$iface\"] = {mac: \"$MAC\", ip: \"$IP\", netmask: \"$NETMASK\", name: \"$iface\", max_speed: \"$MAX_SPEED\", state: \"$STATE\", pci: \"$PCI\", current_speed: \"$CURRENT_SPEED\"}"
        fi
    done
}

# Per-disk name/size/ids/model/removable flag from fdisk, by-path/by-id
# symlinks and hdparm.
function get_disk_info(){
    for disk in $(fdisk -l|grep Disk|grep "/dev" |cut -d ":" -f1|awk -F "/" '{print $NF}')
    do
        DISK_NAME=$disk
        DISK_SIZE=$(fdisk -l|grep Disk|grep "/dev" |grep $disk|cut -d "," -f2)
        # BUG FIX: this assignment was commented out, yet $DISK_DISK was
        # still interpolated below and therefore always empty.
        DISK_DISK=$(ls -l /dev/disk/by-path/|grep $disk"$"|awk '{print $9}')
        DISK_EXTRA_1=$(ls -l /dev/disk/by-id/|grep $disk"$"|awk '{print $9}'|sed -n 1p)
        DISK_EXTRA_2=$(ls -l /dev/disk/by-id/|grep $disk"$"|awk '{print $9}'|sed -n 2p)
        # BUG FIX: hdparm was hard-coded to /dev/sda, so every disk
        # reported sda's model and removable flag.
        MODEL=$(hdparm -I /dev/$disk |grep Model | cut -d ":" -f2)
        REMOVABLE=$(hdparm -I /dev/$disk |grep removable|awk '{print $4}')
        update ".disk[\"$disk\"] = {name: \"$DISK_NAME\", size: \"$DISK_SIZE\", disk: \"$DISK_DISK\", model: \"$MODEL\", removable: \"$REMOVABLE\",extra: [\"$DISK_EXTRA_1\", \"$DISK_EXTRA_2\"]}"
    done
}

function main(){
    get_system_info
    get_cpu_info
    get_memory_info
    get_net_info
    get_disk_info
}
main

update ".ipmi_address = \"127.0.0.1\""
update ".data_name = \"baremetal_source\""

echo Collected:
cat data.json

RESULT=$(eval curl -i -X POST \
    "-H 'Accept: application/json'" \
    "-H 'Content-Type: application/json'" \
    "-d @data.json" \
    "$DISCOVERD_URL")

if echo $RESULT | grep "HTTP/1.0 4"; then
    echo "Ironic API returned error: $RESULT"
fi

echo "Node is now discovered! Halting..."
sleep 5
+sleep 5 diff --git a/test/tempest/tempest/api/daisy/v1/mergeLog.py b/test/tempest/tempest/api/daisy/v1/mergeLog.py new file mode 100755 index 00000000..4dc6dc7f --- /dev/null +++ b/test/tempest/tempest/api/daisy/v1/mergeLog.py @@ -0,0 +1,38 @@ +import os +import time +import re + + +def mergeLog(): + xmlHeader = '' + daisyHeader = '' + daisyEnder = '' + + xmlList = [] + xmlList.append(xmlHeader) + xmlList.append(daisyHeader) + + for root, _, files in os.walk(r'.'): + for filename in files: + if (os.path.splitext(filename)[0] != 'daisy' and + os.path.splitext(filename)[0] != 'daisy_sonar' and + os.path.splitext(filename)[1] == '.xml'): + filepath = os.path.join(root, filename) + fin = open(filepath) + xmlList.append(fin.read()[len(xmlHeader):]) + fin.close() + + xmlList.append(daisyEnder) + text = ''.join(xmlList) + + text = re.sub('message=".*?"', 'message=""', text) + fout = open('./daisy.xml', 'w') + fout.write(text) + fout.close() + + text = re.sub('', '', text, flags=re.S) + fout = open('./daisy_sonar.xml', 'w') + fout.write(text) + fout.close() + +mergeLog() diff --git a/test/tempest/tempest/api/daisy/v1/test_cinder_volume.py b/test/tempest/tempest/api/daisy/v1/test_cinder_volume.py new file mode 100755 index 00000000..990b1502 --- /dev/null +++ b/test/tempest/tempest/api/daisy/v1/test_cinder_volume.py @@ -0,0 +1,292 @@ +from tempest.api.daisy import base +from tempest import config +from nose.tools import set_trace +from daisyclient import exc as client_exc +import copy +from fake.logical_network_fake import FakeLogicNetwork as logical_fake + +CONF = config.CONF + + +class DaisyCinderVolumeTest(base.BaseDaisyTest): + + @classmethod + def resource_setup(cls): + super(DaisyCinderVolumeTest, cls).resource_setup() + cls.fake = logical_fake() + + cls.cinder_volume_add_meta = {'disk_array': [{'management_ips': '10.43.177.1,10.43.177.2', + 'pools': 'pool1,pool2', + 'user_name': 'rooot', + 'user_pwd': 'pwd', + 'volume_driver': 'KS3200_FCSAN', + 'volume_type': 
'KISP-1'}]} + + cls.cinder_volume_update_meta = {'management_ips': '10.43.177.3', + 'pools': 'pool3', + 'user_name': 'rooot', + 'user_pwd': 'pwd', + 'volume_driver': 'KS3200_FCSAN', + 'volume_type': 'KISP-1'} + + cls.cluster_meta = {'description': 'desc', + 'logic_networks': [{'name': 'external1', + 'physnet_name': 'phynet2', + 'segmentation_id': 200, + 'segmentation_type': 'vlan', + 'shared': True, + 'subnets': [{'cidr': '192.168.1.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.1.2', + '192.168.1.200']], + 'gateway': '192.168.1.1', + 'name': 'subnet2'}, + {'cidr': '172.16.1.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['172.16.1.130', + '172.16.1.150'], + ['172.16.1.151', + '172.16.1.254']], + 'gateway': '172.16.1.1', + 'name': 'subnet10'}], + 'type': 'external'}, + {'name': 'internal2', + 'physnet_name': 'phynet1', + 'segmentation_id': 1023, + 'segmentation_type': 'vxlan', + 'shared': True, + 'subnets': [{'cidr': '192.168.2.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.2.130', + '192.168.2.254']], + 'gateway': '192.168.2.1', + 'name': 'subnet123'}], + 'type': 'internal'}, + {'name': 'internal1', + 'physnet_name': 'phynet3', + 'segmentation_id': '777', + 'segmentation_type': 'vlan', + 'shared': False, + 'subnets': [{'cidr': '192.168.31.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.31.130', + '192.168.31.254']], + 'gateway': '192.168.31.1', + 'name': 'subnet3'}, + {'cidr': '192.168.4.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.4.130', + '192.168.4.254']], + 'gateway': '192.168.4.1', + 'name': 'subnet4'}], + 'type': 'internal'}], + 'name': 'test', + 'networking_parameters': {'base_mac': 'fa:16:3e:00:00:00', + 'gre_id_range': [2, 2000], + 'net_l23_provider': 'ovs', + 'public_vip': '172.16.0.3', + 'segmentation_type': 'vlan,vxlan', + 'vlan_range': [2, 4094], + 'vni_range': [1000, 
1030]}, + 'networks': [], + 'nodes': [], + 'routers': [{'description': 'router1', + 'external_logic_network': 'external1', + 'name': 'router1', + 'subnets': ['subnet4', 'subnet3', 'subnet2']}, + {'description': 'router2', + 'external_logic_network': 'external1', + 'name': 'router2', + 'subnets': ['subnet10']}]} + cls.role_meta = {'name': 'test_role', + 'description': 'test'} + + def private_network_add(self): + # add network plane + private_network_params = self.fake.fake_private_network_parameters() + private_network_params1 = self.fake.fake_private_network_parameters1() + private_network_params2 = self.fake.fake_private_network_parameters2() + + private_network_params = self.add_network(**private_network_params) + private_network_params1 = self.add_network(**private_network_params1) + private_network_params2 = self.add_network(**private_network_params2) + + self.private_network_id = private_network_params.id + self.private_network_id1 = private_network_params1.id + self.private_network_id2 = private_network_params2.id + + self.cluster_meta['networks'] = [self.private_network_id, + self.private_network_id1, + self.private_network_id2] + + return copy.deepcopy(private_network_params) + + def private_network_delete(self): + set_trace() + self.delete_network(self.private_network_id) + self.delete_network(self.private_network_id1) + self.delete_network(self.private_network_id2) + + def test_add_cinder_volume(self): + self.private_network_add() + cluster_info = self.add_cluster(**self.cluster_meta) + self.role_meta['cluster_id'] = cluster_info.id + role = self.add_role(**self.role_meta) + self.cinder_volume_add_meta['role_id'] = role.id + + cinder_volume_info = self.add_cinder_volume(**self.cinder_volume_add_meta) + self.assertEqual('10.43.177.1,10.43.177.2', + cinder_volume_info.management_ips, + "test_add_cinder_volume failed") + self.delete_cinder_volume(cinder_volume_info.id) + + def test_add_same_cinder_volume(self): + self.private_network_add() + cluster_info = 
self.add_cluster(**self.cluster_meta) + self.role_meta['cluster_id'] = cluster_info.id + role = self.add_role(**self.role_meta) + self.cinder_volume_add_meta['role_id'] = role.id + + cinder_volume_info = self.add_cinder_volume(**self.cinder_volume_add_meta) + self.assertRaisesMessage(client_exc.HTTPBadRequest, + "400 Bad Request: cinder_volume array disks " + "conflict with cinder_volume %s (HTTP 400)" % + cinder_volume_info.id, + self.add_cinder_volume, + **self.cinder_volume_add_meta) + self.delete_cinder_volume(cinder_volume_info.id) + + def test_add_cinder_volume_with_wrong_role(self): + self.cinder_volume_add_meta['role_id'] = 'af47d81c-7ae4-4148-a801-b4a5c6a52074' + + self.assertRaisesMessage(client_exc.HTTPNotFound, + "404 Not Found: The resource could not be " + "found.: Role with identifier " + "af47d81c-7ae4-4148-a801-b4a5c6a52074 not " + "found (HTTP 404)", + self.add_cinder_volume, + **self.cinder_volume_add_meta) + del self.cinder_volume_add_meta['role_id'] + + def test_add_cinder_volume_with_wrong_driver(self): + self.private_network_add() + cluster_info = self.add_cluster(**self.cluster_meta) + self.role_meta['cluster_id'] = cluster_info.id + role = self.add_role(**self.role_meta) + self.cinder_volume_add_meta['role_id'] = role.id + self.cinder_volume_add_meta['disk_array'][0]['volume_driver'] = 'test_driver' + + self.assertRaisesMessage(client_exc.HTTPBadRequest, + "400 Bad Request: volume_driver test_driver " + "is not supported (HTTP 400)", + self.add_cinder_volume, + **self.cinder_volume_add_meta) + del self.cinder_volume_add_meta['role_id'] + self.cinder_volume_add_meta['disk_array'][0]['volume_driver'] = 'KS3200_FCSAN' + + def test_update_cinder_volume(self): + self.private_network_add() + cluster_info = self.add_cluster(**self.cluster_meta) + self.role_meta['cluster_id'] = cluster_info.id + role = self.add_role(**self.role_meta) + self.cinder_volume_add_meta['role_id'] = role.id + cinder_volume_info =
self.add_cinder_volume(**self.cinder_volume_add_meta) + + cinder_volume_update_info = self.update_cinder_volume(cinder_volume_info.id, **self.cinder_volume_update_meta) + self.assertEqual('10.43.177.3', + cinder_volume_update_info.management_ips, + "test_update_cinder_volume failed") + self.delete_cinder_volume(cinder_volume_info.id) + + def test_update_to_same_cinder_volume(self): + self.private_network_add() + cluster_info = self.add_cluster(**self.cluster_meta) + self.role_meta['cluster_id'] = cluster_info.id + role = self.add_role(**self.role_meta) + self.cinder_volume_add_meta['role_id'] = role.id + cinder_volume_info = self.add_cinder_volume(**self.cinder_volume_add_meta) + + cinder_volume_add_meta1 = {'disk_array': [{'management_ips': + '10.43.177.3,10.43.177.4', + 'pools': 'pool1,pool2', + 'user_name': 'rooot', + 'user_pwd': 'pwd', + 'volume_driver': 'KS3200_FCSAN', + 'volume_type': 'KISP-1'}]} + cinder_volume_add_meta1['role_id'] = role.id + cinder_volume_info1 = self.add_cinder_volume(**cinder_volume_add_meta1) + update_meta = {'management_ips': '10.43.177.1,10.43.177.2'} + self.assertRaisesMessage( + client_exc.HTTPBadRequest, + "400 Bad Request: cinder_volume array disks conflict with " + "cinder_volume %s (HTTP 400)" % cinder_volume_info.id, + self.update_cinder_volume, + cinder_volume_info1.id, + **update_meta) + + self.delete_cinder_volume(cinder_volume_info.id) + self.delete_cinder_volume(cinder_volume_info1.id) + + def test_update_cinder_volume_with_wrong_driver(self): + self.private_network_add() + cluster_info = self.add_cluster(**self.cluster_meta) + self.role_meta['cluster_id'] = cluster_info.id + role = self.add_role(**self.role_meta) + self.cinder_volume_add_meta['role_id'] = role.id + cinder_volume_info = self.add_cinder_volume(**self.cinder_volume_add_meta) + + update_meta = {'volume_driver': 'test_driver'} + self.assertRaisesMessage( + client_exc.HTTPBadRequest, + "400 Bad Request: volume_driver test_driver is not supported" + " (HTTP 
400)", + self.update_cinder_volume, cinder_volume_info.id, **update_meta) + self.delete_cinder_volume(cinder_volume_info.id) + + def test_list_cinder_volume(self): + self.private_network_add() + cluster_info = self.add_cluster(**self.cluster_meta) + self.role_meta['cluster_id'] = cluster_info.id + role = self.add_role(**self.role_meta) + self.cinder_volume_add_meta['role_id'] = role.id + cinder_volume_info = self.add_cinder_volume(**self.cinder_volume_add_meta) + + cinder_volume_meta = {} + cinder_volume_flag = False + list_cinder_volume = self.list_cinder_volume(**cinder_volume_meta) + query_cinder_volume_list = [volume_info for volume_info in list_cinder_volume] + + if query_cinder_volume_list: + cinder_volume_flag = True + self.assertTrue(cinder_volume_flag, "test_list_cinder_volume error") + self.delete_cinder_volume(cinder_volume_info.id) + + def test_get_cinder_volume_detail(self): + self.private_network_add() + cluster_info = self.add_cluster(**self.cluster_meta) + self.role_meta['cluster_id'] = cluster_info.id + role = self.add_role(**self.role_meta) + self.cinder_volume_add_meta['role_id'] = role.id + cinder_volume_info = self.add_cinder_volume(**self.cinder_volume_add_meta) + + cinder_volume_detail_info = self.get_cinder_volume_detail(cinder_volume_info.id) + self.assertEqual("10.43.177.1,10.43.177.2", + cinder_volume_detail_info.management_ips, + "test_get_cinder_volume_detail failed") + self.delete_cinder_volume(cinder_volume_info.id) + + def tearDown(self): + if self.cinder_volume_add_meta.get('role_id', None): + self.delete_role(self.cinder_volume_add_meta['role_id']) + del self.cinder_volume_add_meta['role_id'] + if self.role_meta.get('cluster_id', None): + self.delete_cluster(self.role_meta['cluster_id']) + del self.role_meta['cluster_id'] + + super(DaisyCinderVolumeTest, self).tearDown() diff --git a/test/tempest/tempest/api/daisy/v1/test_cluster.py b/test/tempest/tempest/api/daisy/v1/test_cluster.py new file mode 100755 index 00000000..a51e1d8a 
--- /dev/null +++ b/test/tempest/tempest/api/daisy/v1/test_cluster.py @@ -0,0 +1,371 @@ +# -*- coding: UTF-8 -*- +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import logging +from tempest.api.daisy import base +from tempest import config +from fake.logical_network_fake import FakeLogicNetwork as logical_fake +import copy + +CONF = config.CONF +LOG = logging.getLogger(__name__) + + +class TecsClusterTest(base.BaseDaisyTest): + + @classmethod + def resource_setup(cls): + super(TecsClusterTest, cls).resource_setup() + cls.fake = logical_fake() + + cls.host_meta = {'name': 'test_add_host', + 'description': 'test_tempest'} + cls.cluster_meta1 = {'description': 'desc', + 'name': 'test'} + + cls.cluster_meta2 = {'description': 'desc', + 'logic_networks': [{'name': 'external1', + 'physnet_name': 'phynet2', + 'segmentation_id': 200, + 'segmentation_type': 'vlan', + 'shared': True, + 'subnets': [{'cidr': '192.168.1.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.1.2', + '192.168.1.200']], + 'gateway': '192.168.1.1', + 'name': 'subnet2'}, + {'cidr': '172.16.1.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['172.16.1.130', + '172.16.1.150']], + 'gateway': '172.16.1.1', + 'name': 'subnet10'}], + 'type': 'external'}, + {'name': 'internal1', + 'physnet_name': 'phynet1', + 'segmentation_id': '777', + 'segmentation_type': 'vlan', + 'shared': False, + 
'subnets': [{'cidr': '192.168.31.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.31.130', + '192.168.31.254']], + 'gateway': '192.168.31.1', + 'name': 'subnet3'}, + {'cidr': '192.168.4.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.4.130', + '192.168.4.254']], + 'gateway': '192.168.4.1', + 'name': 'subnet4'}], + 'type': 'internal'}], + 'name': 'test', + 'networking_parameters': {'base_mac': 'fa:16:3e:00:00:00', + 'gre_id_range': [2, 2000], + 'net_l23_provider': 'ovs', + 'public_vip': '172.16.0.3', + 'segmentation_type': 'vlan,vxlan', + 'vlan_range': [2, 4094], + 'vni_range': [1000, 1030]}, + 'networks': [], + 'nodes': [], + 'routers': [{'description': 'router1', + 'external_logic_network': 'external1', + 'name': 'router1', + 'subnets': ['subnet4']}, + {'description': 'router2', + 'external_logic_network': 'external1', + 'name': 'router2', + 'subnets': ['subnet10']}]} + cls.cluster_meta3 = {'description': "This cluster's name is null", + 'name': ""} + cls.cluster_meta4 = {'description': "", + 'name': "rwj_test_add_cluster_no_description"} + cls.cluster_meta5 = {'description': "test_add_host5", + 'name': "test_add_host5"} + cls.cluster_meta6 = {'description': "test_add_host6", + 'name': "test_add_host6"} + cls.cluster_meta7 = {'description': "test_add_host7", + 'name': "test_add_host7"} + cls.cluster_meta8 = {'description': "test_add_host7", + 'name': "test_add_host7", + 'auto_scale': 1} + cls.cluster_meta9 = {'description': "test_with_hwm", + 'name': "test_with_hwm", + 'hwm_ip': "10.43.211.63"} + + def private_network_add(self): + private_network_params = self.fake.fake_private_network_parameters() + private_network_params1 = self.fake.fake_private_network_parameters1() + private_network_params2 = self.fake.fake_private_network_parameters2() + + private_network_params = self.add_network(**private_network_params) + private_network_params1 = self.add_network(**private_network_params1) + 
private_network_params2 = self.add_network(**private_network_params2) + + self.private_network_id = private_network_params.id + self.private_network_id1 = private_network_params1.id + self.private_network_id2 = private_network_params2.id + + self.cluster_meta2['networks'] = [self.private_network_id, + self.private_network_id1, + self.private_network_id2] + + return copy.deepcopy(private_network_params) + + def private_network_delete(self): + self.delete_network(self.private_network_id) + self.delete_network(self.private_network_id1) + self.delete_network(self.private_network_id2) + + def test_add_cluster_with_networking_parameters(self): + self.private_network_add() + cluster_info = self.add_cluster(**self.cluster_meta2) + self.assertEqual(self.cluster_meta2['name'], cluster_info.name, "cluster name is not correct") + self.assertEqual(self.cluster_meta2['description'], cluster_info.description, "cluster add interface execute failed") + self.assertEqual(self.cluster_meta2['networking_parameters']['base_mac'], cluster_info.base_mac, "cluster add interface execute failed") + self.assertEqual(self.cluster_meta2['networking_parameters']['net_l23_provider'], cluster_info.net_l23_provider, "cluster add interface execute failed") + self.assertEqual(self.cluster_meta2['networking_parameters']['public_vip'], cluster_info.public_vip, "cluster add interface execute failed") + self.assertEqual(self.cluster_meta2['networking_parameters']['segmentation_type'], cluster_info.segmentation_type, "cluster add interface execute failed") + self.delete_cluster(cluster_info.id) + + def test_add_cluster_no_networking_parameters(self): + self.private_network_add() + cluster_info = self.add_cluster(**self.cluster_meta1) + self.assertEqual(self.cluster_meta1['name'], cluster_info.name, "cluster add interface is not correct") + self.assertEqual(self.cluster_meta1['description'], cluster_info.description, "cluster add interface execute failed") + self.delete_cluster(cluster_info.id) + + def 
test_add_cluster_with_networking_parameters_no_routers(self): + if self.cluster_meta2.get('routers', None): + self.private_network_add() + cluster_temp = self.cluster_meta2.copy() + del cluster_temp['routers'] + cluster_info = self.add_cluster(**cluster_temp) + # cluster = self.get_cluster(cluster_info.id) + self.assertEqual(cluster_temp['name'], cluster_info.name, "cluster add interface execute failed") + self.delete_cluster(cluster_info.id) + + def test_add_cluster_with_nodes(self): + host_info = self.add_host(**self.host_meta) + nodes = [] + nodes.append(host_info.id) + self.cluster_meta1['nodes'] = nodes + self.private_network_add() + cluster_info = self.add_cluster(**self.cluster_meta1) + cluster = self.get_cluster(cluster_info.id) + self.assertEqual(self.cluster_meta1['name'], cluster.name, "add cluster with nodes is not correct") + self.assertEqual(self.cluster_meta1['description'], cluster.description, "add cluster with nodes execute failed") + self.assertEqual(self.cluster_meta1['nodes'], cluster.nodes, "add cluster with nodes execute failed") + self.delete_cluster(cluster_info.id) + self.delete_host(host_info.id) + + def test_update_cluster_with_no_networking_parameters(self): + self.private_network_add() + cluster_info = self.add_cluster(**self.cluster_meta1) + self.cluster_meta1['name'] = "test_name" + self.cluster_meta1['description'] = "test_desc" + cluster_update_info = self.update_cluster(cluster_info.id, **self.cluster_meta1) + self.assertEqual(self.cluster_meta1['name'], cluster_update_info.name, "cluster update interface is not correct") + self.assertEqual(self.cluster_meta1['description'], cluster_update_info.description, "cluster update interface is not correct") + self.delete_cluster(cluster_info.id) + + def test_update_cluster_with_nodes(self): + host_info = self.add_host(**self.host_meta) + nodes = [] + nodes.append(host_info.id) + self.private_network_add() + cluster_info = self.add_cluster(**self.cluster_meta1) + 
self.cluster_meta1['nodes'] = nodes + cluster_update_info = self.update_cluster(cluster_info.id, **self.cluster_meta1) + cluster = self.get_cluster(cluster_info.id) + self.assertEqual(self.cluster_meta1['name'], cluster_update_info.name, "update cluster with nodes is not correct") + self.assertEqual(self.cluster_meta1['description'], cluster_update_info.description, "update cluster with nodes execute failed") + self.assertEqual(self.cluster_meta1['nodes'], cluster.nodes, "update cluster with nodes execute failed") + self.delete_cluster(cluster_info.id) + self.delete_host(host_info.id) + + def test_update_cluster_with_networking_parameters(self): + self.private_network_add() + cluster_info = self.add_cluster(**self.cluster_meta1) + cluster_update_info = self.update_cluster(cluster_info.id, **self.cluster_meta2) + self.assertEqual(self.cluster_meta2['name'], cluster_update_info.name, "update cluster with networking parameters is not correct") + self.assertEqual(self.cluster_meta2['description'], cluster_update_info.description, "update cluster with networking parameters execute failed") + # cluster = self.get_cluster(cluster_info.id) + self.delete_cluster(cluster_info.id) + + def test_update_cluster_with_hwm(self): + self.private_network_add() + cluster_info = self.add_cluster(**self.cluster_meta1) + hwm_meta = {"hwm_ip": "10.43.211.63"} + cluster_update_info = self.update_cluster(cluster_info.id, **hwm_meta) + self.assertEqual("10.43.211.63", cluster_update_info.hwm_ip, + "Update cluster with hwm_ip failed") + self.delete_cluster(cluster_info.id) + + def test_update_cluster_with_networking_parameters_add_router(self): + """ """ + self.private_network_add() + cluster_info = self.add_cluster(**self.cluster_meta2) + router = {'description': 'router3', + 'external_logic_network': 'external1', + 'name': 'router3', + 'subnets': ['subnet3']} + self.cluster_meta2['routers'].append(router) + + cluster_update_info = self.update_cluster(cluster_info.id, **self.cluster_meta2) + 
self.assertEqual(self.cluster_meta2['name'], cluster_update_info.name, "update cluster with networking parameters is not correct") + self.assertEqual(self.cluster_meta2['description'], cluster_update_info.description, "update cluster with networking parameters execute failed") + # cluster = self.get_cluster(cluster_info.id) + self.delete_cluster(cluster_info.id) + + def test_list_cluster(self): + # filter_cluster_meta = {} + # self.list_clusters() + pass + + def test_list_cluster_filter_by_name(self): + self.add_cluster(**self.cluster_meta1) + # cluster_info5 = self.add_cluster(**self.cluster_meta5) + filter_cluster_meta = {'name': "test"} + list_clusters = self.list_filter_clusters(**filter_cluster_meta) + cluster_flag = False + for query_cluster in list_clusters: + if query_cluster.name == "test": + cluster_flag = True + self.assertTrue(cluster_flag, "test_list_cluster_filter_by_name error") + + def test_delete_cluster(self): + cluster_info1 = self.add_cluster(**self.cluster_meta1) + cluster_info5 = self.add_cluster(**self.cluster_meta5) + self.delete_cluster(cluster_info1.id) + cluster_flag = True + cluster_meta = {} + list_cluster = self.list_clusters(**cluster_meta) + for query_cluster in list_cluster: + if query_cluster.id == cluster_info1.id: + cluster_flag = False + self.assertTrue(cluster_flag, "test_delete_cluster error") + self.delete_cluster(cluster_info5.id) + + def test_list_cluster_by_sort_key(self): + cluster_info5 = self.add_cluster(**self.cluster_meta5) + cluster_info6 = self.add_cluster(**self.cluster_meta6) + cluster_info7 = self.add_cluster(**self.cluster_meta7) + cluster_id_sort = sorted([cluster_info5.id, cluster_info6.id, cluster_info7.id], reverse=True) + cluster_meta = {'sort_key': "id"} + list_cluster = self.list_clusters(**cluster_meta) + query_cluster_id_list = [cluster_info.id for cluster_info in list_cluster] + self.assertEqual(query_cluster_id_list, cluster_id_sort, "test_list_cluster_by_sort_key error") + 
self.delete_cluster(cluster_info5.id) + self.delete_cluster(cluster_info6.id) + self.delete_cluster(cluster_info7.id) + + def test_list_cluster_by_sort_dir(self): + cluster_info5 = self.add_cluster(**self.cluster_meta5) + cluster_info6 = self.add_cluster(**self.cluster_meta6) + cluster_info7 = self.add_cluster(**self.cluster_meta7) + cluster_name_sort = ['test_add_host7', 'test_add_host6', 'test_add_host5'] + cluster_meta = {'sort_dir': "desc", 'sort_key': "name"} + list_cluster = self.list_clusters(**cluster_meta) + query_cluster_name_list = [cluster_info.name for cluster_info in list_cluster] + self.assertEqual(query_cluster_name_list, cluster_name_sort, "test_list_cluster_by_sort_dir error") + self.delete_cluster(cluster_info5.id) + self.delete_cluster(cluster_info6.id) + self.delete_cluster(cluster_info7.id) + + def test_list_cluster_by_sort_limit(self): + cluster_info5 = self.add_cluster(**self.cluster_meta5) + cluster_info6 = self.add_cluster(**self.cluster_meta6) + cluster_info7 = self.add_cluster(**self.cluster_meta7) + cluster_meta = {'page_size': "1", 'sort_dir': "desc", 'sort_key': "name"} + list_cluster = self.list_clusters(**cluster_meta) + query_cluster_id_list = [cluster_info.id for cluster_info in list_cluster] + self.assertEqual(query_cluster_id_list, [cluster_info7.id], "test_list_cluster_by_sort_key error") + self.delete_cluster(cluster_info5.id) + self.delete_cluster(cluster_info6.id) + self.delete_cluster(cluster_info7.id) + + def test_add_cluster_with_neutron_parameters(self): + self.private_network_add() + add_host = self.add_cluster(**self.cluster_meta2) + cluster_detail = self.get_cluster(add_host.id) + self.assertEqual(self.cluster_meta2['networking_parameters']['base_mac'], cluster_detail.base_mac, "cluster add networking_parameters failed") + router_flag = False + floating_ranges_flag = False + dns_nameservers_flag = False + if (cluster_detail.routers[0]['name'] == 'router1') or (cluster_detail.routers[0]['name'] == 'router2'): + 
router_flag = True + if (cluster_detail.logic_networks[0]['subnets'][0]['floating_ranges'] == [['192.168.4.130', '192.168.4.254']]) or \ + (cluster_detail.logic_networks[0]['subnets'][0]['floating_ranges'] == [['192.168.1.2', '192.168.1.200']]) or \ + (cluster_detail.logic_networks[0]['subnets'][0]['floating_ranges'] == [['172.16.1.130', '172.16.1.150']]) or \ + (cluster_detail.logic_networks[0]['subnets'][0]['floating_ranges'] == [['192.168.31.130', '192.168.31.254']]): + floating_ranges_flag = True + if cluster_detail.logic_networks[0]['subnets'][0]['dns_nameservers'] == ['8.8.8.8', '8.8.4.4'] or \ + cluster_detail.logic_networks[0]['subnets'][0]['dns_nameservers'] == ['8.8.4.4', '8.8.8.8']: + dns_nameservers_flag = True + self.assertTrue(router_flag, "cluster add floating_ranges failed") + self.assertTrue(floating_ranges_flag, "cluster add floating_ranges failed") + self.assertTrue(dns_nameservers_flag, "cluster add dns_nameservers failed") + self.delete_cluster(add_host.id) + + def test_cluster_detail_info(self): + self.private_network_add() + add_cluster = self.add_cluster(**self.cluster_meta2) + cluster_detail = self.get_cluster(add_cluster.id) + self.assertEqual(self.cluster_meta2['networking_parameters']['base_mac'], cluster_detail.base_mac, "cluster base_mac detail failed") + self.assertEqual(self.cluster_meta2['name'], cluster_detail.name, "cluster name detail failed") + self.assertEqual(self.cluster_meta2['description'], cluster_detail.description, "cluster description detail failed") + self.assertEqual(self.cluster_meta2['networking_parameters']['public_vip'], cluster_detail.public_vip, "cluster public_vip detail failed") + self.private_network_delete() + + def test_add_cluster_no_description(self): + self.private_network_add() + cluster_info = self.add_cluster(**self.cluster_meta4) + if cluster_info.description is None: + self.assertEqual(self.cluster_meta4['description'], cluster_info.description, "cluster add interface execute failed") + print "\n 
===========cluster_description= %s ", cluster_info.description + print "\n ===========STC-F-Daisy_Cluster-0013 run is over ===============" + self.delete_cluster(cluster_info.id) + + def test_add_cluster_set_auto_scale(self): + self.private_network_add() + cluster_info = self.add_cluster(**self.cluster_meta8) + if cluster_info: + self.assertEqual(self.cluster_meta8['auto_scale'], cluster_info.auto_scale, "cluster add set auto_scale=1 failed") + print "\n ===========cluster auto_scale= %s ", cluster_info.auto_scale + print "\n ===========STC-F-Daisy_Cluster-0020 run is over ===============" + self.delete_cluster(cluster_info.id) + + def test_add_cluster_with_hwm(self): + self.private_network_add() + cluster_info = self.add_cluster(**self.cluster_meta9) + if cluster_info: + self.assertEqual(self.cluster_meta9['hwm_ip'], cluster_info.hwm_ip, + "Add cluster with hwm_ip failed") + self.delete_cluster(cluster_info.id) + + def tearDown(self): + if self.cluster_meta1.get('nodes', None): + del self.cluster_meta1['nodes'] + self._clean_all_cluster() + super(TecsClusterTest, self).tearDown() diff --git a/test/tempest/tempest/api/daisy/v1/test_component.py b/test/tempest/tempest/api/daisy/v1/test_component.py new file mode 100755 index 00000000..0562f9fc --- /dev/null +++ b/test/tempest/tempest/api/daisy/v1/test_component.py @@ -0,0 +1,143 @@ + + +from tempest.api.daisy import base +from tempest import config +CONF = config.CONF + + +class DaisyComponentTest(base.BaseDaisyTest): + + @classmethod + def resource_setup(cls): + super(DaisyComponentTest, cls).resource_setup() + cls.host_meta = {'name': 'test_add_host', + 'description': 'test_tempest'} + cls.host_meta_interfaces = {'type': 'ether', + 'name': 'eth1', + 'mac': 'fe80::f816:3eff', + 'ip': '10.43.177.121', + 'netmask': '255.255.254.0', + 'is_deployment': 'True', + 'assigned_networks': ['MANAGEMENT', 'DEPLOYMENT'], + 'slaves': 'eth1'} + + cls.cluster_meta = {'description': 'desc', + 'logic_networks': [{'name': 
'external1', + 'physnet_name': 'PRIVATE', + 'segmentation_id': 200, + 'segmentation_type': 'vlan', + 'shared': True, + 'subnets': [{'cidr': '192.168.1.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.1.2', + '192.168.1.200']], + 'gateway': '192.168.1.1', + 'name': 'subnet2'}, + {'cidr': '172.16.1.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['172.16.1.130', + '172.16.1.150'], + ['172.16.1.151', + '172.16.1.254']], + 'gateway': '172.16.1.1', + 'name': 'subnet10'}], + 'type': 'external'}, + {'name': 'external2', + 'physnet_name': 'PUBLIC', + 'segmentation_id': 1023, + 'segmentation_type': 'vxlan', + 'shared': True, + 'subnets': [{'cidr': '192.168.2.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.2.130', + '192.168.2.254']], + 'gateway': '192.168.2.1', + 'name': 'subnet123'}], + 'type': 'external'}, + {'name': 'internal1', + 'physnet_name': 'PRIVATE', + 'segmentation_id': '777', + 'segmentation_type': 'vlan', + 'shared': False, + 'subnets': [{'cidr': '192.168.31.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.31.130', + '192.168.31.254']], + 'gateway': '192.168.31.1', + 'name': 'subnet3'}, + {'cidr': '192.168.4.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.4.130', + '192.168.4.254']], + 'gateway': '192.168.4.1', + 'name': 'subnet4'}], + 'type': 'internal'}], + 'name': 'test', + 'networking_parameters': {'base_mac': 'fa:16:3e:00:00:00', + 'gre_id_range': [2, 2000], + 'net_l23_provider': 'ovs', + 'public_vip': '172.16.0.3', + 'segmentation_type': 'vlan,vxlan', + 'vlan_range': [2, 4094], + 'vni_range': [1000, 1030]}, + 'networks': [], + 'nodes': [], + 'routers': [{'description': 'router1', + 'external_logic_network': 'external1', + 'name': 'router1', + 'subnets': ['subnet4', 'subnet3']}, + {'description': 'router2', + 'external_logic_network': 'external2', + 'name': 'router2', + 'subnets': 
['subnet2', 'subnet10']}]} + cls.component_meta = {'name': 'test_component', + 'description': 'test'} + + def test_list_component(self): + component_meta = {} + component_flag = True + list_component = self.list_component(**component_meta) + query_component_list = [component_info for component_info in list_component] + component_list = ["camellia", "ha", "loadbalance", "amqp", "database", + "keystone", "ironic", "neutron", + "horizon", "ceilometer", "glance", "heat", "nova", "cinder"] + for query_component in query_component_list: + if query_component.name not in component_list: + component_flag = False + self.assertTrue(component_flag, "test_list_component error") + + def test_add_component(self): + component = self.add_component(**self.component_meta) + self.assertEqual("test_component", component.name, "test_add_component failed") + self.delete_component(component.id) + + def test_component_delete(self): + component = self.add_component(**self.component_meta) + self.delete_component(component.id) + component_flag = True + component_meta = {} + list_component = self.list_component(**component_meta) + query_component_list = [component_info for component_info in list_component] + for query_component in query_component_list: + if component.name == query_component.name: + component_flag = False + self.assertTrue(component_flag, "test_list_component error") + + def test_get_component_detail(self): + add_component_info = self.add_component(**self.component_meta) + get_component = self.get_component(add_component_info.id) + self.assertEqual('test_component', get_component.name) + self.delete_component(get_component.id) + + def test_update_component(self): + add_component_info = self.add_component(**self.component_meta) + update_component_meta = {'name': 'test_update_component', + 'description': 'test_tempest'} + update_component_info = self.update_component(add_component_info.id, **update_component_meta) + self.assertEqual("test_update_component", 
update_component_info.name, "test_update_component_with_cluster failed") + self.delete_component(add_component_info.id) diff --git a/test/tempest/tempest/api/daisy/v1/test_config_file.py b/test/tempest/tempest/api/daisy/v1/test_config_file.py new file mode 100755 index 00000000..84490579 --- /dev/null +++ b/test/tempest/tempest/api/daisy/v1/test_config_file.py @@ -0,0 +1,70 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from tempest.api.daisy import base +from tempest import config +CONF = config.CONF + + +class DaisyConfigFileTest(base.BaseDaisyTest): + @classmethod + def resource_setup(cls): + super(DaisyConfigFileTest, cls).resource_setup() + + def test_add_config_file(self): + config_file = {'name': 'add_config_file', + 'description': 'config_file_test'} + add_config_file = self.add_config_file(**config_file) + self.assertEqual('add_config_file', add_config_file.name) + + def test_update_config_file(self): + config_file_meta = {'name': 'add_config_file', + 'description': 'config_file_test'} + add_config_file = self.add_config_file(**config_file_meta) + + update_config_file_meta = {'name': 'update_config_file'} + update_config_file = self.update_config_file(add_config_file.id, **update_config_file_meta) + + self.assertEqual('update_config_file', update_config_file.name) + + def test_get_config_file(self): + config_file_meta = {'name': 'add_config_file', + 'description': 'config_file_test'} + 
add_config_file = self.add_config_file(**config_file_meta) + + get_config_file = self.get_config_file(add_config_file.id) + + self.assertEqual('add_config_file', get_config_file.name) + + def test_list_config_file(self): + config_file_flag = False + config_file_meta = {'name': 'add_config_file', + 'description': 'config_file_test'} + self.add_config_file(**config_file_meta) + list_config_file = self.list_config_file() + config_file_list = [config_file for config_file in list_config_file] + if config_file_list: + config_file_flag = True + self.assertTrue(config_file_flag, "test_list_config_file error") + + def test_delete_config_file(self): + config_file = {'name': 'add_config_file', + 'description': 'config_file_test'} + add_config_file = self.add_config_file(**config_file) + self.delete_config_file(add_config_file.id) + + def tearDown(self): + self._clean_all_config_file() + super(DaisyConfigFileTest, self).tearDown() diff --git a/test/tempest/tempest/api/daisy/v1/test_discover_host.py b/test/tempest/tempest/api/daisy/v1/test_discover_host.py new file mode 100755 index 00000000..2a3f3ad2 --- /dev/null +++ b/test/tempest/tempest/api/daisy/v1/test_discover_host.py @@ -0,0 +1,121 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from tempest.api.daisy import base +from tempest import config +import time +from daisyclient import exc as client_exc +from fake.logical_network_fake import FakeLogicNetwork as logical_fake +CONF = config.CONF + + +class DaisyDiscoverHostTest(base.BaseDaisyTest): + @classmethod + def resource_setup(cls): + super(DaisyDiscoverHostTest, cls).resource_setup() + cls.fake = logical_fake() + + cls.host_meta = {'ip': '127.0.0.1', + 'passwd': 'ossdbg1'} + + def test_add_dicover_host(self): + host = self.add_discover_host(**self.host_meta) + self.assertEqual("init", host.status, "add discover host failed") + self.delete_discover_host(host.id) + + def test_delete_dicover_host(self): + host = self.add_discover_host(**self.host_meta) + self.delete_discover_host(host.id) + + def test_list_discover_host(self): + host_meta = {'ip': '127.0.0.2', 'passwd': 'ossdbg2'} + self.add_discover_host(**self.host_meta) + self.add_discover_host(**host_meta) + query_hosts = self.list_discover_host() + hosts = [host for host in query_hosts] + host_count = len(hosts) + self.assertEqual(2, host_count, "list discover host failed") + + def test_update_discover_host(self): + add_host_meta = {'ip': '127.0.0.2', 'passwd': 'ossdbg2', 'user': 'root'} + host_1 = self.add_discover_host(**add_host_meta) + self.assertEqual("root", host_1.user, "add discover host failed") + + update_host_meta = {'ip': '127.0.0.2', 'passwd': 'ossdbg1', 'user': 'root2'} + update_host = self.update_discover_host(host_1.id, **update_host_meta) + self.assertEqual("ossdbg1", update_host.passwd, "update discover host failed") + self.assertEqual("root2", update_host.user, "update discover host failed") + + def test_get_discover_host_detail(self): + add_host_meta = {'ip': '127.0.0.2', 'passwd': 'ossdbg2', 'user': 'root'} + host_1 = self.add_discover_host(**add_host_meta) + host_info = self.get_discover_host_detail(host_1.id) + self.assertEqual("root", host_info.user, "get discover host failed") + self.assertEqual("ossdbg2", 
host_info.passwd, "get discover host failed") + self.assertEqual("127.0.0.2", host_info.ip, "get discover host failed") + + def test_add_discover_host_without_passwd(self): + add_host_meta = {'ip': '127.0.0.2', 'user': 'root'} + ex = self.assertRaises(client_exc.HTTPBadRequest, self.add_discover_host, **add_host_meta) + self.assertIn("PASSWD parameter can not be None.", str(ex)) + + def test_add_discover_host_with_repeat_ip(self): + # add_host_meta = {'ip': '127.0.0.2', 'passwd': 'ossdbg2', 'user': 'root'} + # host_1 = self.add_discover_host(**add_host_meta) + # ex = self.assertRaises(client_exc.HTTPForbidden, self.add_discover_host, **add_host_meta) + # self.assertIn("403 Forbidden: ip %s already existed." % add_host_meta['ip'], str(ex)) + pass + + def test_discover_host(self): + daisy_endpoint = CONF.daisy.daisy_endpoint + + def GetMiddleStr(content, startStr, endStr): + startIndex = content.index(startStr) + if startIndex >= 0: + startIndex += len(startStr) + endIndex = content.index(endStr) + return content[startIndex:endIndex] + + local_ip = GetMiddleStr(daisy_endpoint, 'http://', ':19292') + discover_host_meta1 = {} + discover_host_meta1['ip'] = local_ip + discover_host_meta1['passwd'] = 'ossdbg1' + self.add_discover_host(**discover_host_meta1) + + discover_host = {} + self.discover_host(**discover_host) + time.sleep(8) + discover_flag = 'false' + while 1: + print("discovring!!!!!!!!") + if discover_flag == 'true': + break + discovery_host_list_generator = self.list_discover_host() + discovery_host_list = [discover_host_tmp for discover_host_tmp in discovery_host_list_generator] + for host in discovery_host_list: + if host.status == 'DISCOVERY_SUCCESSFUL': + discover_flag = 'true' + else: + discover_flag = 'false' + self.assertEqual("true", discover_flag, "discover host failed") + + def tearDown(self): + if self.host_meta.get('user', None): + del self.host_meta['user'] + if self.host_meta.get('status', None): + del self.host_meta['status'] + + 
self._clean_all_discover_host() + super(DaisyDiscoverHostTest, self).tearDown() diff --git a/test/tempest/tempest/api/daisy/v1/test_hwm.py b/test/tempest/tempest/api/daisy/v1/test_hwm.py new file mode 100755 index 00000000..78a969a2 --- /dev/null +++ b/test/tempest/tempest/api/daisy/v1/test_hwm.py @@ -0,0 +1,61 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from tempest.api.daisy import base +from tempest import config +from fake.logical_network_fake import FakeLogicNetwork as logical_fake +CONF = config.CONF + + +class DaisyHwmTest(base.BaseDaisyTest): + @classmethod + def resource_setup(cls): + super(DaisyHwmTest, cls).resource_setup() + cls.fake = logical_fake() + + cls.hwm_meta = {'hwm_ip': '10.43.211.63', + 'description': 'the first hwm'} + + def test_add_hwm(self): + hwm = self.add_hwm(**self.hwm_meta) + self.assertEqual("10.43.211.63", hwm.hwm_ip, "add-hwm failed") + + def test_update_hwm(self): + update_hwm_meta = {'hwm_ip': '10.43.174.11'} + add_hwm = self.add_hwm(**self.hwm_meta) + update_hwm = self.update_hwm(add_hwm.id, **update_hwm_meta) + + self.assertEqual("10.43.174.11", update_hwm.hwm_ip, + "update-hwm failed") + + def test_hwm_detail_info(self): + add_hwm = self.add_hwm(**self.hwm_meta) + hwm_detail = self.get_hwm_detail(add_hwm.id) + self.assertEqual("10.43.211.63", hwm_detail.hwm_ip, + "test_hwm_detail_info failed") + + def test_hwm_list(self): + self.add_hwm(**self.hwm_meta) + 
hwms = self.list_hwm() + for hwm in hwms: + self.assertTrue(hwm is not None) + + def test_hwm_delete(self): + hwm = self.add_hwm(**self.hwm_meta) + self.delete_hwm(hwm.id) + + def tearDown(self): + self._clean_all_hwm() + super(DaisyHwmTest, self).tearDown() diff --git a/test/tempest/tempest/api/daisy/v1/test_logical_network.py b/test/tempest/tempest/api/daisy/v1/test_logical_network.py new file mode 100755 index 00000000..d225b7fd --- /dev/null +++ b/test/tempest/tempest/api/daisy/v1/test_logical_network.py @@ -0,0 +1,273 @@ + +import copy + +from daisyclient import exc as client_exc +from tempest.api.daisy import base +from tempest import config +from fake.logical_network_fake import FakeLogicNetwork as logical_fake + +CONF = config.CONF + + +class TecsLogicalNetworkTest(base.BaseDaisyTest): + LOGICAL_FILTER = ['name', 'physnet_name', 'segmentation_id', + 'segmentation_type', 'shared', 'type'] + SUBNET_FILTER = ['name', 'dns_nameservers', 'floating_ranges', 'gateway', 'cidr'] + ROUTER_FILTER = ['name', 'description', 'external_logic_network', 'subnets'] + + @classmethod + def resource_setup(cls): + super(TecsLogicalNetworkTest, cls).resource_setup() + cls.fake = logical_fake() + + def _verify_logical_params(self, cluster_meta, fake_logical): + cluster_meta['logic_networks'] = \ + [dict(filter(lambda paris: paris[0] in TecsLogicalNetworkTest.LOGICAL_FILTER, logic_network.items())) + for logic_network in cluster_meta['logic_networks']] + + tmp_fake_logical = [dict(filter(lambda paris: paris[0] in TecsLogicalNetworkTest.LOGICAL_FILTER, + logic_network.items())) + for logic_network in fake_logical] + if cluster_meta['logic_networks'] != tmp_fake_logical: + cluster_meta['logic_networks'].reverse() + + return tmp_fake_logical + + def _verify_router_params(self, cluster_meta): + cluster_meta['routers'] = \ + [dict(filter(lambda paris: paris[0] in TecsLogicalNetworkTest.ROUTER_FILTER, router.items())) + for router in cluster_meta['routers']] + for router in 
cluster_meta['routers']: + router['subnets'] = copy.deepcopy(list(set(router['subnets']))) + + def private_network_add(self): + # add network plane + private_network_params = self.fake.fake_private_network_parameters() + private_network_params = self.add_network(**private_network_params) + self.private_network_id = private_network_params.id + return copy.deepcopy(private_network_params) + + def private_network_delete(self): + self.delete_network(self.private_network_id) + + # STC-F-Daisy_Logical_Network-0001 + def test_add_all_params(self): + private_network = self.private_network_add() + fake_cluster = self.fake.fake_cluster_parameters(private_network) + fake_logical = self.fake.fake_logical_parameters(private_network) + fake_routers = self.fake.fake_router_parameters() + fake_network = self.fake.fake_network_parameters() + + fake_cluster.update({'networking_parameters': fake_network, + 'logic_networks': fake_logical, + 'routers': fake_routers}) + + cluster_info = self.add_cluster(**fake_cluster) + cluster_meta = self.get_cluster(cluster_info.id).to_dict() + + self.assertEqual(cluster_meta.get('networking_parameters', None), fake_network) + + fake_logical = self._verify_logical_params(cluster_meta, fake_logical) + self.assertEqual(cluster_meta.get('logic_networks', None), fake_logical) + + self._verify_router_params(cluster_meta) + self.assertEqual(cluster_meta.get('routers', None), fake_routers) + + self.delete_cluster(cluster_info.id) + + # STC-A-Daisy_Logical_Network-0004 + def test_add_without_logical_parameters_exc(self): + fake_cluster = self.fake.fake_cluster_parameters() + fake_routers = self.fake.fake_router_parameters() + fake_network = self.fake.fake_network_parameters() + + fake_cluster.update({'networking_parameters': fake_network, + 'routers': fake_routers}) + + self.assertRaisesMessage( + client_exc.HTTPBadRequest, + "400 Bad Request: Logic_network flat1 is not valid range. 
(HTTP 400)", + self.add_cluster, **fake_cluster) + + # STC-F-Daisy_Logical_Network-0002 + def test_add_network_params_only(self): + fake_cluster = self.fake.fake_cluster_parameters() + fake_network = self.fake.fake_network_parameters() + + fake_cluster.update({'networking_parameters': fake_network}) + + cluster_info = self.add_cluster(**fake_cluster) + cluster_meta = self.get_cluster(cluster_info.id).to_dict() + + self.assertEqual(cluster_meta.get('networking_parameters', None), fake_network) + self.delete_cluster(cluster_info.id) + + # STC-F-Daisy_Logical_Network-0003 + def test_add_network_and_logical_params(self): + private_network = self.private_network_add() + fake_cluster = self.fake.fake_cluster_parameters(private_network) + fake_logical = self.fake.fake_logical_parameters(private_network) + fake_network = self.fake.fake_network_parameters() + + fake_cluster.update({'networking_parameters': fake_network, + 'logic_networks': fake_logical}) + + cluster_info = self.add_cluster(**fake_cluster) + cluster_meta = self.get_cluster(cluster_info.id).to_dict() + + self.assertEqual(cluster_meta.get('networking_parameters', None), fake_network) + + fake_logical = self._verify_logical_params(cluster_meta, fake_logical) + self.assertEqual(cluster_meta.get('logic_networks', None), fake_logical) + self.delete_cluster(cluster_info.id) + + # STC-A-Daisy_Logical_Network-0007 + def test_routers_params_valid_check_exc(self): + private_network = self.private_network_add() + fake_cluster = self.fake.fake_cluster_parameters(private_network) + fake_logical = self.fake.fake_logical_parameters(private_network) + fake_network = self.fake.fake_network_parameters() + fake_router = self.fake.fake_router_parameters2() + + fake_cluster.update({'networking_parameters': fake_network, + 'logic_networks': fake_logical, + 'routers': fake_router}) + self.assertRaisesMessage( + client_exc.HTTPBadRequest, + "400 Bad Request: Logic network's subnets is all related with a router, it's not allowed. 
(HTTP 400)", + self.add_cluster, **fake_cluster) + + tmp_fake_router1 = copy.deepcopy(fake_router) + tmp_fake_router1[0]['name'] = "test" + fake_cluster.update({'routers': tmp_fake_router1}) + self.assertRaisesMessage( + client_exc.HTTPBadRequest, + "400 Bad Request: Logic network's subnets is all related with a router, it's not allowed. (HTTP 400)", + self.add_cluster, **fake_cluster) + + tmp_fake_router2 = copy.deepcopy(fake_router) + tmp_fake_router2[0]['external_logic_network'] = "test" + fake_cluster.update({'routers': tmp_fake_router2}) + self.assertRaisesMessage( + client_exc.HTTPBadRequest, + "400 Bad Request: Logic_network test is not valid range. (HTTP 400)", + self.add_cluster, **fake_cluster) + + tmp_fake_router3 = copy.deepcopy(fake_router) + tmp_fake_router3[0]['subnets'] = ['test'] + fake_cluster.update({'routers': tmp_fake_router3}) + self.assertRaisesMessage( + client_exc.HTTPBadRequest, + "400 Bad Request: Subnet test is not valid range. (HTTP 400)", + self.add_cluster, **fake_cluster) + self.private_network_delete() + + # TODO:name + + # STC-A-Daisy_Logical_Network-0008 + def test_subnets_params_valid_check_exc(self): + private_network = self.private_network_add() + fake_cluster = self.fake.fake_cluster_parameters(private_network) + fake_logical = self.fake.fake_logical_parameters(private_network) + fake_network = self.fake.fake_network_parameters() + + tmp_fake_logical1 = copy.deepcopy(fake_logical) + tmp_fake_logical1[0]['subnets'] = self.fake.fake_subnet_parameters2() + fake_cluster.update({'networking_parameters': fake_network, + 'logic_networks': tmp_fake_logical1}) + self.assertRaisesMessage( + client_exc.HTTPBadRequest, + "400 Bad Request: Between floating ip range can not be overlap. 
(HTTP 400)", + self.add_cluster, **fake_cluster) + + tmp_fake_logical2 = copy.deepcopy(self.fake.fake_logical_parameters2()) + tmp_fake_logical2[0].update({'subnets': self.fake.fake_subnet_parameters2()}) + tmp_fake_logical2[0]['subnets'][0].update({'floating_ranges': []}) + tmp_fake_logical2[0]['subnets'][1].update({'floating_ranges': []}) + + fake_cluster.update({'logic_networks': tmp_fake_logical2}) + self.assertRaisesMessage( + client_exc.HTTPBadRequest, + "400 Bad Request: Subnet name segment is repetition. (HTTP 400)", + self.add_cluster, **fake_cluster) + self.private_network_delete() + + # STC-A-Daisy_Logical_Network-0009 + def test_update_all_params(self): + private_network = self.private_network_add() + fake_cluster = self.fake.fake_cluster_parameters(private_network) + fake_network = self.fake.fake_network_parameters() + fake_logical = self.fake.fake_logical_parameters(private_network) + + # add + fake_cluster.update({'networking_parameters': fake_network, + 'logic_networks': fake_logical, + 'routers': self.fake.fake_router_parameters()}) + cluster_id1 = self.add_cluster(**fake_cluster).id + + fake_cluster.update({'networking_parameters': fake_network, + 'logic_networks': fake_logical, + 'routers': self.fake.fake_router_parameters()}) + + # update + cluster_id2 = self.update_cluster(cluster_id1, **fake_cluster) + cluster_meta = self.get_cluster(cluster_id2).to_dict() + + # check + self.assertEqual(cluster_meta.get('networking_parameters', None), fake_network) + + tmp_fake_logical = self._verify_logical_params(cluster_meta, fake_logical) + self.assertEqual(cluster_meta.get('logic_networks', None), tmp_fake_logical) + + self._verify_router_params(cluster_meta) + self.assertEqual(cluster_meta.get('routers', None), self.fake.fake_router_parameters()) + + self.delete_cluster(cluster_id2) + + # STC-A-Daisy_Logical_Network-0010 + def test_get_all_params(self): + private_network = self.private_network_add() + fake_cluster = 
self.fake.fake_cluster_parameters(private_network) + fake_logical = self.fake.fake_logical_parameters(private_network) + fake_routers = self.fake.fake_router_parameters() + fake_network = self.fake.fake_network_parameters() + + fake_cluster.update({'networking_parameters': fake_network, + 'logic_networks': fake_logical, + 'routers': fake_routers}) + + cluster_info = self.add_cluster(**fake_cluster) + cluster_meta = self.get_cluster(cluster_info.id).to_dict() + + self.assertEqual(cluster_meta.get('networking_parameters', None), fake_network) + + fake_logical = self._verify_logical_params(cluster_meta, fake_logical) + self.assertEqual(cluster_meta.get('logic_networks', None), fake_logical) + + self._verify_router_params(cluster_meta) + self.assertEqual(cluster_meta.get('routers', None), fake_routers) + + self.delete_cluster(cluster_info.id) + + # STC-A-Daisy_Logical_Network-0011 + def test_delete_all_params(self): + fake_cluster = self.fake.fake_cluster_parameters() + + cluster_info = self.add_cluster(**fake_cluster) + cluster_meta = self.get_cluster(cluster_info.id).to_dict() + + default_networking_parameters = {u'base_mac': None, + u'gre_id_range': [None, None], + u'net_l23_provider': None, + u'public_vip': None, + u'segmentation_type': None, + u'vlan_range': [None, None], + u'vni_range': [None, None]} + self.assertEqual(default_networking_parameters, cluster_meta.get('networking_parameters', None)) + self.assertEqual([], cluster_meta.get('logic_networks', None)) + self.assertEqual([], cluster_meta.get('routers', None)) + + self.delete_cluster(cluster_info.id) + + def tearDown(self): + super(TecsLogicalNetworkTest, self).tearDown() diff --git a/test/tempest/tempest/api/daisy/v1/test_service.py b/test/tempest/tempest/api/daisy/v1/test_service.py new file mode 100755 index 00000000..33c439ad --- /dev/null +++ b/test/tempest/tempest/api/daisy/v1/test_service.py @@ -0,0 +1,152 @@ + + +from tempest.api.daisy import base +from tempest import config +CONF = config.CONF + 
+ +class DaisyServiceTest(base.BaseDaisyTest): + + @classmethod + def resource_setup(cls): + super(DaisyServiceTest, cls).resource_setup() + cls.host_meta = {'name': 'test_add_host', + 'description': 'test_tempest'} + cls.host_meta_interfaces = {'type': 'ether', + 'name': 'eth1', + 'mac': 'fe80::f816:3eff', + 'ip': '10.43.177.121', + 'netmask': '255.255.254.0', + 'is_deployment': 'True', + 'assigned_networks': ['MANAGEMENT', 'DEPLOYMENT'], + 'slaves': 'eth1'} + + cls.cluster_meta = {'description': 'desc', + 'logic_networks': [{'name': 'external1', + 'physnet_name': 'PRIVATE', + 'segmentation_id': 200, + 'segmentation_type': 'vlan', + 'shared': True, + 'subnets': [{'cidr': '192.168.1.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.1.2', + '192.168.1.200']], + 'gateway': '192.168.1.1', + 'name': 'subnet2'}, + {'cidr': '172.16.1.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['172.16.1.130', + '172.16.1.150'], + ['172.16.1.151', + '172.16.1.254']], + 'gateway': '172.16.1.1', + 'name': 'subnet10'}], + 'type': 'external'}, + {'name': 'external2', + 'physnet_name': 'PUBLIC', + 'segmentation_id': 1023, + 'segmentation_type': 'vxlan', + 'shared': True, + 'subnets': [{'cidr': '192.168.2.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.2.130', + '192.168.2.254']], + 'gateway': '192.168.2.1', + 'name': 'subnet123'}], + 'type': 'external'}, + {'name': 'internal1', + 'physnet_name': 'PRIVATE', + 'segmentation_id': '777', + 'segmentation_type': 'vlan', + 'shared': False, + 'subnets': [{'cidr': '192.168.31.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.31.130', + '192.168.31.254']], + 'gateway': '192.168.31.1', + 'name': 'subnet3'}, + {'cidr': '192.168.4.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.4.130', + '192.168.4.254']], + 'gateway': '192.168.4.1', + 'name': 'subnet4'}], + 'type': 
'internal'}], + 'name': 'test', + 'networking_parameters': {'base_mac': 'fa:16:3e:00:00:00', + 'gre_id_range': [2, 2000], + 'net_l23_provider': 'ovs', + 'public_vip': '172.16.0.3', + 'segmentation_type': 'vlan,vxlan', + 'vlan_range': [2, 4094], + 'vni_range': [1000, 1030]}, + 'networks': [], + 'nodes': [], + 'routers': [{'description': 'router1', + 'external_logic_network': 'external1', + 'name': 'router1', + 'subnets': ['subnet4', 'subnet3']}, + {'description': 'router2', + 'external_logic_network': 'external2', + 'name': 'router2', + 'subnets': ['subnet2', 'subnet10']}]} + cls.service_meta = {'name': 'test_service', 'description': 'test'} + + def test_list_service(self): + service_meta = {} + service_flag = True + list_service = self.list_service(**service_meta) + query_service_list = [service_info for service_info in list_service] + service_list = ["lb", "ha", "mariadb", "amqp", + "ceilometer-central", "ceilometer-alarm", + "ceilometer-notification", "ceilometer-collector", + "heat-engine", "ceilometer-api", "heat-api-cfn", + "heat-api", "horizon", "neutron-metadata", + "neutron-dhcp", "neutron-server", "neutron-l3", + "keystone", "cinder-volume", "cinder-api", + "cinder-scheduler", "glance", "ironic", "compute", + "nova-cert", "nova-sched", "nova-vncproxy", + "nova-conductor", "nova-api"] + for service in service_list: + for query_service in query_service_list: + if service == query_service.name: + break + else: + service_flag = False + self.assertTrue(service_flag, "test_list_service error") + + def test_add_service(self): + service = self.add_service(**self.service_meta) + self.assertEqual("test_service", service.name, "test_add_service failed") + self.delete_service(service.id) + + def test_service_delete(self): + service = self.add_service(**self.service_meta) + self.delete_service(service.id) + service_flag = True + service_meta = {} + list_service = self.list_service(**service_meta) + query_service_list = [service_info for service_info in list_service] + 
for query_service in query_service_list: + if service.name == query_service.name: + service_flag = False + self.assertTrue(service_flag, "test_list_service error") + + def test_get_service_detail(self): + add_service_info = self.add_service(**self.service_meta) + get_service = self.get_service(add_service_info.id) + self.assertEqual('test_service', get_service.name) + self.delete_service(get_service.id) + + def test_update_service(self): + add_service_info = self.add_service(**self.service_meta) + update_service_meta = {'name': 'test_update_service', + 'description': 'test_tempest'} + update_service_info = self.update_service(add_service_info.id, **update_service_meta) + self.assertEqual("test_update_service", update_service_info.name, "test_update_service_with_cluster failed") + self.delete_service(add_service_info.id) diff --git a/test/tempest/tempest/config.py b/test/tempest/tempest/config.py new file mode 100755 index 00000000..469a96f7 --- /dev/null +++ b/test/tempest/tempest/config.py @@ -0,0 +1,1436 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import print_function + +import logging as std_logging +import os + +from oslo_config import cfg + +from oslo_log import log as logging + + +# TODO(marun) Replace use of oslo_config's global ConfigOpts +# (cfg.CONF) instance with a local instance (cfg.ConfigOpts()) once +# the cli tests move to the clients. 
The cli tests rely on oslo +# incubator modules that use the global cfg.CONF. +_CONF = cfg.CONF + + +def register_opt_group(conf, opt_group, options): + conf.register_group(opt_group) + for opt in options: + conf.register_opt(opt, group=opt_group.name) + + +auth_group = cfg.OptGroup(name='auth', + title="Options for authentication and credentials") + + +AuthGroup = [ + cfg.StrOpt('test_accounts_file', + help="Path to the yaml file that contains the list of " + "credentials to use for running tests. If used when " + "running in parallel you have to make sure sufficient " + "credentials are provided in the accounts file. For " + "example if no tests with roles are being run it requires " + "at least `2 * CONC` distinct accounts configured in " + " the `test_accounts_file`, with CONC == the " + "number of concurrent test processes."), + cfg.BoolOpt('allow_tenant_isolation', + default=False, + help="Allows test cases to create/destroy tenants and " + "users. This option requires that OpenStack Identity " + "API admin credentials are known. If false, isolated " + "test cases and parallel execution, can still be " + "achieved configuring a list of test accounts", + deprecated_opts=[cfg.DeprecatedOpt('allow_tenant_isolation', + group='compute'), + cfg.DeprecatedOpt('allow_tenant_isolation', + group='orchestration')]), + cfg.ListOpt('tempest_roles', + help="Roles to assign to all users created by tempest", + default=[]), + cfg.StrOpt('tenant_isolation_domain_name', + default=None, + help="Only applicable when identity.auth_version is v3." + "Domain within which isolated credentials are provisioned." 
+ "The default \"None\" means that the domain from the" + "admin user is used instead.") +] + +identity_group = cfg.OptGroup(name='identity', + title="Keystone Configuration Options") + +IdentityGroup = [ + cfg.StrOpt('catalog_type', + default='identity', + help="Catalog type of the Identity service."), + cfg.BoolOpt('disable_ssl_certificate_validation', + default=False, + help="Set to True if using self-signed SSL certificates."), + cfg.StrOpt('ca_certificates_file', + default=None, + help='Specify a CA bundle file to use in verifying a ' + 'TLS (https) server certificate.'), + cfg.StrOpt('uri', + help="Full URI of the OpenStack Identity API (Keystone), v2"), + cfg.StrOpt('uri_v3', + help='Full URI of the OpenStack Identity API (Keystone), v3'), + cfg.StrOpt('auth_version', + default='v2', + help="Identity API version to be used for authentication " + "for API tests."), + cfg.StrOpt('region', + default='RegionOne', + help="The identity region name to use. Also used as the other " + "services' region name unless they are set explicitly. " + "If no such region is found in the service catalog, the " + "first found one is used."), + cfg.StrOpt('endpoint_type', + default='publicURL', + choices=['public', 'admin', 'internal', + 'publicURL', 'adminURL', 'internalURL'], + help="The endpoint type to use for the identity service."), + cfg.StrOpt('username', + help="Username to use for Nova API requests."), + cfg.StrOpt('tenant_name', + help="Tenant name to use for Nova API requests."), + cfg.StrOpt('admin_role', + default='admin', + help="Role required to administrate keystone."), + cfg.StrOpt('password', + help="API key to use when authenticating.", + secret=True), + cfg.StrOpt('domain_name', + help="Domain name for authentication (Keystone V3)." 
+ "The same domain applies to user and project"), + cfg.StrOpt('alt_username', + help="Username of alternate user to use for Nova API " + "requests."), + cfg.StrOpt('alt_tenant_name', + help="Alternate user's Tenant name to use for Nova API " + "requests."), + cfg.StrOpt('alt_password', + help="API key to use when authenticating as alternate user.", + secret=True), + cfg.StrOpt('alt_domain_name', + help="Alternate domain name for authentication (Keystone V3)." + "The same domain applies to user and project"), + cfg.StrOpt('admin_username', + help="Administrative Username to use for " + "Keystone API requests."), + cfg.StrOpt('admin_tenant_name', + help="Administrative Tenant name to use for Keystone API " + "requests."), + cfg.StrOpt('admin_password', + help="API key to use when authenticating as admin.", + secret=True), + cfg.StrOpt('admin_domain_name', + help="Admin domain name for authentication (Keystone V3)." + "The same domain applies to user and project"), +] + +identity_feature_group = cfg.OptGroup(name='identity-feature-enabled', + title='Enabled Identity Features') + +IdentityFeatureGroup = [ + cfg.BoolOpt('trust', + default=True, + help='Does the identity service have delegation and ' + 'impersonation enabled'), + cfg.BoolOpt('api_v2', + default=True, + help='Is the v2 identity API enabled'), + cfg.BoolOpt('api_v3', + default=True, + help='Is the v3 identity API enabled'), +] + +compute_group = cfg.OptGroup(name='compute', + title='Compute Service Options') + +ComputeGroup = [ + cfg.StrOpt('image_ref', + help="Valid primary image reference to be used in tests. " + "This is a required option"), + cfg.StrOpt('image_ref_alt', + help="Valid secondary image reference to be used in tests. " + "This is a required option, but if only one image is " + "available duplicate the value of image_ref above"), + cfg.StrOpt('image_ref_xp', + help="Valid XP image reference to be used in tests. 
" + "This is a required option"), + cfg.StrOpt('image_ssh_xp_user', + default="administrator", + help="User name used to authenticate to an instance using " + "the alternate image."), + cfg.StrOpt('image_ssh_xp_pwd', + default="null", + help="Password used to authenticate to an instance using " + "the alternate image."), + cfg.StrOpt('image_ref_vmb', + help="Valid VMB image reference to be used in tests. " + "This is a required option"), + cfg.StrOpt('image_ssh_vmb_user', + default="zte", + help="User name used to authenticate to an instance using " + "the alternate image."), + cfg.StrOpt('image_ssh_vmb_pwd', + default="zte", + help="Password used to authenticate to an instance using " + "the alternate image."), + cfg.StrOpt('image_ref_linux', + help="Valid Linux-CGSLV5 image reference to be used in tests. " + "This is a required option"), + cfg.StrOpt('image_ssh_linux_user', + default="root", + help="User name used to authenticate to an instance using " + "the alternate image."), + cfg.StrOpt('image_ssh_linux_pwd', + default="ossdbg1", + help="Password used to authenticate to an instance using " + "the alternate image."), + cfg.StrOpt('image_ref_suse', + help="Valid SUSE image reference to be used in tests. " + "This is a required option"), + cfg.StrOpt('image_ssh_suse_user', + default="root", + help="User name used to authenticate to an instance using " + "the alternate image."), + cfg.StrOpt('image_ssh_suse_pwd', + default="it@123456", + help="Password used to authenticate to an instance using " + "the alternate image."), + cfg.StrOpt('image_ref_centos7', + help="Valid CENTOS7 image reference to be used in tests. 
" + "This is a required option"), + cfg.StrOpt('image_ssh_centos7_user', + default="root", + help="User name used to authenticate to an instance using " + "the alternate image."), + cfg.StrOpt('image_ssh_centos7_pwd', + default="ossdbg1", + help="Password used to authenticate to an instance using " + "the alternate image."), + cfg.BoolOpt('use_sdn', + default=False, + help="whether use SDN."), + cfg.StrOpt('flavor_ref', + default="1", + help="Valid primary flavor to use in tests."), + cfg.StrOpt('flavor_ref_alt', + default="2", + help='Valid secondary flavor to be used in tests.'), + cfg.StrOpt('image_ssh_user', + default="root", + help="User name used to authenticate to an instance."), + cfg.StrOpt('image_ssh_password', + default="password", + help="Password used to authenticate to an instance."), + cfg.StrOpt('image_alt_ssh_user', + default="root", + help="User name used to authenticate to an instance using " + "the alternate image."), + cfg.IntOpt('build_interval', + default=1, + help="Time in seconds between build status checks."), + cfg.IntOpt('build_timeout', + default=300, + help="Timeout in seconds to wait for an instance to build. " + "Other services that do not define build_timeout will " + "inherit this value."), + cfg.BoolOpt('run_ssh', + default=False, + help="Should the tests ssh to instances?"), + cfg.StrOpt('ssh_auth_method', + default='keypair', + help="Auth method used for authenticate to the instance. " + "Valid choices are: keypair, configured, adminpass " + "and disabled. " + "Keypair: start the servers with a ssh keypair. " + "Configured: use the configured user and password. " + "Adminpass: use the injected adminPass. " + "Disabled: avoid using ssh when it is an option."), + cfg.StrOpt('ssh_connect_method', + default='floating', + help="How to connect to the instance? 
" + "fixed: using the first ip belongs the fixed network " + "floating: creating and using a floating ip."), + cfg.StrOpt('ssh_user', + default='root', + help="User name used to authenticate to an instance."), + cfg.IntOpt('ping_timeout', + default=120, + help="Timeout in seconds to wait for ping to " + "succeed."), + cfg.IntOpt('ping_size', + default=56, + help="The packet size for ping packets originating " + "from remote linux hosts"), + cfg.IntOpt('ping_count', + default=1, + help="The number of ping packets originating from remote " + "linux hosts"), + cfg.IntOpt('ssh_timeout', + default=300, + help="Timeout in seconds to wait for authentication to " + "succeed."), + cfg.IntOpt('ready_wait', + default=0, + help="Additional wait time for clean state, when there is " + "no OS-EXT-STS extension available"), + cfg.IntOpt('ssh_channel_timeout', + default=60, + help="Timeout in seconds to wait for output from ssh " + "channel."), + cfg.StrOpt('fixed_network_name', + help="Name of the fixed network that is visible to all test " + "tenants. If multiple networks are available for a tenant" + " this is the network which will be used for creating " + "servers if tempest does not create a network or a " + "network is not specified elsewhere. It may be used for " + "ssh validation only if floating IPs are disabled."), + cfg.StrOpt('fixed_network_name_external', + default='private', + help="Visible fixed network name external "), + cfg.StrOpt('multi_create_flavor_id', + default='3', + help="Valid flavor reference to be used in multiple create " + "servers. This is a required option"), + cfg.IntOpt('multi_create_max_count', + default=5, + help="Max count of multiple creating servers."), + cfg.StrOpt('v4_image_file', + default='', + help="V4 image file where stored."), + cfg.StrOpt('network_for_ssh', + default='public', + help="Network used for SSH connections. 
Ignored if " + "use_floatingip_for_ssh=true or run_ssh=false."), + cfg.IntOpt('ip_version_for_ssh', + default=4, + help="IP version used for SSH connections."), + cfg.BoolOpt('use_floatingip_for_ssh', + default=True, + help="Does SSH use Floating IPs?"), + cfg.StrOpt('catalog_type', + default='compute', + help="Catalog type of the Compute service."), + cfg.StrOpt('region', + default='', + help="The compute region name to use. If empty, the value " + "of identity.region is used instead. If no such region " + "is found in the service catalog, the first found one is " + "used."), + cfg.StrOpt('endpoint_type', + default='publicURL', + choices=['public', 'admin', 'internal', + 'publicURL', 'adminURL', 'internalURL'], + help="The endpoint type to use for the compute service."), + cfg.StrOpt('volume_device_name', + default='vdb', + help="Expected device name when a volume is attached to " + "an instance"), + cfg.IntOpt('shelved_offload_time', + default=0, + help='Time in seconds before a shelved instance is eligible ' + 'for removing from a host. -1 never offload, 0 offload ' + 'when shelved. This time should be the same as the time ' + 'of nova.conf, and some tests will run for as long as the ' + 'time.'), + cfg.StrOpt('floating_ip_range', + default='10.0.0.0/29', + help='Unallocated floating IP range, which will be used to ' + 'test the floating IP bulk feature for CRUD operation. ' + 'This block must not overlap an existing floating IP ' + 'pool.') +] + +compute_features_group = cfg.OptGroup(name='compute-feature-enabled', + title="Enabled Compute Service Features") + +ComputeFeaturesGroup = [ + cfg.BoolOpt('disk_config', + default=True, + help="If false, skip disk config tests"), + cfg.ListOpt('api_extensions', + default=['all'], + help='A list of enabled compute extensions with a special ' + 'entry all which indicates every extension is enabled. ' + 'Each extension should be specified with alias name. 
' + 'Empty list indicates all extensions are disabled'), + cfg.BoolOpt('change_password', + default=False, + help="Does the test environment support changing the admin " + "password?"), + cfg.BoolOpt('console_output', + default=True, + help="Does the test environment support obtaining instance " + "serial console output?"), + cfg.BoolOpt('resize', + default=False, + help="Does the test environment support resizing?"), + cfg.BoolOpt('pause', + default=True, + help="Does the test environment support pausing?"), + cfg.BoolOpt('shelve', + default=True, + help="Does the test environment support shelving/unshelving?"), + cfg.BoolOpt('suspend', + default=True, + help="Does the test environment support suspend/resume?"), + cfg.BoolOpt('live_migration', + default=True, + help="Does the test environment support live migration " + "available?"), + cfg.BoolOpt('block_migration_for_live_migration', + default=True, + help="Does the test environment use block devices for live " + "migration"), + cfg.BoolOpt('block_migrate_cinder_iscsi', + default=False, + help="Does the test environment block migration support " + "cinder iSCSI volumes. Note, libvirt doesn't support this, " + "see https://bugs.launchpad.net/nova/+bug/1398999"), + cfg.BoolOpt('vnc_console', + default=False, + help='Enable VNC console. This configuration value should ' + 'be same as [nova.vnc]->vnc_enabled in nova.conf'), + cfg.BoolOpt('spice_console', + default=False, + help='Enable Spice console. This configuration value should ' + 'be same as [nova.spice]->enabled in nova.conf'), + cfg.BoolOpt('rdp_console', + default=False, + help='Enable RDP console. 
This configuration value should ' + 'be same as [nova.rdp]->enabled in nova.conf'), + cfg.BoolOpt('rescue', + default=True, + help='Does the test environment support instance rescue ' + 'mode?'), + cfg.BoolOpt('enable_instance_password', + default=True, + help='Enables returning of the instance password by the ' + 'relevant server API calls such as create, rebuild ' + 'or rescue.'), + cfg.BoolOpt('interface_attach', + default=False, + help='Does the test environment support dynamic network ' + 'interface attachment?'), + cfg.BoolOpt('snapshot', + default=True, + help='Does the test environment support creating snapshot ' + 'images of running instances?'), + cfg.BoolOpt('ec2_api', + default=True, + help='Does the test environment have the ec2 api running?'), + # TODO(mriedem): Remove preserve_ports once juno-eol happens. + cfg.BoolOpt('preserve_ports', + default=False, + help='Does Nova preserve preexisting ports from Neutron ' + 'when deleting an instance? This should be set to True ' + 'if testing Kilo+ Nova.') +] + + +image_group = cfg.OptGroup(name='image', + title="Image Service Options") + +ImageGroup = [ + cfg.StrOpt('catalog_type', + default='image', + help='Catalog type of the Image service.'), + cfg.StrOpt('region', + default='', + help="The image region name to use. If empty, the value " + "of identity.region is used instead. 
If no such region " + "is found in the service catalog, the first found one is " + "used."), + cfg.StrOpt('endpoint_type', + default='publicURL', + choices=['public', 'admin', 'internal', + 'publicURL', 'adminURL', 'internalURL'], + help="The endpoint type to use for the image service."), + cfg.StrOpt('http_image', + default='http://download.cirros-cloud.net/0.3.1/' + 'cirros-0.3.1-x86_64-uec.tar.gz', + help='http accessible image'), + cfg.IntOpt('build_timeout', + default=300, + help="Timeout in seconds to wait for an image to " + "become available."), + cfg.IntOpt('build_interval', + default=1, + help="Time in seconds between image operation status " + "checks.") +] + +image_feature_group = cfg.OptGroup(name='image-feature-enabled', + title='Enabled image service features') + +ImageFeaturesGroup = [ + cfg.BoolOpt('api_v2', + default=True, + help="Is the v2 image API enabled"), + cfg.BoolOpt('api_v1', + default=True, + help="Is the v1 image API enabled"), +] + +network_group = cfg.OptGroup(name='network', + title='Network Service Options') + +NetworkGroup = [ + cfg.StrOpt('internal_network_id', + default='private', + help="internal network id "), + cfg.StrOpt('catalog_type', + default='network', + help='Catalog type of the Neutron service.'), + cfg.StrOpt('region', + default='', + help="The network region name to use. If empty, the value " + "of identity.region is used instead. 
If no such region " + "is found in the service catalog, the first found one is " + "used."), + cfg.StrOpt('endpoint_type', + default='publicURL', + choices=['public', 'admin', 'internal', + 'publicURL', 'adminURL', 'internalURL'], + help="The endpoint type to use for the network service."), + cfg.StrOpt('tenant_network_cidr', + default="10.100.0.0/16", + help="The cidr block to allocate tenant ipv4 subnets from"), + cfg.IntOpt('tenant_network_mask_bits', + default=28, + help="The mask bits for tenant ipv4 subnets"), + cfg.StrOpt('tenant_network_v6_cidr', + default="2003::/48", + help="The cidr block to allocate tenant ipv6 subnets from"), + cfg.IntOpt('tenant_network_v6_mask_bits', + default=64, + help="The mask bits for tenant ipv6 subnets"), + cfg.BoolOpt('tenant_networks_reachable', + default=False, + help="Whether tenant networks can be reached directly from " + "the test client. This must be set to True when the " + "'fixed' ssh_connect_method is selected."), + cfg.StrOpt('public_network_id', + default="", + help="Id of the public network that provides external " + "connectivity"), + cfg.StrOpt('floating_network_name', + help="Default floating network name. Used to allocate floating " + "IPs when neutron is enabled."), + cfg.StrOpt('public_router_id', + default="", + help="Id of the public router that provides external " + "connectivity. This should only be used when Neutron's " + "'allow_overlapping_ips' is set to 'False' in " + "neutron.conf. 
usually not needed past 'Grizzly' release"), + cfg.IntOpt('build_timeout', + default=300, + help="Timeout in seconds to wait for network operation to " + "complete."), + cfg.IntOpt('build_interval', + default=1, + help="Time in seconds between network operation status " + "checks."), + cfg.ListOpt('dns_servers', + default=["8.8.8.8", "8.8.4.4"], + help="List of dns servers which should be used" + " for subnet creation"), + cfg.StrOpt('port_vnic_type', + choices=[None, 'normal', 'direct', 'macvtap'], + help="vnic_type to use when Launching instances" + " with pre-configured ports." + " Supported ports are:" + " ['normal','direct','macvtap']"), +] + +network_feature_group = cfg.OptGroup(name='network-feature-enabled', + title='Enabled network service features') + +NetworkFeaturesGroup = [ + cfg.BoolOpt('ipv6', + default=True, + help="Allow the execution of IPv6 tests"), + cfg.ListOpt('api_extensions', + default=['all'], + help='A list of enabled network extensions with a special ' + 'entry all which indicates every extension is enabled. 
' + 'Empty list indicates all extensions are disabled'), + cfg.BoolOpt('ipv6_subnet_attributes', + default=False, + help="Allow the execution of IPv6 subnet tests that use " + "the extended IPv6 attributes ipv6_ra_mode " + "and ipv6_address_mode" + ), + cfg.BoolOpt('port_admin_state_change', + default=True, + help="Does the test environment support changing" + " port admin state"), +] + +messaging_group = cfg.OptGroup(name='messaging', + title='Messaging Service') + +MessagingGroup = [ + cfg.StrOpt('catalog_type', + default='messaging', + help='Catalog type of the Messaging service.'), + cfg.IntOpt('max_queues_per_page', + default=20, + help='The maximum number of queue records per page when ' + 'listing queues'), + cfg.IntOpt('max_queue_metadata', + default=65536, + help='The maximum metadata size for a queue'), + cfg.IntOpt('max_messages_per_page', + default=20, + help='The maximum number of queue message per page when ' + 'listing (or) posting messages'), + cfg.IntOpt('max_message_size', + default=262144, + help='The maximum size of a message body'), + cfg.IntOpt('max_messages_per_claim', + default=20, + help='The maximum number of messages per claim'), + cfg.IntOpt('max_message_ttl', + default=1209600, + help='The maximum ttl for a message'), + cfg.IntOpt('max_claim_ttl', + default=43200, + help='The maximum ttl for a claim'), + cfg.IntOpt('max_claim_grace', + default=43200, + help='The maximum grace period for a claim'), +] + +validation_group = cfg.OptGroup(name='validation', + title='SSH Validation options') + +ValidationGroup = [ + cfg.StrOpt('connect_method', + default='floating', + choices=['fixed', 'floating'], + help='Default IP type used for validation: ' + '-fixed: uses the first IP belonging to the fixed network ' + '-floating: creates and uses a floating IP'), + cfg.StrOpt('auth_method', + default='keypair', + choices=['keypair'], + help='Default authentication method to the instance. ' + 'Only ssh via keypair is supported for now. 
' + 'Additional methods will be handled in a separate spec.'), + cfg.IntOpt('ip_version_for_ssh', + default=4, + help='Default IP version for ssh connections.'), + cfg.IntOpt('ping_timeout', + default=120, + help='Timeout in seconds to wait for ping to succeed.'), + cfg.IntOpt('connect_timeout', + default=60, + help='Timeout in seconds to wait for the TCP connection to be ' + 'successful.'), + cfg.IntOpt('ssh_timeout', + default=300, + help='Timeout in seconds to wait for the ssh banner.'), +] + +volume_group = cfg.OptGroup(name='volume', + title='Block Storage Options') + +VolumeGroup = [ + cfg.IntOpt('build_interval', + default=1, + help='Time in seconds between volume availability checks.'), + cfg.IntOpt('build_timeout', + default=300, + help='Timeout in seconds to wait for a volume to become ' + 'available.'), + cfg.StrOpt('catalog_type', + default='volume', + help="Catalog type of the Volume Service"), + cfg.StrOpt('region', + default='', + help="The volume region name to use. If empty, the value " + "of identity.region is used instead. 
If no such region " + "is found in the service catalog, the first found one is " + "used."), + cfg.StrOpt('endpoint_type', + default='publicURL', + choices=['public', 'admin', 'internal', + 'publicURL', 'adminURL', 'internalURL'], + help="The endpoint type to use for the volume service."), + cfg.StrOpt('backend1_name', + default='BACKEND_1', + help="Name of the backend1 (must be declared in cinder.conf)"), + cfg.StrOpt('backend2_name', + default='BACKEND_2', + help="Name of the backend2 (must be declared in cinder.conf)"), + cfg.StrOpt('storage_protocol', + default='iSCSI', + help='Backend protocol to target when creating volume types'), + cfg.StrOpt('vendor_name', + default='Open Source', + help='Backend vendor to target when creating volume types'), + cfg.StrOpt('disk_format', + default='raw', + help='Disk format to use when copying a volume to image'), + cfg.IntOpt('volume_size', + default=1, + help='Default size in GB for volumes created by volumes tests'), +] + +volume_feature_group = cfg.OptGroup(name='volume-feature-enabled', + title='Enabled Cinder Features') + +VolumeFeaturesGroup = [ + cfg.BoolOpt('multi_backend', + default=False, + help="Runs Cinder multi-backend test (requires 2 backends)"), + cfg.BoolOpt('backup', + default=False, + help='Runs Cinder volumes backup test'), + cfg.BoolOpt('snapshot', + default=False, + help='Runs Cinder volume snapshot test'), + cfg.ListOpt('api_extensions', + default=['all'], + help='A list of enabled volume extensions with a special ' + 'entry all which indicates every extension is enabled. 
' + 'Empty list indicates all extensions are disabled'), + cfg.BoolOpt('api_v1', + default=True, + help="Is the v1 volume API enabled"), + cfg.BoolOpt('api_v2', + default=True, + help="Is the v2 volume API enabled"), +] + + +object_storage_group = cfg.OptGroup(name='object-storage', + title='Object Storage Service Options') + +ObjectStoreGroup = [ + cfg.StrOpt('catalog_type', + default='object-store', + help="Catalog type of the Object-Storage service."), + cfg.StrOpt('region', + default='', + help="The object-storage region name to use. If empty, the " + "value of identity.region is used instead. If no such " + "region is found in the service catalog, the first found " + "one is used."), + cfg.StrOpt('endpoint_type', + default='publicURL', + choices=['public', 'admin', 'internal', + 'publicURL', 'adminURL', 'internalURL'], + help="The endpoint type to use for the object-store service."), + cfg.IntOpt('container_sync_timeout', + default=600, + help="Number of seconds to time on waiting for a container " + "to container synchronization complete."), + cfg.IntOpt('container_sync_interval', + default=5, + help="Number of seconds to wait while looping to check the " + "status of a container to container synchronization"), + cfg.StrOpt('operator_role', + default='Member', + help="Role to add to users created for swift tests to " + "enable creating containers"), + cfg.StrOpt('reseller_admin_role', + default='ResellerAdmin', + help="User role that has reseller admin"), + cfg.StrOpt('realm_name', + default='realm1', + help="Name of sync realm. A sync realm is a set of clusters " + "that have agreed to allow container syncing with each " + "other. Set the same realm name as Swift's " + "container-sync-realms.conf"), + cfg.StrOpt('cluster_name', + default='name1', + help="One name of cluster which is set in the realm whose name " + "is set in 'realm_name' item in this file. 
Set the " + "same cluster name as Swift's container-sync-realms.conf"), +] + +object_storage_feature_group = cfg.OptGroup( + name='object-storage-feature-enabled', + title='Enabled object-storage features') + +ObjectStoreFeaturesGroup = [ + cfg.ListOpt('discoverable_apis', + default=['all'], + help="A list of the enabled optional discoverable apis. " + "A single entry, all, indicates that all of these " + "features are expected to be enabled"), + cfg.BoolOpt('container_sync', + default=True, + help="Execute (old style) container-sync tests"), + cfg.BoolOpt('object_versioning', + default=True, + help="Execute object-versioning tests"), + cfg.BoolOpt('discoverability', + default=True, + help="Execute discoverability tests"), +] + +database_group = cfg.OptGroup(name='database', + title='Database Service Options') + +DatabaseGroup = [ + cfg.StrOpt('catalog_type', + default='database', + help="Catalog type of the Database service."), + cfg.StrOpt('db_flavor_ref', + default="1", + help="Valid primary flavor to use in database tests."), + cfg.StrOpt('db_current_version', + default="v1.0", + help="Current database version to use in database tests."), +] + +orchestration_group = cfg.OptGroup(name='orchestration', + title='Orchestration Service Options') + +OrchestrationGroup = [ + cfg.StrOpt('catalog_type', + default='orchestration', + help="Catalog type of the Orchestration service."), + cfg.StrOpt('region', + default='', + help="The orchestration region name to use. If empty, the " + "value of identity.region is used instead. 
If no such " + "region is found in the service catalog, the first found " + "one is used."), + cfg.StrOpt('endpoint_type', + default='publicURL', + choices=['public', 'admin', 'internal', + 'publicURL', 'adminURL', 'internalURL'], + help="The endpoint type to use for the orchestration service."), + cfg.StrOpt('stack_owner_role', default='heat_stack_owner', + help='Role required for users to be able to manage stacks'), + cfg.IntOpt('build_interval', + default=1, + help="Time in seconds between build status checks."), + cfg.IntOpt('build_timeout', + default=1200, + help="Timeout in seconds to wait for a stack to build."), + cfg.StrOpt('instance_type', + default='m1.micro', + help="Instance type for tests. Needs to be big enough for a " + "full OS plus the test workload"), + cfg.StrOpt('keypair_name', + help="Name of existing keypair to launch servers with."), + cfg.IntOpt('max_template_size', + default=524288, + help="Value must match heat configuration of the same name."), + cfg.IntOpt('max_resources_per_stack', + default=1000, + help="Value must match heat configuration of the same name."), +] + + +telemetry_group = cfg.OptGroup(name='telemetry', + title='Telemetry Service Options') + +TelemetryGroup = [ + cfg.StrOpt('catalog_type', + default='metering', + help="Catalog type of the Telemetry service."), + cfg.StrOpt('endpoint_type', + default='publicURL', + choices=['public', 'admin', 'internal', + 'publicURL', 'adminURL', 'internalURL'], + help="The endpoint type to use for the telemetry service."), + cfg.BoolOpt('too_slow_to_test', + default=True, + help="This variable is used as flag to enable " + "notification tests") +] + + +dashboard_group = cfg.OptGroup(name="dashboard", + title="Dashboard options") + +DashboardGroup = [ + cfg.StrOpt('dashboard_url', + default='http://localhost/', + help="Where the dashboard can be found"), + cfg.StrOpt('login_url', + default='http://localhost/auth/login/', + help="Login page for the dashboard"), +] + + 
+data_processing_group = cfg.OptGroup(name="data_processing", + title="Data Processing options") + +DataProcessingGroup = [ + cfg.StrOpt('catalog_type', + default='data_processing', + help="Catalog type of the data processing service."), + cfg.StrOpt('endpoint_type', + default='publicURL', + choices=['public', 'admin', 'internal', + 'publicURL', 'adminURL', 'internalURL'], + help="The endpoint type to use for the data processing " + "service."), +] + + +data_processing_feature_group = cfg.OptGroup( + name="data_processing-feature-enabled", + title="Enabled Data Processing features") + +DataProcessingFeaturesGroup = [ + cfg.ListOpt('plugins', + default=["vanilla", "hdp"], + help="List of enabled data processing plugins") +] + + +boto_group = cfg.OptGroup(name='boto', + title='EC2/S3 options') +BotoGroup = [ + cfg.StrOpt('ec2_url', + default="http://localhost:8773/services/Cloud", + help="EC2 URL"), + cfg.StrOpt('s3_url', + default="http://localhost:8080", + help="S3 URL"), + cfg.StrOpt('aws_secret', + help="AWS Secret Key", + secret=True), + cfg.StrOpt('aws_access', + help="AWS Access Key"), + cfg.StrOpt('aws_zone', + default="nova", + help="AWS Zone for EC2 tests"), + cfg.StrOpt('s3_materials_path', + default="/opt/stack/devstack/files/images/" + "s3-materials/cirros-0.3.0", + help="S3 Materials Path"), + cfg.StrOpt('ari_manifest', + default="cirros-0.3.0-x86_64-initrd.manifest.xml", + help="ARI Ramdisk Image manifest"), + cfg.StrOpt('ami_manifest', + default="cirros-0.3.0-x86_64-blank.img.manifest.xml", + help="AMI Machine Image manifest"), + cfg.StrOpt('aki_manifest', + default="cirros-0.3.0-x86_64-vmlinuz.manifest.xml", + help="AKI Kernel Image manifest"), + cfg.StrOpt('instance_type', + default="m1.tiny", + help="Instance type"), + cfg.IntOpt('http_socket_timeout', + default=3, + help="boto Http socket timeout"), + cfg.IntOpt('num_retries', + default=1, + help="boto num_retries on error"), + cfg.IntOpt('build_timeout', + default=60, + help="Status Change 
Timeout"), + cfg.IntOpt('build_interval', + default=1, + help="Status Change Test Interval"), +] + +stress_group = cfg.OptGroup(name='stress', title='Stress Test Options') + +StressGroup = [ + cfg.StrOpt('nova_logdir', + help='Directory containing log files on the compute nodes'), + cfg.IntOpt('max_instances', + default=16, + help='Maximum number of instances to create during test.'), + cfg.StrOpt('controller', + help='Controller host.'), + # new stress options + cfg.StrOpt('target_controller', + help='Controller host.'), + cfg.StrOpt('target_ssh_user', + help='ssh user.'), + cfg.StrOpt('target_private_key_path', + help='Path to private key.'), + cfg.StrOpt('target_logfiles', + help='regexp for list of log files.'), + cfg.IntOpt('log_check_interval', + default=60, + help='time (in seconds) between log file error checks.'), + cfg.IntOpt('default_thread_number_per_action', + default=4, + help='The number of threads created while stress test.'), + cfg.BoolOpt('leave_dirty_stack', + default=False, + help='Prevent the cleaning (tearDownClass()) between' + ' each stress test run if an exception occurs' + ' during this run.'), + cfg.BoolOpt('full_clean_stack', + default=False, + help='Allows a full cleaning process after a stress test.' 
+ ' Caution : this cleanup will remove every object of' + ' every tenant.') +] + + +scenario_group = cfg.OptGroup(name='scenario', title='Scenario Test Options') + +ScenarioGroup = [ + cfg.StrOpt('img_dir', + default='/opt/stack/new/devstack/files/images/' + 'cirros-0.3.1-x86_64-uec', + help='Directory containing image files'), + cfg.StrOpt('img_file', deprecated_name='qcow2_img_file', + default='cirros-0.3.1-x86_64-disk.img', + help='Image file name'), + cfg.StrOpt('img_file_2', deprecated_name='qcow2_img_file_2', + default='winxp.img', + help='Image file name 2'), + cfg.StrOpt('img_disk_format', + default='qcow2', + help='Image disk format'), + cfg.StrOpt('img_container_format', + default='bare', + help='Image container format'), + cfg.StrOpt('ami_img_file', + default='cirros-0.3.1-x86_64-blank.img', + help='AMI image file name'), + cfg.StrOpt('ari_img_file', + default='cirros-0.3.1-x86_64-initrd', + help='ARI image file name'), + cfg.StrOpt('aki_img_file', + default='cirros-0.3.1-x86_64-vmlinuz', + help='AKI image file name'), + cfg.StrOpt('ssh_user', + default='cirros', + help='ssh username for the image file'), + cfg.IntOpt( + 'large_ops_number', + default=0, + help="specifies how many resources to request at once. Used " + "for large operations testing."), + # TODO(yfried): add support for dhcpcd + cfg.StrOpt('dhcp_client', + default='udhcpc', + choices=["udhcpc", "dhclient"], + help='DHCP client used by images to renew DHCP lease. ' + 'If left empty, update operation will be skipped. 
' + 'Supported clients: "udhcpc", "dhclient"') +] + + +service_available_group = cfg.OptGroup(name="service_available", + title="Available OpenStack Services") + +ServiceAvailableGroup = [ + cfg.BoolOpt('cinder', + default=True, + help="Whether or not cinder is expected to be available"), + cfg.BoolOpt('neutron', + default=False, + help="Whether or not neutron is expected to be available"), + cfg.BoolOpt('glance', + default=True, + help="Whether or not glance is expected to be available"), + cfg.BoolOpt('swift', + default=True, + help="Whether or not swift is expected to be available"), + cfg.BoolOpt('nova', + default=True, + help="Whether or not nova is expected to be available"), + cfg.BoolOpt('heat', + default=False, + help="Whether or not Heat is expected to be available"), + cfg.BoolOpt('ceilometer', + default=True, + help="Whether or not Ceilometer is expected to be available"), + cfg.BoolOpt('horizon', + default=True, + help="Whether or not Horizon is expected to be available"), + cfg.BoolOpt('sahara', + default=False, + help="Whether or not Sahara is expected to be available"), + cfg.BoolOpt('ironic', + default=False, + help="Whether or not Ironic is expected to be available"), + cfg.BoolOpt('trove', + default=False, + help="Whether or not Trove is expected to be available"), + cfg.BoolOpt('zaqar', + default=False, + help="Whether or not Zaqar is expected to be available"), +] + +debug_group = cfg.OptGroup(name="debug", + title="Debug System") + +DebugGroup = [ + cfg.StrOpt('trace_requests', + default='', + help="""A regex to determine which requests should be traced. + +This is a regex to match the caller for rest client requests to be able to +selectively trace calls out of specific classes and methods. It largely +exists for test development, and is not expected to be used in a real deploy +of tempest. This will be matched against the discovered ClassName:method +in the test environment. 
+ +Expected values for this field are: + + * ClassName:test_method_name - traces one test_method + * ClassName:setUp(Class) - traces specific setup functions + * ClassName:tearDown(Class) - traces specific teardown functions + * ClassName:_run_cleanups - traces the cleanup functions + +If nothing is specified, this feature is not enabled. To trace everything +specify .* as the regex. +""") +] + +input_scenario_group = cfg.OptGroup(name="input-scenario", + title="Filters and values for" + " input scenarios") + +InputScenarioGroup = [ + cfg.StrOpt('image_regex', + default='^cirros-0.3.1-x86_64-uec$', + help="Matching images become parameters for scenario tests"), + cfg.StrOpt('flavor_regex', + default='^m1.nano$', + help="Matching flavors become parameters for scenario tests"), + cfg.StrOpt('non_ssh_image_regex', + default='^.*[Ww]in.*$', + help="SSH verification in tests is skipped " + "for matching images"), + cfg.StrOpt('ssh_user_regex', + default="[[\"^.*[Cc]irros.*$\", \"cirros\"]]", + help="List of users mapped to regex " + "to matching image names."), +] + + +baremetal_group = cfg.OptGroup(name='baremetal', + title='Baremetal provisioning service options', + help='When enabling baremetal tests, Nova ' + 'must be configured to use the Ironic ' + 'driver. 
The following parameters for the ' + '[compute] section must be disabled: ' + 'console_output, interface_attach, ' + 'live_migration, pause, rescue, resize, ' + 'shelve, snapshot, and suspend') + +BaremetalGroup = [ + cfg.StrOpt('catalog_type', + default='baremetal', + help="Catalog type of the baremetal provisioning service"), + cfg.BoolOpt('driver_enabled', + default=False, + help="Whether the Ironic nova-compute driver is enabled"), + cfg.StrOpt('driver', + default='fake', + help="Driver name which Ironic uses"), + cfg.StrOpt('endpoint_type', + default='publicURL', + choices=['public', 'admin', 'internal', + 'publicURL', 'adminURL', 'internalURL'], + help="The endpoint type to use for the baremetal provisioning " + "service"), + cfg.IntOpt('active_timeout', + default=300, + help="Timeout for Ironic node to completely provision"), + cfg.IntOpt('association_timeout', + default=30, + help="Timeout for association of Nova instance and Ironic " + "node"), + cfg.IntOpt('power_timeout', + default=60, + help="Timeout for Ironic power transitions."), + cfg.IntOpt('unprovision_timeout', + default=60, + help="Timeout for unprovisioning an Ironic node.") +] + +cli_group = cfg.OptGroup(name='cli', title="cli Configuration Options") + +CLIGroup = [ + cfg.BoolOpt('enabled', + default=True, + help="enable cli tests"), + cfg.StrOpt('cli_dir', + default='/usr/local/bin', + help="directory where python client binaries are located"), + cfg.BoolOpt('has_manage', + default=True, + help=("Whether the tempest run location has access to the " + "*-manage commands. In a pure blackbox environment " + "it will not.")), + cfg.IntOpt('timeout', + default=15, + help="Number of seconds to wait on a CLI timeout"), +] + +negative_group = cfg.OptGroup(name='negative', title="Negative Test Options") + +NegativeGroup = [ + cfg.StrOpt('test_generator', + default='tempest.common.' 
+ + 'generator.negative_generator.NegativeTestGenerator', + help="Test generator class for all negative tests"), +] + +tecs_group = cfg.OptGroup(name='tecs', title="TECS Test Options") + +TecsGroup = [ + cfg.StrOpt('host_ip', + default='', + help="host ip"), + cfg.StrOpt('host_username', + help="host username"), + cfg.StrOpt('host_password', + help="host password"), + cfg.StrOpt('v4_flavor_id', + help="V4 flavor for V4 image tests"), + cfg.StrOpt('v4_omcnet_id', + help="V4 omcnet - flat for V4 VM tests"), + cfg.StrOpt('v4_basenet_id', + help="V4 basenet - vlan for V4 VM tests"), + cfg.StrOpt('v4_fabricnet_id', + help="V4 fabricnet for V4 VM tests"), + cfg.StrOpt('v4_outnet_id', + help="V4 outnet for V4 VM tests"), + cfg.StrOpt('network_image_id', + help="Network image for network tests"), + cfg.StrOpt('network_flavor_id', + help="Network flavor for network tests"), + cfg.StrOpt('network_ssh_user', + help="Network SSH user for network tests"), + cfg.StrOpt('network_ssh_pwd', + help="Network SSH password for network tests"), + cfg.StrOpt('network_server_ipaddr', + help="Network outside server ip address for network tests"), + cfg.StrOpt('network_server_ssh_user', + help="Network outside server SSH user for network tests"), + cfg.StrOpt('network_server_ssh_pwd', + help="Network outside server SSH password for network tests"), + cfg.StrOpt('network_server_ftp_user', + help="Network outside server FTP user for network tests"), + cfg.StrOpt('network_server_ftp_pwd', + help="Network outside server FTP password for network tests"), + cfg.StrOpt('network_lb_provider', + help="Network lbaas provider"), +] + +daisy_group = cfg.OptGroup(name='daisy', title="Daisy Test Tecs") +DaisyGroup = [ + cfg.StrOpt('daisy_endpoint', + default='', + help="daisy_endpoint"), + cfg.StrOpt('cluster_id', + default='', + help="cluster_id"), + cfg.StrOpt('install_ha_ip', + default='', + help="install_ha_ip"), + cfg.StrOpt('install_ha_netmask', + default='', + help="install_ha_netmask"), + 
cfg.StrOpt('install_ha_vip', + default='', + help="install_ha_vip"), + cfg.StrOpt('time_out', + default=1080, + help="time_out"), + cfg.StrOpt('install_ha_eth_name', + default='', + help="install_ha_eth_name"), + cfg.StrOpt('install_ha_mac', + default='', + help="install_ha_mac") +] + +_opts = [ + (auth_group, AuthGroup), + (compute_group, ComputeGroup), + (compute_features_group, ComputeFeaturesGroup), + (identity_group, IdentityGroup), + (identity_feature_group, IdentityFeatureGroup), + (image_group, ImageGroup), + (image_feature_group, ImageFeaturesGroup), + (network_group, NetworkGroup), + (network_feature_group, NetworkFeaturesGroup), + (messaging_group, MessagingGroup), + (validation_group, ValidationGroup), + (volume_group, VolumeGroup), + (volume_feature_group, VolumeFeaturesGroup), + (object_storage_group, ObjectStoreGroup), + (object_storage_feature_group, ObjectStoreFeaturesGroup), + (database_group, DatabaseGroup), + (orchestration_group, OrchestrationGroup), + (telemetry_group, TelemetryGroup), + (dashboard_group, DashboardGroup), + (data_processing_group, DataProcessingGroup), + (data_processing_feature_group, DataProcessingFeaturesGroup), + (boto_group, BotoGroup), + (stress_group, StressGroup), + (scenario_group, ScenarioGroup), + (service_available_group, ServiceAvailableGroup), + (debug_group, DebugGroup), + (baremetal_group, BaremetalGroup), + (input_scenario_group, InputScenarioGroup), + (cli_group, CLIGroup), + (negative_group, NegativeGroup), + (tecs_group, TecsGroup), + (daisy_group, DaisyGroup) +] + + +def register_opts(): + for g, o in _opts: + register_opt_group(_CONF, g, o) + + +def list_opts(): + """Return a list of oslo.config options available. + + The purpose of this is to allow tools like the Oslo sample config file + generator to discover the options exposed to users. 
+ """ + return [(g.name, o) for g, o in _opts] + + +# this should never be called outside of this class +class TempestConfigPrivate(object): + """Provides OpenStack configuration information.""" + + DEFAULT_CONFIG_DIR = os.path.join( + os.path.abspath(os.path.dirname(os.path.dirname(__file__))), + "etc") + + DEFAULT_CONFIG_FILE = "tempest.conf" + + def __getattr__(self, attr): + # Handles config options from the default group + return getattr(_CONF, attr) + + def _set_attrs(self): + self.auth = _CONF.auth + self.compute = _CONF.compute + self.compute_feature_enabled = _CONF['compute-feature-enabled'] + self.identity = _CONF.identity + self.identity_feature_enabled = _CONF['identity-feature-enabled'] + self.image = _CONF.image + self.image_feature_enabled = _CONF['image-feature-enabled'] + self.network = _CONF.network + self.network_feature_enabled = _CONF['network-feature-enabled'] + self.validation = _CONF.validation + self.volume = _CONF.volume + self.volume_feature_enabled = _CONF['volume-feature-enabled'] + self.object_storage = _CONF['object-storage'] + self.object_storage_feature_enabled = _CONF[ + 'object-storage-feature-enabled'] + self.database = _CONF.database + self.orchestration = _CONF.orchestration + self.messaging = _CONF.messaging + self.telemetry = _CONF.telemetry + self.dashboard = _CONF.dashboard + self.data_processing = _CONF.data_processing + self.data_processing_feature_enabled = _CONF[ + 'data_processing-feature-enabled'] + self.boto = _CONF.boto + self.stress = _CONF.stress + self.scenario = _CONF.scenario + self.service_available = _CONF.service_available + self.debug = _CONF.debug + self.baremetal = _CONF.baremetal + self.input_scenario = _CONF['input-scenario'] + self.cli = _CONF.cli + self.negative = _CONF.negative + _CONF.set_default('domain_name', self.identity.admin_domain_name, + group='identity') + _CONF.set_default('alt_domain_name', self.identity.admin_domain_name, + group='identity') + self.tecs = cfg.CONF.tecs + self.daisy = 
cfg.CONF.daisy + + def __init__(self, parse_conf=True, config_path=None): + """Initialize a configuration from a conf directory and conf file.""" + super(TempestConfigPrivate, self).__init__() + config_files = [] + failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE + + if config_path: + path = config_path + else: + # Environment variables override defaults... + conf_dir = os.environ.get('TEMPEST_CONFIG_DIR', + self.DEFAULT_CONFIG_DIR) + conf_file = os.environ.get('TEMPEST_CONFIG', + self.DEFAULT_CONFIG_FILE) + + path = os.path.join(conf_dir, conf_file) + + if not os.path.isfile(path): + path = failsafe_path + + # only parse the config file if we expect one to exist. This is needed + # to remove an issue with the config file up to date checker. + if parse_conf: + config_files.append(path) + logging.register_options(_CONF) + if os.path.isfile(path): + _CONF([], project='tempest', default_config_files=config_files) + else: + _CONF([], project='tempest') + logging.setup(_CONF, 'tempest') + LOG = logging.getLogger('tempest') + LOG.info("Using tempest config file %s" % path) + register_opts() + self._set_attrs() + if parse_conf: + _CONF.log_opt_values(LOG, std_logging.DEBUG) + + +class TempestConfigProxy(object): + _config = None + _path = None + + _extra_log_defaults = [ + ('paramiko.transport', std_logging.INFO), + ('requests.packages.urllib3.connectionpool', std_logging.WARN), + ] + + def _fix_log_levels(self): + """Tweak the oslo log defaults.""" + for name, level in self._extra_log_defaults: + std_logging.getLogger(name).setLevel(level) + + def __getattr__(self, attr): + if not self._config: + self._fix_log_levels() + self._config = TempestConfigPrivate(config_path=self._path) + + return getattr(self._config, attr) + + def set_config_path(self, path): + self._path = path + + +CONF = TempestConfigProxy() diff --git a/test/tempest/tempest/test.py b/test/tempest/tempest/test.py new file mode 100755 index 00000000..080c89bb --- /dev/null +++ 
b/test/tempest/tempest/test.py @@ -0,0 +1,768 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import atexit +import functools +import json +import os +import re +import sys +import time +import urllib +import uuid + +import fixtures +from oslo_log import log as logging +from oslo_utils import importutils +import six +import testscenarios +import testtools + +from tempest import clients +from tempest.common import credentials +from tempest.common import fixed_network +import tempest.common.generator.valid_generator as valid +from tempest import config +from tempest import exceptions + +from tempest_lib.common.utils import misc + +LOG = logging.getLogger(__name__) + +CONF = config.CONF + + +def attr(*args, **kwargs): + """A decorator which applies the testtools attr decorator + + This decorator applies the testtools.testcase.attr if it is in the list of + attributes to testtools we want to apply. 
+ """ + + def decorator(f): + if 'type' in kwargs and isinstance(kwargs['type'], str): + f = testtools.testcase.attr(kwargs['type'])(f) + elif 'type' in kwargs and isinstance(kwargs['type'], list): + for attr in kwargs['type']: + f = testtools.testcase.attr(attr)(f) + return f + + return decorator + + +def idempotent_id(id): + """Stub for metadata decorator""" + if not isinstance(id, six.string_types): + raise TypeError('Test idempotent_id must be string not %s' + '' % type(id).__name__) + uuid.UUID(id) + + def decorator(f): + f = testtools.testcase.attr('id-%s' % id)(f) + if f.__doc__: + f.__doc__ = 'Test idempotent id: %s\n%s' % (id, f.__doc__) + else: + f.__doc__ = 'Test idempotent id: %s' % id + return f + return decorator + + +def get_service_list(): + service_list = { + 'compute': CONF.service_available.nova, + 'image': CONF.service_available.glance, + 'baremetal': CONF.service_available.ironic, + 'volume': CONF.service_available.cinder, + 'orchestration': CONF.service_available.heat, + # NOTE(mtreinish) nova-network will provide networking functionality + # if neutron isn't available, so always set to True. + 'network': True, + 'identity': True, + 'object_storage': CONF.service_available.swift, + 'dashboard': CONF.service_available.horizon, + 'telemetry': CONF.service_available.ceilometer, + 'data_processing': CONF.service_available.sahara, + 'database': CONF.service_available.trove + } + return service_list + + +def services(*args, **kwargs): + """A decorator used to set an attr for each service used in a test case + + This decorator applies a testtools attr for each service that gets + exercised by a test case. 
+ """ + def decorator(f): + services = ['compute', 'image', 'baremetal', 'volume', 'orchestration', + 'network', 'identity', 'object_storage', 'dashboard', + 'telemetry', 'data_processing', 'database'] + for service in args: + if service not in services: + raise exceptions.InvalidServiceTag('%s is not a valid ' + 'service' % service) + attr(type=list(args))(f) + + @functools.wraps(f) + def wrapper(self, *func_args, **func_kwargs): + service_list = get_service_list() + + for service in args: + if not service_list[service]: + msg = 'Skipped because the %s service is not available' % ( + service) + raise testtools.TestCase.skipException(msg) + return f(self, *func_args, **func_kwargs) + return wrapper + return decorator + + +def stresstest(*args, **kwargs): + """Add stress test decorator + + For all functions with this decorator a attr stress will be + set automatically. + + @param class_setup_per: allowed values are application, process, action + ``application``: once in the stress job lifetime + ``process``: once in the worker process lifetime + ``action``: on each action + @param allow_inheritance: allows inheritance of this attribute + """ + def decorator(f): + if 'class_setup_per' in kwargs: + setattr(f, "st_class_setup_per", kwargs['class_setup_per']) + else: + setattr(f, "st_class_setup_per", 'process') + if 'allow_inheritance' in kwargs: + setattr(f, "st_allow_inheritance", kwargs['allow_inheritance']) + else: + setattr(f, "st_allow_inheritance", False) + attr(type='stress')(f) + return f + return decorator + + +def requires_ext(*args, **kwargs): + """A decorator to skip tests if an extension is not enabled + + @param extension + @param service + """ + def decorator(func): + @functools.wraps(func) + def wrapper(*func_args, **func_kwargs): + if not is_extension_enabled(kwargs['extension'], + kwargs['service']): + msg = "Skipped because %s extension: %s is not enabled" % ( + kwargs['service'], kwargs['extension']) + raise testtools.TestCase.skipException(msg) + 
return func(*func_args, **func_kwargs) + return wrapper + return decorator + + +def is_extension_enabled(extension_name, service): + """A function that will check the list of enabled extensions from config + + """ + config_dict = { + 'compute': CONF.compute_feature_enabled.api_extensions, + 'volume': CONF.volume_feature_enabled.api_extensions, + 'network': CONF.network_feature_enabled.api_extensions, + 'object': CONF.object_storage_feature_enabled.discoverable_apis, + } + if len(config_dict[service]) == 0: + return False + if config_dict[service][0] == 'all': + return True + if extension_name in config_dict[service]: + return True + return False + + +at_exit_set = set() + + +def validate_tearDownClass(): + if at_exit_set: + LOG.error( + "tearDownClass does not call the super's " + "tearDownClass in these classes: \n" + + str(at_exit_set)) + + +atexit.register(validate_tearDownClass) + + +class BaseTestCase(testtools.testcase.WithAttributes, + testtools.TestCase): + """The test base class defines Tempest framework for class level fixtures. + `setUpClass` and `tearDownClass` are defined here and cannot be overwritten + by subclasses (enforced via hacking rule T105). + + Set-up is split in a series of steps (setup stages), which can be + overwritten by test classes. Set-up stages are: + - skip_checks + - setup_credentials + - setup_clients + - resource_setup + + Tear-down is also split in a series of steps (teardown stages), which are + stacked for execution only if the corresponding setup stage had been + reached during the setup phase. Tear-down stages are: + - clear_isolated_creds (defined in the base test class) + - resource_cleanup + """ + + setUpClassCalled = False + _service = None + + # NOTE(andreaf) credentials holds a list of the credentials to be allocated + # at class setup time. 
Credential types can be 'primary', 'alt' or 'admin' + credentials = [] + network_resources = {} + + # NOTE(sdague): log_format is defined inline here instead of using the oslo + # default because going through the config path recouples config to the + # stress tests too early, and depending on testr order will fail unit tests + log_format = ('%(asctime)s %(process)d %(levelname)-8s ' + '[%(name)s] %(message)s') + + @classmethod + def setUpClass(cls): + # It should never be overridden by descendants + if hasattr(super(BaseTestCase, cls), 'setUpClass'): + super(BaseTestCase, cls).setUpClass() + cls.setUpClassCalled = True + # Stack of (name, callable) to be invoked in reverse order at teardown + cls.teardowns = [] + # All the configuration checks that may generate a skip + cls.skip_checks() + try: + # Allocation of all required credentials and client managers + cls.teardowns.append(('credentials', cls.clear_isolated_creds)) + cls.setup_credentials() + # Shortcuts to clients + cls.setup_clients() + # Additional class-wide test resources + cls.teardowns.append(('resources', cls.resource_cleanup)) + cls.resource_setup() + except Exception: + etype, value, trace = sys.exc_info() + LOG.info("%s raised in %s.setUpClass. Invoking tearDownClass." 
% ( + etype, cls.__name__)) + cls.tearDownClass() + try: + six.reraise(etype, value, trace) + finally: + del trace # to avoid circular refs + + @classmethod + def tearDownClass(cls): + at_exit_set.discard(cls) + # It should never be overridden by descendants + if hasattr(super(BaseTestCase, cls), 'tearDownClass'): + super(BaseTestCase, cls).tearDownClass() + # Save any existing exception, we always want to re-raise the original + # exception only + etype, value, trace = sys.exc_info() + # If there was no exception during setup we shall re-raise the first + # exception in teardown + re_raise = (etype is None) + while cls.teardowns: + name, teardown = cls.teardowns.pop() + # Catch any exception in tearDown so we can re-raise the original + # exception at the end + try: + teardown() + except Exception as te: + sys_exec_info = sys.exc_info() + tetype = sys_exec_info[0] + # TODO(andreaf): Till we have the ability to cleanup only + # resources that were successfully setup in resource_cleanup, + # log AttributeError as info instead of exception. 
+ if tetype is AttributeError and name == 'resources': + LOG.info("tearDownClass of %s failed: %s" % (name, te)) + else: + LOG.exception("teardown of %s failed: %s" % (name, te)) + if not etype: + etype, value, trace = sys_exec_info + # If exceptions were raised during teardown, an not before, re-raise + # the first one + if re_raise and etype is not None: + try: + six.reraise(etype, value, trace) + finally: + del trace # to avoid circular refs + + @classmethod + def sdn_skip_check(cls): + if not hasattr(cls, 'is_sdn'): + BaseTestCase.is_sdn = misc.sdn_skip_check() + + if BaseTestCase.is_sdn: + raise testtools.TestCase.skipException( + "skip : SDN not support this function") + + @classmethod + def dvs_skip_check(cls): + if not hasattr(cls, 'is_dvs'): + BaseTestCase.is_dvs = misc.dvs_skip_check() + + if BaseTestCase.is_dvs: + raise testtools.TestCase.skipException( + "skip : DVS not support this function") + + @classmethod + def lvm_skip_check(cls): + if not hasattr(cls, 'is_lvm'): + BaseTestCase.is_lvm = misc.lvm_skip_check() + + if BaseTestCase.is_lvm: + raise testtools.TestCase.skipException("skip : LVM " + + " not support this function") + + @classmethod + def connectvm_skip_check(cls): + if not hasattr(cls, 'can_connect_vm'): + BaseTestCase.can_connect_vm =\ + (CONF.network.tenant_networks_reachable or + CONF.compute.use_floatingip_for_ssh) + + if not BaseTestCase.can_connect_vm: + raise testtools.TestCase.skipException("skip : no network " + "reachable to ssh vm") + + @classmethod + def skip_checks(cls): + """Class level skip checks. Subclasses verify in here all + conditions that might prevent the execution of the entire test class. + Checks implemented here may not make use API calls, and should rely on + configuration alone. + In general skip checks that require an API call are discouraged. + If one is really needed it may be implemented either in the + resource_setup or at test level. 
+ """ + if 'admin' in cls.credentials and not credentials.is_admin_available(): + msg = "Missing Identity Admin API credentials in configuration." + raise cls.skipException(msg) + if 'alt' is cls.credentials and not credentials.is_alt_available(): + msg = "Missing a 2nd set of API credentials in configuration." + raise cls.skipException(msg) + + @classmethod + def setup_credentials(cls): + """Allocate credentials and the client managers from them. + A test class that requires network resources must override + setup_credentials and defined the required resources before super + is invoked. + """ + for credentials_type in cls.credentials: + # This may raise an exception in case credentials are not available + # In that case we want to let the exception through and the test + # fail accordingly + manager = cls.get_client_manager( + credential_type=credentials_type) + setattr(cls, 'os_%s' % credentials_type, manager) + # Setup some common aliases + # TODO(andreaf) The aliases below are a temporary hack + # to avoid changing too much code in one patch. They should + # be removed eventually + if credentials_type == 'primary': + cls.os = cls.manager = cls.os_primary + if credentials_type == 'admin': + cls.os_adm = cls.admin_manager = cls.os_admin + if credentials_type == 'alt': + cls.alt_manager = cls.os_alt + + @classmethod + def setup_clients(cls): + """Create links to the clients into the test object.""" + # TODO(andreaf) There is a fair amount of code that could me moved from + # base / test classes in here. Ideally tests should be able to only + # specify which client is `client` and nothing else. + pass + + @classmethod + def resource_setup(cls): + """Class level resource setup for test cases. + """ + pass + + @classmethod + def resource_cleanup(cls): + """Class level resource cleanup for test cases. + Resource cleanup must be able to handle the case of partially setup + resources, in case a failure during `resource_setup` should happen. 
+ """ + pass + + def setUp(self): + super(BaseTestCase, self).setUp() + if not self.setUpClassCalled: + raise RuntimeError("setUpClass does not calls the super's" + "setUpClass in the " + + self.__class__.__name__) + at_exit_set.add(self.__class__) + test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) + try: + test_timeout = int(test_timeout) + except ValueError: + test_timeout = 0 + if test_timeout > 0: + self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) + + if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or + os.environ.get('OS_STDOUT_CAPTURE') == '1'): + stdout = self.useFixture(fixtures.StringStream('stdout')).stream + self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) + if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or + os.environ.get('OS_STDERR_CAPTURE') == '1'): + stderr = self.useFixture(fixtures.StringStream('stderr')).stream + self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) + if (os.environ.get('OS_LOG_CAPTURE') != 'False' and + os.environ.get('OS_LOG_CAPTURE') != '0'): + self.useFixture(fixtures.LoggerFixture(nuke_handlers=False, + format=self.log_format, + level=None)) + + @classmethod + def get_client_manager(cls, identity_version=None, + credential_type='primary'): + """ + Returns an OpenStack client manager + """ + force_tenant_isolation = getattr(cls, 'force_tenant_isolation', None) + identity_version = identity_version or CONF.identity.auth_version + + if (not hasattr(cls, 'isolated_creds') or + not cls.isolated_creds.name == cls.__name__): + cls.isolated_creds = credentials.get_isolated_credentials( + name=cls.__name__, network_resources=cls.network_resources, + force_tenant_isolation=force_tenant_isolation, + identity_version=identity_version + ) + + credentials_method = 'get_%s_creds' % credential_type + if hasattr(cls.isolated_creds, credentials_method): + creds = getattr(cls.isolated_creds, credentials_method)() + else: + raise exceptions.InvalidCredentials( + "Invalid credentials type %s" % 
credential_type) + os = clients.Manager(credentials=creds, service=cls._service) + return os + + @classmethod + def clear_isolated_creds(cls): + """ + Clears isolated creds if set + """ + if hasattr(cls, 'isolated_creds'): + cls.isolated_creds.clear_isolated_creds() + + @classmethod + def set_network_resources(cls, network=False, router=False, subnet=False, + dhcp=False): + """Specify which network resources should be created + + @param network + @param router + @param subnet + @param dhcp + """ + # network resources should be set only once from callers + # in order to ensure that even if it's called multiple times in + # a chain of overloaded methods, the attribute is set only + # in the leaf class + if not cls.network_resources: + cls.network_resources = { + 'network': network, + 'router': router, + 'subnet': subnet, + 'dhcp': dhcp} + + @classmethod + def get_tenant_network(cls): + """Get the network to be used in testing + + :return: network dict including 'id' and 'name' + """ + # Make sure isolated_creds exists and get a network client + networks_client = cls.get_client_manager().networks_client + isolated_creds = getattr(cls, 'isolated_creds', None) + # In case of nova network, isolated tenants are not able to list the + # network configured in fixed_network_name, even if the can use it + # for their servers, so using an admin network client to validate + # the network name + if (not CONF.service_available.neutron and + credentials.is_admin_available()): + admin_creds = isolated_creds.get_admin_creds() + networks_client = clients.Manager(admin_creds).networks_client + return fixed_network.get_tenant_network(isolated_creds, + networks_client) + + def assertEmpty(self, list, msg=None): + self.assertTrue(len(list) == 0, msg) + + def assertNotEmpty(self, list, msg=None): + self.assertTrue(len(list) > 0, msg) + + def assertRaisesMessage(self, exc, msg, func, *args, **kwargs): + try: + func(*args, **kwargs) + except Exception as e: + self.assertEqual(msg, str(e)) + 
self.assertTrue(isinstance(e, exc), + "Expected %s, got %s" % (exc, type(e))) + + +class NegativeAutoTest(BaseTestCase): + + _resources = {} + + @classmethod + def setUpClass(cls): + super(NegativeAutoTest, cls).setUpClass() + os = cls.get_client_manager(credential_type='primary') + cls.client = os.negative_client + + @staticmethod + def load_tests(*args): + """ + Wrapper for testscenarios to set the mandatory scenarios variable + only in case a real test loader is in place. Will be automatically + called in case the variable "load_tests" is set. + """ + if getattr(args[0], 'suiteClass', None) is not None: + loader, standard_tests, pattern = args + else: + standard_tests, module, loader = args + for test in testtools.iterate_tests(standard_tests): + schema = getattr(test, '_schema', None) + if schema is not None: + setattr(test, 'scenarios', + NegativeAutoTest.generate_scenario(schema)) + return testscenarios.load_tests_apply_scenarios(*args) + + @staticmethod + def generate_scenario(description): + """ + Generates the test scenario list for a given description. + + :param description: A file or dictionary with the following entries: + name (required) name for the api + http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE + url (required) the url to be appended to the catalog url with '%s' + for each resource mentioned + resources: (optional) A list of resource names such as "server", + "flavor", etc. with an element for each '%s' in the url. This + method will call self.get_resource for each element when + constructing the positive test case template so negative + subclasses are expected to return valid resource ids when + appropriate. + json-schema (optional) A valid json schema that will be used to + create invalid data for the api calls. For "GET" and "HEAD", + the data is used to generate query strings appended to the url, + otherwise for the body of the http call. 
+ """ + LOG.debug(description) + generator = importutils.import_class( + CONF.negative.test_generator)() + generator.validate_schema(description) + schema = description.get("json-schema", None) + resources = description.get("resources", []) + scenario_list = [] + expected_result = None + for resource in resources: + if isinstance(resource, dict): + expected_result = resource['expected_result'] + resource = resource['name'] + LOG.debug("Add resource to test %s" % resource) + scn_name = "inv_res_%s" % (resource) + scenario_list.append((scn_name, {"resource": (resource, + str(uuid.uuid4())), + "expected_result": expected_result + })) + if schema is not None: + for scenario in generator.generate_scenarios(schema): + scenario_list.append((scenario['_negtest_name'], + scenario)) + LOG.debug(scenario_list) + return scenario_list + + def execute(self, description): + """ + Execute a http call on an api that are expected to + result in client errors. First it uses invalid resources that are part + of the url, and then invalid data for queries and http request bodies. + + :param description: A json file or dictionary with the following + entries: + name (required) name for the api + http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE + url (required) the url to be appended to the catalog url with '%s' + for each resource mentioned + resources: (optional) A list of resource names such as "server", + "flavor", etc. with an element for each '%s' in the url. This + method will call self.get_resource for each element when + constructing the positive test case template so negative + subclasses are expected to return valid resource ids when + appropriate. + json-schema (optional) A valid json schema that will be used to + create invalid data for the api calls. For "GET" and "HEAD", + the data is used to generate query strings appended to the url, + otherwise for the body of the http call. 
+ + """ + LOG.info("Executing %s" % description["name"]) + LOG.debug(description) + generator = importutils.import_class( + CONF.negative.test_generator)() + schema = description.get("json-schema", None) + method = description["http-method"] + url = description["url"] + expected_result = None + if "default_result_code" in description: + expected_result = description["default_result_code"] + + resources = [self.get_resource(r) for + r in description.get("resources", [])] + + if hasattr(self, "resource"): + # Note(mkoderer): The resources list already contains an invalid + # entry (see get_resource). + # We just send a valid json-schema with it + valid_schema = None + if schema: + valid_schema = \ + valid.ValidTestGenerator().generate_valid(schema) + new_url, body = self._http_arguments(valid_schema, url, method) + elif hasattr(self, "_negtest_name"): + schema_under_test = \ + valid.ValidTestGenerator().generate_valid(schema) + local_expected_result = \ + generator.generate_payload(self, schema_under_test) + if local_expected_result is not None: + expected_result = local_expected_result + new_url, body = \ + self._http_arguments(schema_under_test, url, method) + else: + raise Exception("testscenarios are not active. 
Please make sure " + "that your test runner supports the load_tests " + "mechanism") + + if "admin_client" in description and description["admin_client"]: + if not credentials.is_admin_available(): + msg = ("Missing Identity Admin API credentials in" + "configuration.") + raise self.skipException(msg) + creds = self.isolated_creds.get_admin_creds() + os_adm = clients.Manager(credentials=creds) + client = os_adm.negative_client + else: + client = self.client + resp, resp_body = client.send_request(method, new_url, + resources, body=body) + self._check_negative_response(expected_result, resp.status, resp_body) + + def _http_arguments(self, json_dict, url, method): + LOG.debug("dict: %s url: %s method: %s" % (json_dict, url, method)) + if not json_dict: + return url, None + elif method in ["GET", "HEAD", "PUT", "DELETE"]: + return "%s?%s" % (url, urllib.urlencode(json_dict)), None + else: + return url, json.dumps(json_dict) + + def _check_negative_response(self, expected_result, result, body): + self.assertTrue(result >= 400 and result < 500 and result != 413, + "Expected client error, got %s:%s" % + (result, body)) + self.assertTrue(expected_result is None or expected_result == result, + "Expected %s, got %s:%s" % + (expected_result, result, body)) + + @classmethod + def set_resource(cls, name, resource): + """ + This function can be used in setUpClass context to register a resoruce + for a test. + + :param name: The name of the kind of resource such as "flavor", "role", + etc. + :resource: The id of the resource + """ + cls._resources[name] = resource + + def get_resource(self, name): + """ + Return a valid uuid for a type of resource. If a real resource is + needed as part of a url then this method should return one. Otherwise + it can return None. + + :param name: The name of the kind of resource such as "flavor", "role", + etc. 
+ """ + if isinstance(name, dict): + name = name['name'] + if hasattr(self, "resource") and self.resource[0] == name: + LOG.debug("Return invalid resource (%s) value: %s" % + (self.resource[0], self.resource[1])) + return self.resource[1] + if name in self._resources: + return self._resources[name] + return None + + +def SimpleNegativeAutoTest(klass): + """ + This decorator registers a test function on basis of the class name. + """ + @attr(type=['negative']) + def generic_test(self): + if hasattr(self, '_schema'): + self.execute(self._schema) + + cn = klass.__name__ + cn = cn.replace('JSON', '') + cn = cn.replace('Test', '') + # NOTE(mkoderer): replaces uppercase chars inside the class name with '_' + lower_cn = re.sub('(?>$log_path/install_venv.err + exit 1 +fi + +rm -rf /etc/yum.repos.d/opencos.repo +opencos_repo=/etc/yum.repos.d/opencos.repo +echo "Create $opencos_repo ..." +echo "[opencos]">>$opencos_repo +echo "name=opencos">>$opencos_repo +echo "baseurl=http://10.43.177.17/pypi/">>$opencos_repo +echo "enabled=1">>$opencos_repo +echo "gpgcheck=0">>$opencos_repo + +rm -rf ~/.pip/pip.conf +pip_config=~/.pip/pip.conf +echo "Create $pip_config ..." +if [ ! -d `dirname $pip_config` ]; then + mkdir -p `dirname $pip_config` +fi +echo "[global]">$pip_config +echo "find-links = http://10.43.177.17/pypi">>$pip_config +echo "no-index = true">>$pip_config + +rm -rf ~/.pydistutils.cfg +pydistutils_cfg=~/.pydistutils.cfg +echo "Create $pydistutils_cfg ..." +echo "[easy_install]">$pydistutils_cfg +echo "index_url = http://10.43.177.17/pypi">>$pydistutils_cfg + + +modules=(virtualenv mariadb-devel testtools testrepository testresources fixtures python-subunit testscenarios postgresql-devel oslo.serialization oslo.utils libffi-devel + cyrus-sasl-devel sqlite-devel libxslt-devel openldap-devel) + +yum clean all 1>/dev/null 2>/dev/null +# for virtual environment demand pip version>=1.6, so install it whether installed. 
+yum --disablerepo=* --enablerepo=opencos install -y pip extras 1>$log_path/$mod.log 2>$log_path/$mod.err +# install modules +echo "install modules">>$log_path/install_venv.log +for mod in ${modules[@]}; do + echo -n "yum install $mod ... " + already_install=`rpm -qa | grep $mod` + if [ "$already_install" == "" ]; then + yum --disablerepo=* --enablerepo=opencos install -y $mod 1>$log_path/$mod.log 2>$log_path/$mod.err + if [ -s $log_path/$mod.err ]; then + echo "fail!" + echo "Install $mod fail! Please manually using the yum installation package,commond is \"yum install $mod\"">>$log_path/install_venv.err + # exit 1 + else + echo "ok(install finish)" + fi + else + echo "ok(already exist)" + fi +done + +#echo "install venv ... ">>$log_path/install_venv.log +#chmod +x tools/* +#python tools/install_venv.py 1>$log_path/install_venv.log 2>$log_path/install_venv.err +#if grep "development environment setup is complete." $log_path/install_venv.log +# then +# echo "development environment setup is complete...">>$log_path/install_venv.log +#else +# echo "development environment setup is fail,please check $log_path/install_venv.err" +# cat $log_path/install_venv.err +## exit 1 +#fi + +echo "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@" +echo "copy tempest.conf.sample to tempest.conf....." +tempestconf=etc/tempest.conf +if [ ! -e $tempestconf ];then + cp etc/tempest.conf.sample etc/tempest.conf 2>>err.txt +fi + +source /root/keystonerc_admin + +#######Tempest CONF####### + +#######[DEFAULT]####### +echo "config tempest.conf DEFAULT lock_path /tmp" +openstack-config --set $tempestconf DEFAULT lock_path /tmp 2>>$errfile + +echo "config tempest.conf DEFAULT log_file tempest.log" +openstack-config --set $tempestconf DEFAULT log_file tempest.log 2>>$errfile + +########[identity]######## +if [ ! -n "`keystone user-list 2>>$errfile | grep -w Member`" ]; then + keystone user-create --name Member 2>>$errfile +fi + +if [ ! 
-n "`keystone role-list 2>>$errfile | grep -w Member`" ]; then + keystone role-create --name Member 2>>$errfile +fi + +if [ ! -n "`keystone tenant-list 2>>$errfile |grep -w demo`" ]; then + keystone tenant-create --name demo --enabled true 2>>$errfile +fi + +if [ ! -n "`keystone user-list 2>>$errfile |grep -w demo`" ]; then + keystone user-create --name demo --tenant demo --pass secret --enabled true 2>>$errfile +fi + +if [ ! -n "`keystone tenant-list 2>>$errfile |grep -w alt_demo`" ]; then + keystone tenant-create --name alt_demo --enabled true 2>>$errfile +fi + +if [ ! -n "`keystone user-list 2>>$errfile |grep -w alt_demo`" ]; then + keystone user-create --name alt_demo --tenant alt_demo --pass secret --enabled true 2>>$errfile +fi + +openstack-config --set $tempestconf identity admin_username admin 2>>$errfile +openstack-config --set $tempestconf identity admin_role admin 2>>$errfile +openstack-config --set $tempestconf identity admin_tenant_name admin 2>>$errfile +openstack-config --set $tempestconf identity admin_password keystone 2>>$errfile +openstack-config --set $tempestconf identity alt_tenant_name alt_demo 2>>$errfile +openstack-config --set $tempestconf identity alt_username alt_demo 2>>$errfile +openstack-config --set $tempestconf identity alt_password secret 2>>$errfile +openstack-config --set $tempestconf identity tenant_name demo 2>>$errfile +openstack-config --set $tempestconf identity username demo 2>>$errfile +openstack-config --set $tempestconf identity password secret 2>>$errfile +openstack-config --set $tempestconf identity auth_version v2 2>>$errfile +openstack-config --set $tempestconf identity catalog_type identity 2>>$errfile +openstack-config --set $tempestconf identity endpoint_type publicURL 2>>$errfile +openstack-config --set $tempestconf identity region RegionOne 2>>$errfile +openstack-config --set $tempestconf identity uri http://127.0.0.1:5000/v2.0/ 2>>$errfile +openstack-config --set $tempestconf identity uri_v3 
http://127.0.0.1:5000/v3/ 2>>$errfile + +#######[cli]####### +openstack-config --set $tempestconf cli cli_dir /usr/bin 2>>$errfile + +#######[compute]####### +openstack-config --set $tempestconf compute build_timeout 300 2>>$errfile +openstack-config --set $tempestconf compute run_ssh true 2>>$errfile +openstack-config --set $tempestconf compute ssh_auth_method adminpass 2>>$errfile +openstack-config --set $tempestconf compute ssh_user cirros 2>>$errfile +openstack-config --set $tempestconf compute image_ssh_user cirros 2>>$errfile +openstack-config --set $tempestconf compute image_ssh_password cubswin:\) 2>>$errfile + +if [ ! -n "`glance image-list 2>>$errfile |grep -w cirros_icehouse_test |awk '{print $2}'`" ]; then +glance image-create --name cirros_icehouse_test --is-public true --disk-format qcow2 --copy-from http://10.43.175.61:8081/files/linux/cirros-0.3.0-x86_64-disk.img 2>>$errfile +fi + +if [ ! -n "`glance image-list 2>>$errfile |grep -w cirros_icehouse_test_alt |awk '{print $2}'`" ]; then +glance image-create --name cirros_icehouse_test_alt --is-public true --disk-format qcow2 --copy-from http://10.43.175.61:8081/files/linux/cirros-0.3.2-x86_64-disk.img 2>>$errfile +fi + +IMAGE=`glance image-list 2>>$errfile |grep -w cirros_icehouse_test |awk -F " " '{print $2}'` +IMAGE_ALT=`glance image-list 2>>$errfile |grep -w cirros_icehouse_test_alt |awk -F " " '{print $2}'` + +openstack-config --set $tempestconf compute image_ref $IMAGE 2>>$errfile +openstack-config --set $tempestconf compute image_ref_alt $IMAGE_ALT 2>>$errfile + +#CONF.compute.flavor_ref +FLAVORNAME=m1.tiny +FLAVORALT=m1.small +FLAVORID=`nova flavor-list 2>>$errfile |grep -w $FLAVORNAME |awk '{print $2}'` +FLAVORALTID=`nova flavor-list 2>>$errfile |grep -w $FLAVORALT |awk '{print $2}'` +openstack-config --set $tempestconf compute flavor_ref $FLAVORID 2>>$errfile +openstack-config --set $tempestconf compute flavor_ref_alt $FLAVORALTID 2>>$errfile + +#######[dashboard]####### +openstack-config 
--set $tempestconf dashboard dashboard_url http://localhost/dashboard/ 2>>$errfile +openstack-config --set $tempestconf dashboard login_url http://localhost/dashboard/auth/login/ 2>>$errfile + +#######[service_available]####### +openstack-config --set $tempestconf service_available ceilometer false 2>>$errfile +openstack-config --set $tempestconf service_available cinder true 2>>$errfile +openstack-config --set $tempestconf service_available glance true 2>>$errfile +openstack-config --set $tempestconf service_available heat false 2>>$errfile +openstack-config --set $tempestconf service_available horizon true 2>>$errfile +openstack-config --set $tempestconf service_available ironic false 2>>$errfile +openstack-config --set $tempestconf service_available neutron true 2>>$errfile +openstack-config --set $tempestconf service_available nova true 2>>$errfile +openstack-config --set $tempestconf service_available sahara false 2>>$errfile +openstack-config --set $tempestconf service_available swift false 2>>$errfile +openstack-config --set $tempestconf service_available trove false 2>>$errfile +openstack-config --set $tempestconf service_available zaqar false 2>>$errfile + + +if [ -s err.txt ];then + cat err.txt + exit 1 +fi + +echo "tempest environment and tempest.conf configured successfully..." +exit 0 diff --git a/test/tempest/test-requirements.txt b/test/tempest/test-requirements.txt new file mode 100644 index 00000000..76ae5216 --- /dev/null +++ b/test/tempest/test-requirements.txt @@ -0,0 +1,13 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. 
+hacking<0.11,>=0.10.0 +# needed for doc build +sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3 +python-subunit>=0.0.18 +oslosphinx>=2.5.0,<2.6.0 # Apache-2.0 +mox>=0.5.3 +mock>=1.0 +coverage>=3.6 +oslotest>=1.5.1,<1.6.0 # Apache-2.0 +stevedore>=1.3.0,<1.4.0 # Apache-2.0 diff --git a/test/tempest/tox.ini b/test/tempest/tox.ini new file mode 100644 index 00000000..db52ccda --- /dev/null +++ b/test/tempest/tox.ini @@ -0,0 +1,133 @@ +[tox] +envlist = pep8,py27 +minversion = 1.6 +skipsdist = True + +[tempestenv] +sitepackages = False +setenv = VIRTUAL_ENV={envdir} + OS_TEST_PATH=./tempest/test_discover +deps = setuptools + -r{toxinidir}/requirements.txt + +[testenv] +setenv = VIRTUAL_ENV={envdir} + OS_TEST_PATH=./tempest/tests +usedevelop = True +install_command = pip install -U {opts} {packages} +whitelist_externals = * +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + find . -type f -name "*.pyc" -delete + bash tools/pretty_tox.sh '{posargs}' + +[testenv:genconfig] +commands = oslo-config-generator --config-file tools/config/config-generator.tempest.conf + +[testenv:cover] +setenv = OS_TEST_PATH=./tempest/tests +commands = python setup.py testr --coverage --testr-arg='tempest\.tests {posargs}' + +[testenv:all] +sitepackages = {[tempestenv]sitepackages} +# 'all' includes slow tests +setenv = {[tempestenv]setenv} + OS_TEST_TIMEOUT=1200 +deps = {[tempestenv]deps} +commands = + find . -type f -name "*.pyc" -delete + bash tools/pretty_tox.sh '{posargs}' + +[testenv:full] +sitepackages = {[tempestenv]sitepackages} +setenv = {[tempestenv]setenv} +deps = {[tempestenv]deps} +# The regex below is used to select which tests to run and exclude the slow tag: +# See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610 +commands = + find . 
-type f -name "*.pyc" -delete + bash tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}' + +[testenv:full-serial] +sitepackages = {[tempestenv]sitepackages} +setenv = {[tempestenv]setenv} +deps = {[tempestenv]deps} +# The regex below is used to select which tests to run and exclude the slow tag: +# See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610 +commands = + find . -type f -name "*.pyc" -delete + bash tools/pretty_tox_serial.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}' + +[testenv:heat-slow] +sitepackages = {[tempestenv]sitepackages} +setenv = {[tempestenv]setenv} + OS_TEST_TIMEOUT=1200 +deps = {[tempestenv]deps} +# The regex below is used to select heat api/scenario tests tagged as slow. +commands = + find . -type f -name "*.pyc" -delete + bash tools/pretty_tox.sh '(?=.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)\.orchestration) {posargs}' + +[testenv:large-ops] +sitepackages = {[tempestenv]sitepackages} +setenv = {[tempestenv]setenv} +deps = {[tempestenv]deps} +commands = + find . -type f -name "*.pyc" -delete + python setup.py testr --slowest --testr-args='tempest.scenario.test_large_ops {posargs}' + +[testenv:smoke] +sitepackages = {[tempestenv]sitepackages} +setenv = {[tempestenv]setenv} +deps = {[tempestenv]deps} +commands = + find . -type f -name "*.pyc" -delete + bash tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])((smoke)|(^tempest\.scenario)) {posargs}' + +[testenv:smoke-serial] +sitepackages = {[tempestenv]sitepackages} +setenv = {[tempestenv]setenv} +deps = {[tempestenv]deps} +# This is still serial because neutron doesn't work with parallel. See: +# https://bugs.launchpad.net/tempest/+bug/1216076 so the neutron smoke +# job would fail if we moved it to parallel. +commands = + find . 
-type f -name "*.pyc" -delete + bash tools/pretty_tox_serial.sh '(?!.*\[.*\bslow\b.*\])((smoke)|(^tempest\.scenario)) {posargs}' + +[testenv:stress] +sitepackages = {[tempestenv]sitepackages} +setenv = {[tempestenv]setenv} +deps = {[tempestenv]deps} +commands = + run-tempest-stress {posargs} + +[testenv:venv] +commands = {posargs} + +[testenv:docs] +commands = python setup.py build_sphinx {posargs} + +[testenv:pep8] +commands = + flake8 {posargs} + {toxinidir}/tools/config/check_uptodate.sh + python tools/check_uuid.py + +[testenv:uuidgen] +commands = + python tools/check_uuid.py --fix + +[hacking] +local-check-factory = tempest.hacking.checks.factory +import_exceptions = tempest.services + +[flake8] +# E125 is a won't fix until https://github.com/jcrocholl/pep8/issues/126 is resolved. For further detail see https://review.openstack.org/#/c/36788/ +# E123 skipped because it is ignored by default in the default pep8 +# E129 skipped because it is too limiting when combined with other rules +# Skipped because of new hacking 0.9: H405 +ignore = E125,E123,E129,H404,H405,E501 +show-source = True +exclude = .git,.venv,.tox,dist,doc,openstack,*egg