Remove Ceilometer and vCenter ostf

Change-Id: I31297ad399094b8f88739211bcecb777359f9dfc
Partial-bug: #1669700
Implements: blueprint remove-vmware
ibumarskov 2017-03-09 13:21:32 +04:00 committed by Vladimir Khlyunev
parent 73c0f9c51c
commit 0bf90a7a09
17 changed files with 11 additions and 2084 deletions


@@ -1,482 +0,0 @@
#!/usr/bin/env python
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import logging
import neutronclient.common.exceptions as neutron_exc
from fuel_health.common.utils.data_utils import rand_name
import fuel_health.nmanager
import fuel_health.test
LOG = logging.getLogger(__name__)
def check_compute_nodes():
"""Decorator that checks a compute existence in the environment.
Decorated tests must be skipped if there are no compute nodes.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if (not self.config.compute.compute_nodes and
not self.config.compute.use_vcenter):
self.skipTest('There are no compute nodes in the environment. '
'Test skipped.')
return func(self, *args, **kwargs)
return wrapper
return decorator
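A minimal usage sketch (the test class and method here are hypothetical; as in the tests further down this diff, the factory is applied with parentheses because check_compute_nodes() returns the actual decorator):

class ExampleCeilometerTest(fuel_health.nmanager.PlatformServicesBaseClass):

    @check_compute_nodes()
    def test_needs_compute(self):
        # Runs only when self.config.compute.compute_nodes is non-empty
        # or self.config.compute.use_vcenter is set; otherwise skipped.
        pass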
class CeilometerBaseTest(fuel_health.nmanager.PlatformServicesBaseClass):
@classmethod
def setUpClass(cls):
super(CeilometerBaseTest, cls).setUpClass()
if cls.manager.clients_initialized:
cls.wait_interval = cls.config.compute.build_interval
cls.wait_timeout = cls.config.compute.build_timeout
cls.objects_for_delete = []
cls.nova_notifications = ['memory', 'vcpus', 'disk.root.size',
'disk.ephemeral.size']
cls.neutron_network_notifications = ['network', 'network.create',
'network.update']
cls.neutron_subnet_notifications = ['subnet', 'subnet.create',
'subnet.update']
cls.neutron_port_notifications = ['port', 'port.create',
'port.update']
cls.neutron_router_notifications = ['router', 'router.create',
'router.update']
cls.neutron_floatingip_notifications = ['ip.floating.create',
'ip.floating.update']
cls.volume_events = [
'volume.create.start', 'volume.create.end',
'volume.delete.start', 'volume.delete.end',
'volume.update.start', 'volume.update.end',
'volume.resize.start', 'volume.resize.end',
'volume.attach.start', 'volume.attach.end',
'volume.detach.start', 'volume.detach.end']
cls.snapshot_events = [
'snapshot.create.start', 'snapshot.create.end',
'snapshot.delete.start', 'snapshot.delete.end']
cls.glance_notifications = ['image.size', 'image.update',
'image.upload', 'image.download',
'image.serve', 'image.delete']
cls.swift_notifications = ['storage.objects.incoming.bytes',
'storage.objects.outgoing.bytes',
'storage.api.request']
cls.heat_notifications = ['stack.create', 'stack.update',
'stack.delete', 'stack.resume',
'stack.suspend']
cls.keystone_user_notifications = [
'identity.user.created', 'identity.user.deleted',
'identity.user.updated']
cls.keystone_role_notifications = [
'identity.role.created', 'identity.role.updated',
'identity.role.deleted']
cls.keystone_role_assignment_notifications = [
'identity.role_assignment.created',
'identity.role_assignment.deleted']
cls.keystone_project_notifications = [
'identity.project.created', 'identity.project.updated',
'identity.project.deleted']
cls.keystone_group_notifications = [
'identity.group.created', 'identity.group.updated',
'identity.group.deleted']
cls.keystone_trust_notifications = [
'identity.trust.created',
'identity.trust.deleted']
cls.sahara_cluster_notifications = [
'cluster.create', 'cluster.update', 'cluster.delete']
def setUp(self):
super(CeilometerBaseTest, self).setUp()
self.check_clients_state()
if not self.ceilometer_client:
self.skipTest('Ceilometer is unavailable.')
def create_server(self, name, **kwargs):
server = self._create_server(self.compute_client, name, **kwargs)
self.addCleanup(
self.delete_resource,
delete_method=lambda: self.compute_client.servers.delete(
server.id),
get_method=lambda: self.compute_client.servers.get(server.id))
return server
def create_alarm(self, **kwargs):
"""This method provides creation of alarm."""
if 'name' in kwargs:
kwargs['name'] = rand_name(kwargs['name'])
alarm = self.ceilometer_client.alarms.create(**kwargs)
self.objects_for_delete.append((self.ceilometer_client.alarms.delete,
alarm.alarm_id))
return alarm
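Usage sketch; the keyword arguments mirror the threshold-alarm parameters used by the tests removed later in this diff:

alarm = self.create_alarm(
    meter_name='image',
    threshold=0.9,
    name='ceilometer-alarm',  # create_alarm randomizes this via rand_name()
    period=600,
    statistic='avg',
    comparison_operator='lt')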
def get_state(self, alarm_id):
"""This method provides getting state."""
return self.ceilometer_client.alarms.get_state(alarm_id=alarm_id)
def verify_state(self, alarm_id, state):
"""Check that the alarm is in the expected state."""
alarm_state_resp = self.get_state(alarm_id)
if alarm_state_resp != state:
self.fail('Alarm state was not set.')
def wait_for_resource_status(self, resource_client, resource_id, status):
self.status_timeout(resource_client, resource_id, status)
def wait_for_alarm_status(self, alarm_id, status=None):
"""The method is a customization of test.status_timeout()."""
def check_status():
try:
alarm_state_resp = self.get_state(alarm_id)
except Exception:
alarm_state_resp = None
if status:
if alarm_state_resp == status:
return True
elif alarm_state_resp in ('alarm', 'ok'):
return True  # All good.
LOG.debug("Waiting for the alarm to reach the expected status.")
if not fuel_health.test.call_until_true(check_status, 1000, 10):
actual_status = self.get_state(alarm_id)
self.fail(
"Timed out waiting to become alarm status. "
"Expected status:{exp_status}; "
"Actual status:{act_status}".format(
exp_status=status if status else "'alarm' or 'ok'",
act_status=actual_status))
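For reference, a behavior sketch of fuel_health.test.call_until_true consistent with how it is invoked here (an illustrative reimplementation, not the project's actual code):

import time

def call_until_true(func, duration, sleep_for, *args):
    # Poll func(*args) every `sleep_for` seconds until it returns True
    # or `duration` seconds have elapsed; report whether it succeeded.
    deadline = time.time() + duration
    while time.time() < deadline:
        if func(*args):
            return True
        time.sleep(sleep_for)
    return False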
def wait_for_object_sample(self, obj, query, ceilo_obj_type):
"""This method is to wait for sample to add it to database.
query example:
query=[
{'field':'resource',
'op':'eq',
'value':'000e6838-471b-4a14-8da6-655fcff23df1'
}]
"""
kwargs = {"q": query}
if ceilo_obj_type == 'sample':
method = self.ceilometer_client.samples.list
kwargs["meter_name"] = obj
elif ceilo_obj_type == "event":
query.append({'field': 'event_type', 'op': 'eq', 'value': obj})
method = self.ceilometer_client.events.list
def check_status():
try:
body = method(**kwargs)
except Exception:
body = None
if body:
return True
if not fuel_health.test.call_until_true(check_status, 600, 10):
self.fail(
"Timed out waiting for object: {obj} "
"with query:{query}".format(obj=obj, query=query))
def wait_for_statistic_of_metric(self, meter_name, query=None,
period=None):
"""The method is a customization of test.status_timeout()."""
def check_status():
stat_state_resp = self.ceilometer_client.statistics.list(
meter_name, q=query, period=period)
if len(stat_state_resp) > 0:
return True # All good.
LOG.debug("Waiting for while metrics will available.")
if not fuel_health.test.call_until_true(check_status, 600, 10):
self.fail("Timed out waiting to become alarm")
else:
return self.ceilometer_client.statistics.list(meter_name, q=query,
period=period)
def wait_for_ceilo_objects(self, object_list, query, ceilo_obj_type):
for obj in object_list:
self.wait_for_object_sample(obj, query, ceilo_obj_type)
def create_image_sample(self, image_id):
sample = self.ceilometer_client.samples.create(
resource_id=image_id, counter_name='image', counter_type='delta',
counter_unit='image', counter_volume=1,
resource_metadata={'user': 'example_metadata'})
return sample
def get_samples_count(self, meter_name, query):
return self.ceilometer_client.statistics.list(
meter_name=meter_name, q=query)[0].count
def wait_samples_count(self, meter_name, query, count):
def check_count():
new_count = self.get_samples_count(meter_name, query)
return new_count > count
if not fuel_health.test.call_until_true(check_count, 60, 1):
self.fail('Sample count is not greater than the expected value.')
def check_event_type(self, event_type):
event_list = [event.event_type for event
in self.ceilometer_client.event_types.list()]
if event_type not in event_list:
self.fail('"{event_type}" not found in event type list.'.format(
event_type=event_type))
def check_event_message_id(self, events_list, instance_id):
for event in events_list:
try:
if next(x['value'] for x in event.traits
if x['name'] == "instance_id") == instance_id:
return event.message_id
except StopIteration:
self.fail('Trait "instance_id" not found in trait list.')
self.fail('No events found for "{instance_id}" instance.'.format(
instance_id=instance_id))
def check_traits(self, event_type, traits):
trait_desc = [desc.name for desc in
self.ceilometer_client.trait_descriptions.list(
event_type)]
for trait in traits:
if trait not in trait_desc:
self.fail('Trait "{trait}" not found in trait list.'.format(
trait=trait))
def identity_helper(self):
user_pass = rand_name("ceilo-user-pass")
user_name = rand_name("ceilo-user-update")
tenant_name = rand_name("ceilo-tenant-update")
tenant = self.identity_client.tenants.create(rand_name("ceilo-tenant"))
self.objects_for_delete.append((
self.identity_client.tenants.delete, tenant))
self.identity_client.tenants.update(tenant.id, name=tenant_name)
user = self.identity_client.users.create(
rand_name("ceilo-user"), user_pass, tenant.id)
self.objects_for_delete.append((
self.identity_client.users.delete, user))
self.identity_client.users.update(user, name=user_name)
role = self.identity_v3_client.roles.create(rand_name("ceilo-role"))
self.identity_v3_client.roles.update(
role, user=user.id, project=tenant.id)
self.identity_v3_client.roles.grant(
role, user=user.id, project=tenant.id)
self.objects_for_delete.append((
self.identity_client.roles.delete, role))
user_client = self.manager_class()._get_identity_client(
user_name, user_pass, tenant_name, 3)
trust = user_client.trusts.create(
self.identity_v3_client.user_id, user.id, [role.name], tenant.id)
self.objects_for_delete.append((user_client.trusts.delete, trust))
group = self.identity_v3_client.groups.create(rand_name("ceilo-group"))
self.objects_for_delete.append((
self.identity_v3_client.groups.delete, group))
self.identity_v3_client.groups.update(
group, name=rand_name("ceilo-group-update"))
self.identity_v3_client.groups.delete(group)
user_client.trusts.delete(trust)
self.identity_v3_client.roles.revoke(
role, user=user.id, project=tenant.id)
self.identity_client.roles.delete(role)
self.identity_client.users.delete(user)
self.identity_client.tenants.delete(tenant)
return tenant, user, role, group, trust
def neutron_helper(self):
net = self.neutron_client.create_network(
{"network": {"name": rand_name("ceilo-net")}})["network"]
self.addCleanup(self.cleanup_resources,
[(self.neutron_client.delete_network, net["id"])])
self.neutron_client.update_network(
net["id"], {"network": {"name": rand_name("ceilo-net-update")}})
subnet = self.neutron_client.create_subnet(
{"subnet": {"name": rand_name("ceilo-subnet"),
"network_id": net["id"],
"ip_version": 4,
"cidr": "10.0.7.0/24"}})["subnet"]
self.addCleanup(self.cleanup_resources,
[(self.neutron_client.delete_subnet, subnet["id"])])
self.neutron_client.update_subnet(
subnet["id"], {"subnet": {"name": rand_name("ceilo-subnet")}})
port = self.neutron_client.create_port({
"port": {"name": rand_name("ceilo-port"),
"network_id": net["id"]}})['port']
self.addCleanup(self.cleanup_resources,
[(self.neutron_client.delete_port, port["id"])])
self.neutron_client.update_port(
port["id"], {"port": {"name": rand_name("ceilo-port-update")}})
router = self.neutron_client.create_router(
{"router": {"name": rand_name("ceilo-router")}})['router']
self.addCleanup(self.cleanup_resources,
[(self.neutron_client.delete_router, router["id"])])
self.neutron_client.update_router(
router["id"],
{"router": {"name": rand_name("ceilo-router-update")}})
external_network = self.find_external_network()
try:
body = {
"floatingip": {
"floating_network_id": external_network["id"]
}
}
fl_ip = self.neutron_client.create_floatingip(body)["floatingip"]
except neutron_exc.IpAddressGenerationFailureClient:
self.fail('No more IP addresses available on external network.')
self.addCleanup(self.cleanup_resources,
[(self.neutron_client.delete_floatingip, fl_ip["id"])])
self.neutron_client.update_floatingip(
fl_ip["id"], {"floatingip": {"port_id": None}})
self.neutron_client.delete_floatingip(fl_ip["id"])
self.neutron_client.delete_router(router["id"])
self.neutron_client.delete_port(port["id"])
self.neutron_client.delete_subnet(subnet["id"])
self.neutron_client.delete_network(net["id"])
return net, subnet, port, router, fl_ip
def sahara_helper(self, image_id, plugin_name, hadoop_version):
# Find flavor id for sahara instances
flavor_id = next(
flavor.id for flavor in
self.compute_client.flavors.list() if flavor.name == 'm1.small')
private_net_id, floating_ip_pool = self.create_network_resources()
# Create json for node group
node_group = {'name': 'all-in-one',
'flavor_id': flavor_id,
'node_processes': ['nodemanager', 'datanode',
'resourcemanager', 'namenode',
'historyserver'],
'count': 1,
'auto_security_group': True}
if floating_ip_pool:
node_group['floating_ip_pool'] = floating_ip_pool
# Create json for Sahara cluster
cluster_json = {'name': rand_name("ceilo-cluster"),
'plugin_name': plugin_name,
'hadoop_version': hadoop_version,
'default_image_id': image_id,
'cluster_configs': {'HDFS': {'dfs.replication': 1}},
'node_groups': [node_group],
'net_id': private_net_id}
# Create Sahara cluster
cluster = self.sahara_client.clusters.create(**cluster_json)
self.addCleanup(
self.delete_resource,
delete_method=lambda: self.sahara_client.clusters.delete(
cluster.id),
get_method=lambda: self.sahara_client.clusters.get(cluster.id))
# Wait for change cluster state for metric: cluster.update
def check_status():
cluster_state = self.sahara_client.clusters.get(cluster.id).status
return cluster_state in ['Waiting', 'Active', 'Error']
fuel_health.test.call_until_true(check_status, 300, 1)
# Delete cluster
self.sahara_client.clusters.delete(cluster.id)
return cluster
def glance_helper(self):
image = self.glance_client.images.create(
name=rand_name('ostf-ceilo-image'))
self.objects_for_delete.append((self.glance_client.images.delete,
image.id))
self.glance_client.images.update(image.id, data='data',
disk_format='qcow2',
container_format='bare')
self.glance_client.images.upload(image.id, 'upload_data')
self.glance_client.images.data(image.id)
self.glance_client.images.delete(image.id)
return image
def volume_helper(self, instance):
device = '/dev/vdb'
# Create a volume
volume = self.volume_client.volumes.create(
name=rand_name('ost1_test-ceilo-volume'), size=1)
self.addCleanup(
self.delete_resource,
delete_method=lambda: self.volume_client.volumes.delete(volume),
get_method=lambda: self.volume_client.volumes.get(volume.id))
# Wait for "Available" status of the volume
self.wait_for_resource_status(
self.volume_client.volumes, volume.id, 'available')
# Resize the volume
self.volume_client.volumes.extend(volume, 2)
self.wait_for_resource_status(
self.volume_client.volumes, volume.id, 'available')
# Create a volume snapshot
snapshot = self.volume_client.volume_snapshots.create(
volume.id, name=rand_name('ost1_test-'))
self.addCleanup(
self.delete_resource,
delete_method=lambda: self.volume_client.volume_snapshots.delete(
snapshot),
get_method=lambda: self.volume_client.volume_snapshots.get(
snapshot.id))
# Wait for "Available" status of the snapshot
self.wait_for_resource_status(
self.volume_client.volume_snapshots, snapshot.id, 'available')
# Update the volume name
self.volume_client.volumes.update(volume, name="ost1_test-update")
# Attach the volume to the instance
self.volume_client.volumes.attach(volume.id, instance.id, device)
# Detach the volume from the instance
self.volume_client.volumes.detach(volume.id)
# Delete the volume snapshot
self.delete_resource(
delete_method=lambda: self.volume_client.volume_snapshots.delete(
snapshot),
get_method=lambda: self.volume_client.volume_snapshots.get(
snapshot.id))
# Delete the volume
self.delete_resource(
delete_method=lambda: self.volume_client.volumes.delete(volume),
get_method=lambda: self.volume_client.volumes.get(volume.id))
return volume, snapshot
@staticmethod
def cleanup_resources(object_list):
for method, resource in object_list:
try:
method(resource)
except Exception:
LOG.exception("")
@classmethod
def tearDownClass(cls):
if cls.manager.clients_initialized:
cls.cleanup_resources(cls.objects_for_delete)
super(CeilometerBaseTest, cls).tearDownClass()


@@ -123,22 +123,6 @@ def cleanup(cluster_deployment_info):
except Exception:
LOG.exception('Failed murano cluster cleanup')
if 'ceilometer' in cluster_deployment_info:
try:
ceilometer_client = manager._get_ceilometer_client()
if ceilometer_client is not None:
alarms = ceilometer_client.alarms.list()
for a in alarms:
if a.name.startswith('ost1_test-'):
try:
LOG.info('Start alarms deletion.')
ceilometer_client.alarms.delete(a.id)
except Exception as exc:
LOG.debug(exc)
except Exception as exc:
LOG.warning('Something went wrong with the ceilometer client. '
'Exception: {0}'.format(exc))
if 'heat' in cluster_deployment_info:
try:
heat_client = manager._get_heat_client()


@@ -204,9 +204,6 @@ ComputeGroup = [
cfg.StrOpt('libvirt_type',
default='qemu',
help="Type of hypervisor to use."),
cfg.BoolOpt('use_vcenter',
default=False,
help="Usage of vCenter"),
]
@@ -290,9 +287,6 @@ VolumeGroup = [
cfg.BoolOpt('cinder_node_exist',
default=True,
help="Allow to run tests if cinder exists"),
cfg.BoolOpt('cinder_vmware_node_exist',
default=True,
help="Allow to run tests if cinder-vmware exists"),
cfg.BoolOpt('ceph_exist',
default=True,
help="Allow to run tests if ceph exists"),
@@ -305,9 +299,6 @@ VolumeGroup = [
cfg.StrOpt('backend2_name',
default='BACKEND_2',
help="Name of the backend2 (must be declared in cinder.conf)"),
cfg.StrOpt('cinder_vmware_storage_az',
default='vcenter',
help="Name of storage availability zone for cinder-vmware."),
]
@@ -615,8 +606,6 @@ class NailgunConfig(object):
LOG.info('set proxy successful')
self._parse_cluster_generated_data()
LOG.info('parse generated successful')
self._parse_vmware_attributes()
LOG.info('parse vmware attributes successful')
except exceptions.SetProxy as exc:
raise exc
except Exception:
@@ -650,9 +639,6 @@ class NailgunConfig(object):
access_data['password']['value']
)
self.compute.libvirt_type = common_data['libvirt_type']['value']
# After the removal of VMware support, the use_vcenter attribute no longer exists.
self.compute.use_vcenter = common_data.get('use_vcenter', {}).get(
'value', False)
self.compute.auto_assign_floating_ip = common_data[
'auto_assign_floating_ip']['value']
@@ -698,8 +684,6 @@ class NailgunConfig(object):
cinder_nodes.extend(
filter(lambda node: cinder_role in node['roles'], data))
cinder_vmware_nodes = filter(lambda node: 'cinder-vmware' in
node['roles'], data)
controller_ips = []
controller_names = []
public_ips = []
@@ -727,8 +711,6 @@ class NailgunConfig(object):
self.compute.online_controller_names = online_controller_names
if not cinder_nodes:
self.volume.cinder_node_exist = False
if not cinder_vmware_nodes:
self.volume.cinder_vmware_node_exist = False
compute_nodes = filter(lambda node: 'compute' in node['roles'],
data)
@@ -845,13 +827,6 @@ class NailgunConfig(object):
self.identity.url = data['horizon_url'] + 'dashboard'
self.identity.uri = data['keystone_url'] + 'v2.0/'
def _parse_vmware_attributes(self):
if self.volume.cinder_vmware_node_exist:
api_url = '/api/clusters/%s/vmware_attributes' % self.cluster_id
data = self.req_session.get(self.nailgun_url + api_url).json()
az = data['editable']['value']['availability_zones'][0]['az_name']
self.volume.cinder_vmware_storage_az = "{0}-cinder".format(az)
def get_keystone_vip(self):
if 'service_endpoint' in self.network.raw_data \
and not self.fuel.ssl_data:


@@ -1,75 +0,0 @@
heat_template_version: 2013-05-23
parameters:
KeyName:
type: string
InstanceType:
type: string
ImageId:
type: string
SecurityGroup:
type: string
Net:
type: string
resources:
my_asg:
type: OS::Heat::AutoScalingGroup
properties:
resource:
type: OS::Nova::Server
properties:
metadata: {"metering.stack": {get_param: "OS::stack_id"}}
key_name: { get_param: KeyName }
image: { get_param: ImageId }
flavor: { get_param: InstanceType }
security_groups:
- get_param: SecurityGroup
networks:
- network: {get_param: Net}
min_size: 1
max_size: 3
scale_up_policy:
type: OS::Heat::ScalingPolicy
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: {get_resource: my_asg}
cooldown: 60
scaling_adjustment: 2
scale_down_policy:
type: OS::Heat::ScalingPolicy
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: {get_resource: my_asg}
cooldown: 60
scaling_adjustment: '-1'
cpu_alarm_high:
type: OS::Ceilometer::Alarm
properties:
description: Scale up if the instance count is <= 1 for 1 minute
meter_name: network.incoming.bytes
statistic: count
period: 60
evaluation_periods: 1
threshold: 1
alarm_actions:
- {get_attr: [scale_up_policy, alarm_url]}
matching_metadata: {'metadata.user_metadata.stack': {get_param: "OS::stack_id"}}
comparison_operator: le
cpu_alarm_low:
type: OS::Ceilometer::Alarm
properties:
description: Scale down if the instance count is > 2 for 1 minute
meter_name: network.incoming.bytes
statistic: count
period: 60
evaluation_periods: 1
threshold: 2
alarm_actions:
- {get_attr: [scale_down_policy, alarm_url]}
matching_metadata: {'metadata.user_metadata.stack': {get_param: "OS::stack_id"}}
comparison_operator: gt
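A sketch of launching this template with python-heatclient; the endpoint, token, file name, and parameter values are illustrative assumptions:

from heatclient import client as heat_client

heat = heat_client.Client('1', endpoint='http://heat.example:8004/v1/TENANT_ID',
                          token='AUTH_TOKEN')  # placeholders

with open('autoscaling.yaml') as f:  # this template, saved locally
    template = f.read()

heat.stacks.create(
    stack_name='ost1_test-autoscaling',
    template=template,
    parameters={
        'KeyName': 'ost1_test-keypair',  # placeholder
        'InstanceType': 'm1.micro',      # placeholder
        'ImageId': 'TestVM',             # placeholder
        'SecurityGroup': 'default',      # placeholder
        'Net': 'net04',                  # placeholder
    })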


@@ -1,71 +0,0 @@
heat_template_version: 2013-05-23
parameters:
KeyName:
type: string
InstanceType:
type: string
ImageId:
type: string
SecurityGroup:
type: string
resources:
my_asg:
type: OS::Heat::AutoScalingGroup
properties:
resource:
type: OS::Nova::Server
properties:
metadata: {"metering.stack": {get_param: "OS::stack_id"}}
key_name: { get_param: KeyName }
image: { get_param: ImageId }
flavor: { get_param: InstanceType }
security_groups:
- get_param: SecurityGroup
min_size: 1
max_size: 3
scale_up_policy:
type: OS::Heat::ScalingPolicy
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: {get_resource: my_asg}
cooldown: 60
scaling_adjustment: 2
scale_down_policy:
type: OS::Heat::ScalingPolicy
properties:
adjustment_type: change_in_capacity
auto_scaling_group_id: {get_resource: my_asg}
cooldown: 60
scaling_adjustment: '-1'
cpu_alarm_high:
type: OS::Ceilometer::Alarm
properties:
description: Scale up if the instance count is <= 1 for 1 minute
meter_name: network.incoming.bytes
statistic: count
period: 60
evaluation_periods: 1
threshold: 1
alarm_actions:
- {get_attr: [scale_up_policy, alarm_url]}
matching_metadata: {'metadata.user_metadata.stack': {get_param: "OS::stack_id"}}
comparison_operator: le
cpu_alarm_low:
type: OS::Ceilometer::Alarm
properties:
description: Scale down if the instance count is > 2 for 1 minute
meter_name: network.incoming.bytes
statistic: count
period: 60
evaluation_periods: 1
threshold: 2
alarm_actions:
- {get_attr: [scale_down_policy, alarm_url]}
matching_metadata: {'metadata.user_metadata.stack': {get_param: "OS::stack_id"}}
comparison_operator: gt


@@ -171,27 +171,6 @@ class HeatBaseTest(fuel_health.nmanager.PlatformServicesBaseClass):
return instances
def wait_for_autoscaling(self, exp_count,
timeout, interval, reduced_stack_name):
"""This method checks whether autoscaling finished or not.
It checks number of instances owned by stack, instances
belonging to stack are defined by special name pattern
(reduced_stack_name). It is not possible to get stack instances
using get_stack_objects, because instances are created as part of
autoscaling group resource.
"""
LOG.debug('Expected number of instances'
' owned by stack is {0}'.format(exp_count))
def count_instances(reduced_stack_name):
instances = self.get_instances_by_name_mask(reduced_stack_name)
return len(instances) == exp_count
return fuel_health.test.call_until_true(
count_instances, timeout, interval, reduced_stack_name)
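Usage sketch (values are illustrative; reduced_stack_name is the name prefix shared by the group's instances):

# Poll every 10 s, for up to 10 minutes, until the stack owns 3 instances.
scaled = self.wait_for_autoscaling(
    exp_count=3, timeout=600, interval=10,
    reduced_stack_name='ost1_test-heat-')
if not scaled:
    self.fail('Autoscaling did not reach the expected instance count.')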
def wait_for_vm_ready_for_load(self, conn_string, timeout, interval):
"""Wait for fake file to be created on the instance.


@@ -38,11 +38,6 @@ try:
except Exception:
LOG.exception("")
LOG.warning('Sahara client could not be imported.')
try:
import ceilometerclient.v2.client
except Exception:
LOG.exception("")
LOG.warning('Ceilometer client could not be imported.')
try:
import neutronclient.neutron.client
except Exception:
@@ -116,7 +111,6 @@ class OfficialClientManager(fuel_health.manager.Manager):
self.heat_client = self._get_heat_client()
self.murano_client = self._get_murano_client()
self.sahara_client = self._get_sahara_client()
self.ceilometer_client = self._get_ceilometer_client()
self.neutron_client = self._get_neutron_client()
self.glance_client_v1 = self._get_glance_client(version=1)
self.ironic_client = self._get_ironic_client()
@@ -133,7 +127,6 @@ class OfficialClientManager(fuel_health.manager.Manager):
'heat_client',
'murano_client',
'sahara_client',
'ceilometer_client',
'neutron_client',
'ironic_client',
'aodh_client',
@@ -321,20 +314,6 @@ class OfficialClientManager(fuel_health.manager.Manager):
input_auth_token=auth_token,
insecure=True)
def _get_ceilometer_client(self):
keystone = self._get_identity_client()
try:
endpoint = keystone.service_catalog.url_for(
service_type='metering',
endpoint_type='publicURL')
except keystoneclient.exceptions.EndpointNotFound:
LOG.warning('Can not initialize ceilometer client')
return None
return ceilometerclient.v2.Client(endpoint=endpoint, insecure=True,
verify=False,
token=lambda: keystone.auth_token)
def _get_neutron_client(self, version='2.0'):
keystone = self._get_identity_client()
@@ -485,12 +464,8 @@
'with unexpected result. ')
self.fail("Instance is not reachable by IP.")
def get_availability_zone(self, image_id=None):
disk = self.glance_client_v1.images.get(image_id).disk_format
if disk == 'vmdk':
az_name = 'vcenter'
else:
az_name = 'nova'
def get_availability_zone(self):
az_name = 'nova'
return az_name
def check_clients_state(self):
@@ -738,7 +713,7 @@ class NovaNetworkScenarioTest(OfficialClientTest):
base_image_id = self.get_image_from_name()
if not az_name:
az_name = self.get_availability_zone(image_id=base_image_id)
az_name = self.get_availability_zone()
if not flavor_id:
if not self.find_micro_flavor():
@@ -1365,7 +1340,7 @@ class SmokeChecksTest(OfficialClientTest):
name = rand_name('ost1_test-boot-volume-instance')
base_image_id = self.get_image_from_name()
bd_map = {'vda': volume.id + ':::0'}
az_name = self.get_availability_zone(image_id=base_image_id)
az_name = self.get_availability_zone()
if 'neutron' in self.config.network.network_provider:
network = [net.id for net in
self.compute_client.networks.list()
@@ -1405,7 +1380,7 @@
name = rand_name('ost1_test-volume-instance')
base_image_id = self.get_image_from_name(img_name=img_name)
az_name = self.get_availability_zone(image_id=base_image_id)
az_name = self.get_availability_zone()
if 'neutron' in self.config.network.network_provider:
network = [net.id for net in


@@ -1,55 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from fuel_health import ceilometermanager
class CeilometerApiTests(ceilometermanager.CeilometerBaseTest):
"""TestClass contains tests that check basic Ceilometer functionality."""
def test_list_meters(self):
"""Ceilometer test to list meters, alarms, resources and events
Target component: Ceilometer
Scenario:
1. Request the list of meters with query: disk_format=qcow2.
2. Request the list of alarms.
3. Request the list of resources created for the last hour.
4. Request the list of events created for the last hour.
Duration: 180 s.
Deployment tags: Ceilometer
"""
fail_msg = 'Failed to get list of meters.'
q = [{'field': 'metadata.disk_format', 'op': 'eq', 'value': 'qcow2'}]
self.verify(60, self.ceilometer_client.meters.list,
1, fail_msg, 'getting list of meters', q)
fail_msg = 'Failed to get list of alarms.'
self.verify(60, self.ceilometer_client.alarms.list,
2, fail_msg, 'getting list of alarms')
fail_msg = 'Failed to get list of resources.'
an_hour_ago = (datetime.datetime.now() -
datetime.timedelta(hours=1)).isoformat()
q = [{'field': 'timestamp', 'op': 'gt', 'value': an_hour_ago}]
self.verify(60, self.ceilometer_client.resources.list,
3, fail_msg, 'getting list of resources', q)
fail_msg = 'Failed to get list of events.'
self.verify(60, self.ceilometer_client.events.list,
4, fail_msg, 'getting list of events', q)


@@ -43,8 +43,7 @@ class TestInstanceLiveMigration(nmanager.NovaNetworkScenarioTest):
def setUp(self):
super(TestInstanceLiveMigration, self).setUp()
self.check_clients_state()
if not self.config.compute.compute_nodes and \
self.config.compute.libvirt_type != 'vcenter':
if not self.config.compute.compute_nodes:
self.skipTest('There are no compute nodes')
if len(self.config.compute.compute_nodes) < 2:
self.skipTest('To test live migration at least'


@@ -270,10 +270,7 @@ class TestNovaNetwork(nmanager.NovaNetworkScenarioTest):
for addr in server.addresses:
if addr.startswith('novanetwork'):
instance_ip = server.addresses[addr][0]['addr']
if not self.config.compute.use_vcenter:
compute = getattr(server, 'OS-EXT-SRV-ATTR:host')
else:
compute = None
compute = getattr(server, 'OS-EXT-SRV-ATTR:host')
except Exception:
LOG.exception("Unable to get instance details")
self.fail("Step 3 failed: cannot get instance details. "


@@ -80,11 +80,7 @@ class TestImageAction(nmanager.SmokeChecksTest):
"something is wrong with nova services.")
else:
flavor_id = self.micro_flavors[0]
disk = self.glance_client_v1.images.get(image_id).disk_format
if disk == 'vmdk':
az_name = 'vcenter'
else:
az_name = 'nova'
az_name = 'nova'
name = rand_name('ost1_test-image')
client = self.compute_client
LOG.debug("name:%s, image:%s" % (name, image_id))


@@ -1,576 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from fuel_health.common.utils.data_utils import rand_name
from fuel_health import nmanager
from fuel_health import test
LOG = logging.getLogger(__name__)
class TestVcenter(nmanager.NovaNetworkScenarioTest):
"""Test suit verifies:
- Instance creation
- Floating ip creation
- Instance connectivity by floating IP
"""
@classmethod
def setUpClass(cls):
super(TestVcenter, cls).setUpClass()
if cls.manager.clients_initialized:
cls.tenant_id = cls.manager._get_identity_client(
cls.config.identity.admin_username,
cls.config.identity.admin_password,
cls.config.identity.admin_tenant_name).tenant_id
cls.keypairs = {}
cls.security_groups = {}
cls.network = []
cls.servers = []
cls.floating_ips = []
def setUp(self):
super(TestVcenter, self).setUp()
self.check_clients_state()
def tearDown(self):
super(TestVcenter, self).tearDown()
if self.manager.clients_initialized:
if self.servers:
for server in self.servers:
try:
self._delete_server(server)
self.servers.remove(server)
except Exception:
LOG.exception("Server was already deleted.")
@classmethod
def find_flavor_id(cls):
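# Build a {ram_mb: flavor_id} map for flavors with at least 128 MB of RAM
# and return the id of the flavor with the smallest such RAM size.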
flavors = dict([flavor.ram, flavor.id]
for flavor in cls.compute_client.flavors.list()
if flavor.ram >= 128)
return flavors[sorted(flavors)[0]]
def test_1_vcenter_create_servers(self):
"""vCenter: Launch instance
Target component: Nova
Scenario:
1. Create a new security group (if it doesn`t exist yet).
2. Create an instance using the new security group.
3. Delete instance.
Duration: 200 s.
Available since release: 2014.2-6.1
Deployment tags: use_vcenter
"""
img_name = 'TestVM-VMDK'
self.manager.config.compute.image_name = img_name
self.check_image_exists()
if not self.security_groups:
self.security_groups[self.tenant_id] = self.verify(
25,
self._create_security_group,
1,
"Security group can not be created.",
'security group creation',
self.compute_client)
name = rand_name('ost1_test-server-smoke-')
security_groups = [self.security_groups[self.tenant_id].name]
flavor_id = self.find_flavor_id()
server = self.verify(
200,
self._create_server,
2,
"Creating instance using the new security group has failed.",
'image creation',
self.compute_client, name, security_groups, flavor_id, None,
img_name)
self.verify(30, self._delete_server, 3,
"Server can not be deleted.",
"server deletion", server)
def test_3_vcenter_check_public_instance_connectivity_from_instance(self):
"""vCenter: Check network connectivity from instance via floating IP
Target component: Nova
Scenario:
1. Create a new security group (if it doesn`t exist yet).
2. Create an instance using the new security group.
3. Create a new floating IP
4. Assign the new floating IP to the instance.
5. Check connectivity to the floating IP using ping command.
6. Check that public IP 8.8.8.8 can be pinged from instance.
7. Disassociate server floating ip.
8. Delete floating ip
9. Delete server.
Duration: 300 s.
Available since release: 2014.2-6.1
Deployment tags: use_vcenter
"""
img_name = 'TestVM-VMDK'
self.manager.config.compute.image_name = img_name
self.check_image_exists()
if not self.security_groups:
self.security_groups[self.tenant_id] = self.verify(
25, self._create_security_group, 1,
"Security group can not be created.",
'security group creation',
self.compute_client)
name = rand_name('ost1_test-server-smoke-')
security_groups = [self.security_groups[self.tenant_id].name]
flavor_id = self.find_flavor_id()
server = self.verify(250, self._create_server, 2,
"Server can not be created.",
"server creation",
self.compute_client, name, security_groups,
flavor_id, None, img_name)
floating_ip = self.verify(
20,
self._create_floating_ip,
3,
"Floating IP can not be created.",
'floating IP creation')
self.verify(20, self._assign_floating_ip_to_instance,
4, "Floating IP can not be assigned.",
'floating IP assignment',
self.compute_client, server, floating_ip)
self.floating_ips.append(floating_ip)
ip_address = floating_ip.ip
LOG.info('ip address is {0}'.format(ip_address))
LOG.debug(ip_address)
self.verify(600, self._check_vm_connectivity, 5,
"VM connectivity doesn`t function properly.",
'VM connectivity checking', ip_address,
30, (6, 60))
self.verify(600, self._check_connectivity_from_vm,
6, ("Connectivity to 8.8.8.8 from the VM doesn`t "
"function properly."),
'public connectivity checking from VM', ip_address,
30, (6, 60))
self.verify(20, self.compute_client.servers.remove_floating_ip,
7, "Floating IP cannot be removed.",
"removing floating IP", server, floating_ip)
self.verify(20, self.compute_client.floating_ips.delete,
8, "Floating IP cannot be deleted.",
"floating IP deletion", floating_ip)
if self.floating_ips:
self.floating_ips.remove(floating_ip)
self.verify(30, self._delete_server, 9,
"Server can not be deleted. ",
"server deletion", server)
def test_2_vcenter_check_internet_connectivity_without_floatingIP(self):
"""vCenter: Check network connectivity from instance without floating \
IP
Target component: Nova
Scenario:
1. Create a new security group (if it doesn`t exist yet).
2. Create an instance using the new security group.
(if it doesn`t exist yet).
3. Check that public IP 8.8.8.8 can be pinged from instance.
4. Delete server.
Duration: 300 s.
Available since release: 2014.2-6.1
Deployment tags: nova_network, use_vcenter
"""
self.check_image_exists()
if not self.security_groups:
self.security_groups[self.tenant_id] = self.verify(
25, self._create_security_group, 1,
"Security group can not be created.",
'security group creation', self.compute_client)
name = rand_name('ost1_test-server-smoke-')
security_groups = [self.security_groups[self.tenant_id].name]
img_name = 'TestVM-VMDK'
compute = None
server = self.verify(
250, self._create_server, 2,
"Server can not be created.",
'server creation',
self.compute_client, name, security_groups, None, None, img_name)
try:
for addr in server.addresses:
if addr.startswith('novanetwork'):
instance_ip = server.addresses[addr][0]['addr']
except Exception:
LOG.exception("")
self.fail("Step 3 failed: cannot get instance details. "
"Please refer to OpenStack logs for more details.")
self.verify(400, self._check_connectivity_from_vm,
3, ("Connectivity to 8.8.8.8 from the VM doesn`t "
"function properly."),
'public connectivity checking from VM',
instance_ip, 30, (6, 30), compute)
self.verify(30, self._delete_server, 4,
"Server can not be deleted. ",
"server deletion", server)
class TestVcenterImageAction(nmanager.SmokeChecksTest):
"""Test class verifies the following:
- verify that image can be created;
- verify that instance can be booted from created image;
- verify that snapshot can be created from an instance;
- verify that instance can be booted from a snapshot.
"""
@classmethod
def setUpClass(cls):
super(TestVcenterImageAction, cls).setUpClass()
if cls.manager.clients_initialized:
cls.micro_flavors = cls.find_micro_flavor()
@classmethod
def tearDownClass(cls):
super(TestVcenterImageAction, cls).tearDownClass()
def setUp(self):
super(TestVcenterImageAction, self).setUp()
self.check_clients_state()
def _wait_for_server_status(self, server, status):
self.status_timeout(self.compute_client.servers,
server.id,
status)
def _wait_for_image_status(self, image_id, status):
self.status_timeout(self.compute_client.images, image_id, status)
def _wait_for_server_deletion(self, server):
def is_deletion_complete():
# Deletion testing is only required for objects whose
# existence cannot be checked via retrieval.
if isinstance(server, dict):
return True
try:
server.get()
except Exception as e:
# Clients are expected to return an exception
# called 'NotFound' if retrieval fails.
if e.__class__.__name__ == 'NotFound':
return True
self.error_msg.append(e)
LOG.exception("")
return False
# Block until resource deletion has completed or timed-out
test.call_until_true(is_deletion_complete, 10, 1)
def _boot_image(self, image_id, flavor_id):
name = rand_name('ost1_test-image')
client = self.compute_client
LOG.debug("name:%s, image:%s" % (name, image_id))
if 'neutron' in self.config.network.network_provider:
network = [net.id for net in
self.compute_client.networks.list()
if net.label == self.private_net]
if network:
create_kwargs = {
'nics': [
{'net-id': network[0]},
],
}
else:
self.fail("Default private network '{0}' isn't present. "
"Please verify it is properly created.".
format(self.private_net))
server = client.servers.create(name=name,
image=image_id,
flavor=flavor_id,
**create_kwargs)
else:
server = client.servers.create(name=name,
image=image_id,
flavor=self.micro_flavors[0])
self.set_resource(name, server)
# self.addCleanup(self.compute_client.servers.delete, server)
self.verify_response_body_content(
name, server.name,
msg="Please refer to OpenStack logs for more details.")
self._wait_for_server_status(server, 'ACTIVE')
server = client.servers.get(server) # getting network information
LOG.debug("server:%s" % server)
return server
def _create_image(self, server):
snapshot_name = rand_name('ost1_test-snapshot-')
create_image_client = self.compute_client.servers.create_image
image_id = create_image_client(server, snapshot_name)
self.addCleanup(self.compute_client.images.delete, image_id)
self._wait_for_server_status(server, 'ACTIVE')
self._wait_for_image_status(image_id, 'ACTIVE')
snapshot_image = self.compute_client.images.get(image_id)
self.verify_response_body_content(
snapshot_name, snapshot_image.name,
msg="Please refer to OpenStack logs for more details.")
return image_id
def test_4_snapshot(self):
"""vCenter: Launch instance, create snapshot, launch instance from \
snapshot
Target component: Glance
Scenario:
1. Create flavor.
2. Get existing image by name.
3. Launch an instance using the default image.
4. Make snapshot of the created instance.
5. Delete the instance created in step 3.
6. Wait until the instance is deleted.
7. Launch another instance from the snapshot created in step 4.
8. Delete server.
9. Delete flavor.
Duration: 300 s.
Available since release: 2014.2-6.1
Deployment tags: use_vcenter
"""
img_name = 'TestVM-VMDK'
self.manager.config.compute.image_name = img_name
self.check_image_exists()
fail_msg = "Flavor was not created properly."
flavor = self.verify(30, self._create_flavors, 1,
fail_msg,
"flavor creation",
self.compute_client, 256, 0)
image = self.verify(30, self.get_image_from_name, 2,
"Image can not be retrieved.",
"getting image by name",
img_name)
server = self.verify(180, self._boot_image, 3,
"Image can not be booted.",
"image booting",
image, flavor.id)
# snapshot the instance
snapshot_image_id = self.verify(700, self._create_image, 4,
"Snapshot of an"
" instance can not be created.",
'snapshotting an instance',
server)
self.verify(180, self.compute_client.servers.delete, 5,
"Instance can not be deleted.",
'Instance deletion',
server)
self.verify(180, self._wait_for_server_deletion, 6,
"Instance can not be deleted.",
'Wait for instance deletion complete',
server)
server = self.verify(700, self._boot_image, 7,
"Instance can not be launched from snapshot.",
'booting instance from snapshot',
snapshot_image_id, flavor.id)
self.verify(30, self._delete_server, 8,
"Server can not be deleted.",
"server deletion", server)
msg = "Flavor failed to be deleted."
self.verify(30, self._delete_flavors, 9, msg,
"flavor deletion", self.compute_client, flavor)
class VcenterVolumesTest(nmanager.SmokeChecksTest):
@classmethod
def setUpClass(cls):
super(VcenterVolumesTest, cls).setUpClass()
if cls.manager.clients_initialized:
cls.micro_flavors = cls.find_micro_flavor()
def setUp(self):
super(VcenterVolumesTest, self).setUp()
self.check_clients_state()
if (not self.config.volume.cinder_vmware_node_exist):
self.skipTest('There are no cinder-vmware nodes')
@classmethod
def tearDownClass(cls):
super(VcenterVolumesTest, cls).tearDownClass()
@classmethod
def find_flavor_id(cls):
flavors = dict([flavor.ram, flavor.id]
for flavor in cls.compute_client.flavors.list()
if flavor.ram >= 128)
return flavors[sorted(flavors)[0]]
def _wait_for_volume_status(self, volume, status):
self.status_timeout(self.volume_client.volumes, volume.id, status)
def _wait_for_instance_status(self, server, status):
self.status_timeout(self.compute_client.servers, server.id, status)
def _create_server(self, client, img_name=None):
flavor_id = self.find_flavor_id()
if not flavor_id:
self.fail("Flavor for tests was not found. Seems that "
"something is wrong with nova services.")
name = rand_name('ost1_test-volume-instance')
base_image_id = self.get_image_from_name(img_name=img_name)
az_name = self.get_availability_zone(image_id=base_image_id)
if 'neutron' in self.config.network.network_provider:
network = [net.id for net in
self.compute_client.networks.list()
if net.label == self.private_net]
if network:
create_kwargs = {'nics': [{'net-id': network[0]}]}
else:
self.fail("Default private network '{0}' isn't present. "
"Please verify it is properly created.".
format(self.private_net))
server = client.servers.create(
name, base_image_id, flavor_id,
availability_zone=az_name,
**create_kwargs)
else:
server = client.servers.create(name, base_image_id,
self.micro_flavors[0].id,
availability_zone=az_name)
self.verify_response_body_content(server.name,
name,
"Instance creation failed")
# The instance retrieved on creation is missing network
# details, necessitating retrieval after it becomes active to
# ensure correct details.
server = self._wait_server_param(client, server, 'addresses', 5, 1)
self.set_resource(name, server)
return server
def test_5_vcenter_volume_create(self):
"""vCenter: Create volume and attach it to instance
Target component: Compute
Scenario:
1. Create a new small-size volume.
2. Wait for volume status to become "available".
3. Create new instance.
4. Wait for "Active" status
5. Attach volume to an instance.
6. Check volume status is "in use".
7. Get information on the created volume by its id.
8. Detach volume from the instance.
9. Check volume has "available" status.
10. Delete volume.
11. Verify that volume deleted
12. Delete server.
Duration: 350 s.
Available since release: 2014.2-6.1
Deployment tags: use_vcenter
"""
msg_s1 = 'Volume was not created.'
img_name = 'TestVM-VMDK'
self.manager.config.compute.image_name = img_name
self.check_image_exists()
az = self.config.volume.cinder_vmware_storage_az
# Create volume
volume = self.verify(120, self._create_volume, 1,
msg_s1,
"volume creation",
self.volume_client, None, availability_zone=az)
self.verify(200, self._wait_for_volume_status, 2,
msg_s1,
"volume becoming 'available'",
volume, 'available')
# create instance
instance = self.verify(200, self._create_server, 3,
"Instance creation failed. ",
"server creation",
self.compute_client, img_name)
self.verify(200, self._wait_for_instance_status, 4,
'Instance status did not become "available".',
"instance becoming 'available'",
instance, 'ACTIVE')
# Attach volume
self.verify(120, self._attach_volume_to_instance, 5,
'Volume couldn`t be attached.',
'volume attachment',
volume, instance.id)
self.verify(180, self._wait_for_volume_status, 6,
'Attached volume status did not become "in-use".',
"volume becoming 'in-use'",
volume, 'in-use')
# get volume details
self.verify(20, self.volume_client.volumes.get, 7,
"Can not retrieve volume details. ",
"retrieving volume details", volume.id)
# detach volume
self.verify(50, self._detach_volume, 8,
'Can not detach volume. ',
"volume detachment",
instance.id, volume.id)
self.verify(120, self._wait_for_volume_status, 9,
'Volume status did not become "available".',
"volume becoming 'available'",
volume, 'available')
self.verify(50, self.volume_client.volumes.delete, 10,
'Can not delete volume. ',
"volume deletion",
volume)
self.verify(50, self.verify_volume_deletion, 11,
'Can not delete volume. ',
"volume deletion",
volume)
self.verify(30, self._delete_server, 12,
"Can not delete server. ",
"server deletion",
instance)


@@ -1,552 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from fuel_health import ceilometermanager
from fuel_health.common.utils.data_utils import rand_name
class CeilometerApiPlatformTests(ceilometermanager.CeilometerBaseTest):
"""TestClass contains tests that check basic Ceilometer functionality."""
def test_create_update_delete_alarm(self):
"""Ceilometer test to create, update, check and delete alarm
Target component: Ceilometer
Scenario:
1. Get the statistic of a metric for the last hour.
2. Create an alarm.
3. Get the alarm.
4. List alarms.
5. Wait for 'ok' alarm state.
6. Update the alarm.
7. Wait for 'alarm' alarm state.
8. Get the alarm history.
9. Set the alarm state to 'insufficient data'.
10. Verify that the alarm state is 'insufficient data'.
11. Delete the alarm.
Duration: 120 s.
Deployment tags: Ceilometer
"""
fail_msg = 'Failed to get statistic of metric.'
msg = 'getting statistic of metric'
an_hour_ago = (datetime.datetime.now() -
datetime.timedelta(hours=1)).isoformat()
query = [{'field': 'timestamp', 'op': 'gt', 'value': an_hour_ago}]
self.verify(600, self.wait_for_statistic_of_metric, 1,
fail_msg, msg, meter_name='image', query=query)
fail_msg = 'Failed to create alarm.'
msg = 'creating alarm'
alarm = self.verify(60, self.create_alarm, 2,
fail_msg, msg,
meter_name='image',
threshold=0.9,
name=rand_name('ceilometer-alarm'),
period=600,
statistic='avg',
comparison_operator='lt')
fail_msg = 'Failed to get alarm.'
msg = 'getting alarm'
self.verify(60, self.ceilometer_client.alarms.get, 3,
fail_msg, msg, alarm.alarm_id)
fail_msg = 'Failed to list alarms.'
msg = 'listing alarms'
query = [{'field': 'project', 'op': 'eq', 'value': alarm.project_id}]
self.verify(60, self.ceilometer_client.alarms.list, 4,
fail_msg, msg, q=query)
fail_msg = 'Failed while waiting for alarm state to become "ok".'
msg = 'waiting for alarm state to become "ok"'
self.verify(1000, self.wait_for_alarm_status, 5,
fail_msg, msg, alarm.alarm_id, 'ok')
fail_msg = 'Failed to update alarm.'
msg = 'updating alarm'
self.verify(60, self.ceilometer_client.alarms.update, 6,
fail_msg, msg, alarm_id=alarm.alarm_id, threshold=1.1)
fail_msg = 'Failed while waiting for alarm state to become "alarm".'
msg = 'waiting for alarm state to become "alarm"'
self.verify(1000, self.wait_for_alarm_status, 7,
fail_msg, msg, alarm.alarm_id, 'alarm')
fail_msg = 'Failed to get alarm history.'
msg = 'getting alarm history'
self.verify(60, self.ceilometer_client.alarms.get_history, 8,
fail_msg, msg, alarm_id=alarm.alarm_id)
fail_msg = 'Failed to set alarm state to "insufficient data".'
msg = 'setting alarm state to "insufficient data"'
self.verify(60, self.ceilometer_client.alarms.set_state, 9,
fail_msg, msg, alarm_id=alarm.alarm_id,
state='insufficient data')
fail_msg = 'Failed while verifying alarm state.'
msg = 'verifying alarm state'
self.verify(60, self.verify_state, 10,
fail_msg, msg, alarm_id=alarm.alarm_id,
state='insufficient data')
fail_msg = 'Failed to delete alarm.'
msg = 'deleting alarm'
self.verify(60, self.ceilometer_client.alarms.delete, 11,
fail_msg, msg, alarm_id=alarm.alarm_id)
@ceilometermanager.check_compute_nodes()
def test_check_alarm_state(self):
"""Ceilometer test to check alarm state and get Nova notifications
Target component: Ceilometer
Scenario:
1. Create an instance.
2. Wait for 'ACTIVE' status of the instance.
3. Get notifications.
4. Get the statistic notification:vcpus.
5. Create an alarm for the summary statistic notification:vcpus.
6. Wait for the alarm state to become 'alarm' or 'ok'.
Duration: 90 s.
Deployment tags: Ceilometer, qemu | kvm
"""
self.check_image_exists()
private_net_id, _ = self.create_network_resources()
fail_msg = 'Failed to create instance.'
msg = 'creating instance'
name = rand_name('ost1_test-ceilo-instance-')
instance = self.verify(600, self.create_server, 1, fail_msg, msg, name,
net_id=private_net_id)
fail_msg = 'Failed while waiting for "ACTIVE" status of instance.'
msg = 'waiting for "ACTIVE" status of instance'
self.verify(200, self.wait_for_resource_status, 2,
fail_msg, msg, self.compute_client.servers,
instance.id, 'ACTIVE')
fail_msg = 'Failed to get notifications.'
msg = 'getting notifications'
query = [{'field': 'resource', 'op': 'eq', 'value': instance.id}]
self.verify(300, self.wait_for_ceilo_objects, 3,
fail_msg, msg, self.nova_notifications, query, 'sample')
fail_msg = 'Failed to get statistic notification:vcpus.'
msg = 'getting statistic notification:vcpus'
an_hour_ago = (datetime.datetime.now() -
datetime.timedelta(hours=1)).isoformat()
query = [{'field': 'timestamp', 'op': 'gt', 'value': an_hour_ago}]
vcpus_stat = self.verify(60, self.wait_for_statistic_of_metric, 4,
fail_msg, msg, 'vcpus', query)
fail_msg = ('Failed to create alarm for '
'summary statistic notification:vcpus.')
msg = 'creating alarm for summary statistic notification:vcpus'
threshold = vcpus_stat[0].sum - 1
alarm = self.verify(60, self.create_alarm, 5,
fail_msg, msg,
meter_name='vcpus',
threshold=threshold,
name=rand_name('ost1_test-ceilo-alarm'),
period=600,
statistic='sum',
comparison_operator='lt')
fail_msg = ('Failed while waiting for '
'alarm state to become "alarm" or "ok".')
msg = 'waiting for alarm state to become "alarm" or "ok"'
self.verify(300, self.wait_for_alarm_status, 6,
fail_msg, msg, alarm.alarm_id)
def test_create_sample(self):
"""Ceilometer test to create, check and list samples
Target component: Ceilometer
Scenario:
1. Create a sample for the image.
2. Get count of samples stored for the last hour for an image.
3. Create another sample for the image.
4. Check that the sample has the expected resource.
5. Get count of samples and compare counts before and after
the second sample creation.
6. Get the resource of the sample.
Duration: 5 s.
Deployment tags: Ceilometer
"""
self.check_image_exists()
image_id = self.get_image_from_name()
fail_msg = 'Failed to create first sample for image.'
msg = 'creating first sample for image'
self.verify(60, self.create_image_sample, 1, fail_msg, msg, image_id)
an_hour_ago = (datetime.datetime.now() -
datetime.timedelta(hours=1)).isoformat()
query = [{'field': 'resource', 'op': 'eq', 'value': image_id},
{'field': 'timestamp', 'op': 'gt', 'value': an_hour_ago}]
fail_msg = 'Failed to get samples for image.'
msg = 'getting samples for image'
count_before_create_second_sample = self.verify(
60, self.get_samples_count, 2, fail_msg, msg, 'image', query)
fail_msg = 'Failed to create second sample for image.'
msg = 'creating second sample for image'
second_sample = self.verify(60, self.create_image_sample, 3,
fail_msg, msg, image_id)
fail_msg = ('Resource of sample is missing or '
'does not equal to the expected resource.')
self.verify_response_body_value(
body_structure=second_sample[0].resource_id, value=image_id,
msg=fail_msg, failed_step=4)
fail_msg = ('Failed while waiting '
'for addition of new sample to samples list.')
msg = 'waiting for addition of new sample to samples list'
self.verify(20, self.wait_samples_count, 5, fail_msg, msg,
'image', query, count_before_create_second_sample)
fail_msg = 'Failed to get resource of sample.'
msg = 'getting resource of sample'
self.verify(20, self.ceilometer_client.resources.get, 6,
fail_msg, msg, second_sample[0].resource_id)
@ceilometermanager.check_compute_nodes()
def test_check_events_and_traits(self):
"""Ceilometer test to check events and traits
Target component: Ceilometer
Scenario:
1. Create an instance.
2. Wait for 'ACTIVE' status of the instance.
3. Check that event type list contains expected event type.
4. Check that event list contains event with expected type.
5. Check event traits description.
6. Check that event exists for expected instance.
7. Get information about expected event.
8. Delete the instance.
Duration: 40 s.
Deployment tags: Ceilometer
"""
event_type = 'compute.instance.create.start'
self.check_image_exists()
private_net_id, _ = self.create_network_resources()
name = rand_name('ost1_test-ceilo-instance-')
fail_msg = 'Failed to create instance.'
msg = 'creating instance'
vcenter = self.config.compute.use_vcenter
image_name = 'TestVM-VMDK' if vcenter else None
instance = self.verify(600, self.create_server, 1, fail_msg, msg, name,
net_id=private_net_id, img_name=image_name)
fail_msg = 'Failed while waiting for "ACTIVE" status of instance.'
msg = 'waiting for "ACTIVE" status of instance'
self.verify(200, self.wait_for_resource_status, 2,
fail_msg, msg, self.compute_client.servers,
instance.id, 'ACTIVE')
fail_msg = ('Failed to find "{event_type}" in event type list.'.format(
event_type=event_type))
msg = ('searching "{event_type}" in event type list'.format(
event_type=event_type))
self.verify(60, self.check_event_type, 3, fail_msg, msg, event_type)
fail_msg = ('Failed to find event with "{event_type}" type in event '
'list.'.format(event_type=event_type))
msg = ('searching event with "{event_type}" type in event type '
'list'.format(event_type=event_type))
query = [{'field': 'event_type', 'op': 'eq', 'value': event_type}]
events_list = self.verify(60, self.ceilometer_client.events.list, 4,
fail_msg, msg, query, limit=1000)
if not events_list:
self.fail('Events with "{event_type}" type not found'.format(
event_type=event_type))
traits = ['instance_id', 'request_id', 'state', 'service', 'host']
fail_msg = 'Failed to check event traits description.'
msg = 'checking event traits description'
self.verify(60, self.check_traits, 5, fail_msg, msg,
event_type=event_type, traits=traits)
fail_msg = ('Failed to find "{event_type}" event type with expected '
'instance ID.'.format(event_type=event_type))
msg = ('searching "{event_type}" event type with expected '
'instance ID'.format(event_type=event_type))
message_id = self.verify(60, self.check_event_message_id, 6,
fail_msg, msg, events_list, instance.id)
fail_msg = 'Failed to get event information.'
msg = 'getting event information'
self.verify(60, self.ceilometer_client.events.get, 7,
fail_msg, msg, message_id)
fail_msg = 'Failed to delete the instance.'
msg = 'deleting instance'
self.verify(60, self._delete_server, 8, fail_msg, msg, instance)
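
# --- Illustrative sketch (editorial, not from the removed file): steps
# 3-7 above map onto four Ceilometer v2 API calls. `cclient` is assumed
# to be a configured python-ceilometerclient handle; attribute names
# follow the v2 client.
def _sketch_inspect_event(cclient, instance_id):
    event_type = 'compute.instance.create.start'
    # step 3: the event type must be registered
    assert event_type in [et.event_type
                          for et in cclient.event_types.list()]
    # step 5: traits this event type is expected to expose
    for trait in cclient.trait_descriptions.list(event_type):
        print('%s: %s' % (trait.name, trait.type))
    # steps 4 and 6: find an event of that type for our instance
    query = [{'field': 'event_type', 'op': 'eq', 'value': event_type}]
    for event in cclient.events.list(q=query):
        traits = dict((t['name'], t['value']) for t in event.traits)
        if traits.get('instance_id') == instance_id:
            # step 7: fetch the full record by its message id
            return cclient.events.get(event.message_id)
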
@ceilometermanager.check_compute_nodes()
def test_check_volume_events(self):
"""Ceilometer test to check events from Cinder
Target component: Ceilometer
Scenario:
1. Create an instance.
2. Wait for 'ACTIVE' status of the instance.
3. Create a volume and volume snapshot.
4. Get volume snapshot events.
5. Get volume events.
6. Delete the instance.
Duration: 150 s.
Deployment tags: Ceilometer
"""
if (not self.config.volume.cinder_node_exist
and not self.config.volume.ceph_exist):
self.skipTest('There are no storage nodes for volumes.')
self.check_image_exists()
private_net_id, _ = self.create_network_resources()
fail_msg = 'Failed to create instance.'
msg = 'creating instance'
name = rand_name('ostf-ceilo-instance-')
vcenter = self.config.compute.use_vcenter
image_name = 'TestVM-VMDK' if vcenter else None
instance = self.verify(300, self.create_server, 1, fail_msg, msg, name,
net_id=private_net_id, img_name=image_name)
fail_msg = 'Failed while waiting for "ACTIVE" status of instance.'
msg = 'waiting for "ACTIVE" status of instance'
self.verify(200, self.wait_for_resource_status, 2,
fail_msg, msg, self.compute_client.servers,
instance.id, 'ACTIVE')
fail_msg = 'Failed to create volume and volume snapshot.'
msg = 'creating volume and volume snapshot'
volume, snapshot = self.verify(300, self.volume_helper, 3,
fail_msg, msg, instance)
query = [{'field': 'resource_id', 'op': 'eq', 'value': snapshot.id}]
fail_msg = 'Failed to get volume snapshot events.'
msg = 'getting volume snapshot events'
self.verify(300, self.wait_for_ceilo_objects, 4,
fail_msg, msg, self.snapshot_events, query, 'event')
query = [{'field': 'resource_id', 'op': 'eq', 'value': volume.id}]
fail_msg = 'Failed to get volume events.'
msg = 'getting volume events'
self.verify(300, self.wait_for_ceilo_objects, 5,
fail_msg, msg, self.volume_events, query, 'event')
fail_msg = 'Failed to delete the instance.'
msg = 'deleting instance'
self.verify(60, self._delete_server, 6, fail_msg, msg, instance)
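
# --- Illustrative sketch (editorial, not from the removed file):
# `wait_for_ceilo_objects` is an OSTF helper; a minimal stand-in for the
# event case is a plain polling loop over events.list.
def _sketch_wait_for_events(cclient, expected_types, query,
                            timeout=300, interval=10):
    import time

    seen = set()
    deadline = time.time() + timeout
    while time.time() < deadline:
        seen = set(e.event_type for e in cclient.events.list(q=query))
        if set(expected_types) <= seen:
            return
        time.sleep(interval)
    raise AssertionError('events never seen: %s'
                         % (set(expected_types) - seen))
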
def test_check_glance_notifications(self):
"""Ceilometer test to check notifications from Glance
Target component: Ceilometer
Scenario:
1. Create an image.
2. Get image notifications.
Duration: 5 s.
Deployment tags: Ceilometer
"""
fail_msg = 'Failed to create image.'
msg = 'creating image'
image = self.verify(120, self.glance_helper, 1, fail_msg, msg)
query = [{'field': 'resource', 'op': 'eq', 'value': image.id}]
fail_msg = 'Failed to get image notifications.'
msg = 'getting image notifications'
self.verify(300, self.wait_for_ceilo_objects, 2,
fail_msg, msg, self.glance_notifications, query, 'sample')
def test_check_keystone_notifications(self):
"""Ceilometer test to check notifications from Keystone
Target component: Ceilometer
Scenario:
1. Create Keystone resources.
2. Get project notifications.
3. Get user notifications.
4. Get role notifications.
5. Get role assignment notifications.
6. Get group notifications.
7. Get trust notifications.
Duration: 5 s.
Available since release: 2014.2-6.0
Deployment tags: Ceilometer
"""
fail_msg = 'Failed to create some Keystone resources.'
msg = 'creating Keystone resources'
tenant, user, role, group, trust = self.verify(
60, self.identity_helper, 1, fail_msg, msg)
fail_msg = 'Failed to get project notifications.'
msg = 'getting project notifications'
query = [{'field': 'resource', 'op': 'eq', 'value': tenant.id}]
self.verify(300, self.wait_for_ceilo_objects, 2, fail_msg, msg,
self.keystone_project_notifications, query, 'sample')
fail_msg = 'Failed to get user notifications.'
msg = 'getting user notifications'
query = [{'field': 'resource', 'op': 'eq', 'value': user.id}]
self.verify(300, self.wait_for_ceilo_objects, 3, fail_msg, msg,
self.keystone_user_notifications, query, 'sample')
fail_msg = 'Failed to get role notifications.'
msg = 'getting role notifications'
query = [{'field': 'resource', 'op': 'eq', 'value': role.id}]
self.verify(300, self.wait_for_ceilo_objects, 4, fail_msg, msg,
self.keystone_role_notifications, query, 'sample')
fail_msg = 'Failed to get role assignment notifications.'
msg = 'getting role assignment notifications'
query = [{'field': 'resource', 'op': 'eq', 'value': role.id}]
self.verify(300, self.wait_for_ceilo_objects, 5, fail_msg, msg,
self.keystone_role_assignment_notifications, query,
'sample')
fail_msg = 'Failed to get group notifications.'
msg = 'getting group notifications'
query = [{'field': 'resource', 'op': 'eq', 'value': group.id}]
self.verify(300, self.wait_for_ceilo_objects, 6, fail_msg, msg,
self.keystone_group_notifications, query, 'sample')
fail_msg = 'Failed to get trust notifications.'
msg = 'getting trust notifications'
query = [{'field': 'resource', 'op': 'eq', 'value': trust.id}]
self.verify(300, self.wait_for_ceilo_objects, 7, fail_msg, msg,
self.keystone_trust_notifications, query, 'sample')
def test_check_neutron_notifications(self):
"""Ceilometer test to check notifications from Neutron
Target component: Ceilometer
Scenario:
1. Create Neutron resources.
2. Get network notifications.
3. Get subnet notifications.
4. Get port notifications.
5. Get router notifications.
6. Get floating IP notifications.
Duration: 40 s.
Deployment tags: Ceilometer, Neutron
"""
fail_msg = 'Failed to create some Neutron resources.'
msg = 'creating Neutron resources'
net, subnet, port, router, flip = self.verify(
60, self.neutron_helper, 1, fail_msg, msg)
fail_msg = 'Failed to get network notifications.'
msg = 'getting network notifications'
query = [{'field': 'resource', 'op': 'eq', 'value': net['id']}]
self.verify(60, self.wait_for_ceilo_objects, 2, fail_msg, msg,
self.neutron_network_notifications, query, 'sample')
fail_msg = 'Failed to get subnet notifications.'
msg = 'getting subnet notifications'
query = [{'field': 'resource', 'op': 'eq', 'value': subnet['id']}]
self.verify(60, self.wait_for_ceilo_objects, 3, fail_msg, msg,
self.neutron_subnet_notifications, query, 'sample')
fail_msg = 'Failed to get port notifications.'
msg = 'getting port notifications'
query = [{'field': 'resource', 'op': 'eq', 'value': port['id']}]
self.verify(60, self.wait_for_ceilo_objects, 4, fail_msg, msg,
self.neutron_port_notifications, query, 'sample')
fail_msg = 'Failed to get router notifications.'
msg = 'getting router notifications'
query = [{'field': 'resource', 'op': 'eq', 'value': router['id']}]
self.verify(60, self.wait_for_ceilo_objects, 5, fail_msg, msg,
self.neutron_router_notifications, query, 'sample')
fail_msg = 'Failed to get floating IP notifications.'
msg = 'getting floating IP notifications'
query = [{'field': 'resource', 'op': 'eq', 'value': flip['id']}]
self.verify(60, self.wait_for_ceilo_objects, 6, fail_msg, msg,
self.neutron_floatingip_notifications, query, 'sample')
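
# --- Editorial note: every check above builds the same v2 query shape,
# so a one-line helper would remove the repetition. A refactoring
# suggestion only, not code from the original file.
def _resource_query(resource_id):
    """Build the standard 'resource == <id>' Ceilometer v2 query."""
    return [{'field': 'resource', 'op': 'eq', 'value': resource_id}]
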
@ceilometermanager.check_compute_nodes()
def test_check_sahara_notifications(self):
"""Ceilometer test to check notifications from Sahara
Target component: Ceilometer
Scenario:
1. Find a correctly registered Sahara image.
2. Create a Sahara cluster.
3. Get cluster notifications.
Duration: 40 s.
Deployment tags: Ceilometer, Sahara
"""
plugin_name = 'vanilla'
mapping_versions_of_plugin = {
"6.1": "2.4.1",
"7.0": "2.6.0",
"8.0": "2.7.1",
"9.0": "2.7.1",
"10.0": "2.7.1"
}
hadoop_version = mapping_versions_of_plugin.get(
self.config.fuel.fuel_version, "2.7.1")
fail_msg = 'Failed to find correctly registered Sahara image.'
msg = 'finding correctly registered Sahara image'
image_id = self.verify(60, self.find_and_check_image, 1,
fail_msg, msg, plugin_name, hadoop_version)
if image_id is None:
self.skipTest('Correctly registered image '
'to create Sahara cluster not found.')
fail_msg = 'Failed to create Sahara cluster.'
msg = 'creating Sahara cluster'
cluster = self.verify(300, self.sahara_helper, 2, fail_msg,
msg, image_id, plugin_name, hadoop_version)
fail_msg = 'Failed to get cluster notifications.'
msg = 'getting cluster notifications'
query = [{'field': 'resource', 'op': 'eq', 'value': cluster.id}]
self.verify(60, self.wait_for_ceilo_objects, 3, fail_msg, msg,
self.sahara_cluster_notifications, query, 'sample')


@@ -16,8 +16,7 @@ from fuel_health import heatmanager
 
 class HeatSmokeTests(heatmanager.HeatBaseTest):
-    """Test class verifies Heat API calls, rollback and
-    autoscaling use-cases.
+    """Test class verifies Heat API calls and rollback use-cases.
     """
 
     def setUp(self):
         super(HeatSmokeTests, self).setUp()
@@ -565,165 +564,6 @@ class HeatSmokeTests(heatmanager.HeatBaseTest):
stack.id
)
def test_autoscaling(self):
"""Check stack autoscaling
Target component: Heat
Scenario:
1. Create test flavor.
2. Create a keypair.
3. Save generated private key to file on Controller node.
4. Create a security group.
5. Create a stack.
6. Wait for the stack status to change to 'CREATE_COMPLETE'.
7. Create a floating IP.
8. Assign the floating IP to the instance of the stack.
9. Wait until the instance is ready to connect.
10. Wait for the 2nd instance to be launched.
11. Wait for the 2nd instance to be terminated.
12. Delete the file with private key.
13. Delete the stack.
14. Wait for the stack to be deleted.
Duration: 2200 s.
Deployment tags: Ceilometer
"""
self.check_image_exists()
self.check_required_resources(self.min_required_ram_mb)
# creation of test flavor
heat_flavor = self.verify(
50, self.create_flavor,
1, 'Test flavor can not be created.',
'flavor creation'
)
# creation of test keypair
keypair = self.verify(
10, self._create_keypair,
2, 'Keypair can not be created.',
'keypair creation',
self.compute_client
)
path_to_key = self.verify(
10, self.save_key_to_file,
3, 'Private key can not be saved to file.',
'saving private key to the file',
keypair.private_key
)
# creation of test security group
sec_group = self.verify(
60, self._create_security_group,
4, 'Security group can not be created.',
'security group creation',
self.compute_client, 'ost1_test-sgroup'
)
# definition of stack parameters
parameters = {
'KeyName': keypair.name,
'InstanceType': heat_flavor.name,
'ImageId': self.config.compute.image_name,
'SecurityGroup': sec_group.name
}
if 'neutron' in self.config.network.network_provider:
parameters['Net'], _ = self.create_network_resources()
template = self.load_template('heat_autoscaling_neutron.yaml')
else:
template = self.load_template('heat_autoscaling_nova.yaml')
# creation of stack
fail_msg = 'Stack was not created properly.'
stack = self.verify(
60, self.create_stack,
5, fail_msg,
'stack creation',
template, parameters=parameters
)
self.verify(
600, self.wait_for_stack_status,
6, fail_msg,
'stack status becoming "CREATE_COMPLETE"',
stack.id, 'CREATE_COMPLETE', 600, 15
)
reduced_stack_name = '{0}-{1}'.format(
stack.stack_name[:2], stack.stack_name[-4:])
instances = self.get_instances_by_name_mask(reduced_stack_name)
self.verify(
2, self.assertTrue,
6, 'Instance for the stack was not created.',
'verifying the number of instances created for the stack',
len(instances) != 0
)
# assigning floating ip
floating_ip = self.verify(
10, self._create_floating_ip,
7, 'Floating IP can not be created.',
'floating IP creation'
)
self.verify(
20, self._assign_floating_ip_to_instance,
8, 'Floating IP can not be assigned.',
'assigning floating IP',
self.compute_client, instances[0], floating_ip
)
# vm connection check
vm_connection = ('ssh -o StrictHostKeyChecking=no -i {0} {1}@{2}'.
format(path_to_key, 'cirros', floating_ip.ip))
self.verify(
120, self.wait_for_vm_ready_for_load,
9, 'VM is not ready or connection can not be established.',
'test script execution on VM',
vm_connection, 120, 15
)
# launching the second instance during autoscaling
self.verify(
1500, self.wait_for_autoscaling,
10, 'Failed to launch the 2nd instance per autoscaling alarm.',
'launching the new instance per autoscaling alarm',
len(instances) + 2, 1500, 10, reduced_stack_name
)
# termination of the second instance during autoscaling
self.verify(
1500, self.wait_for_autoscaling,
11, 'Failed to terminate the 2nd instance per autoscaling alarm.',
'terminating the 2nd instance per autoscaling alarm',
len(instances) + 1, 1500, 10, reduced_stack_name
)
# deletion of file with keypair from vm
self.verify(
10, self.delete_key_file,
12, 'The file with private key can not be deleted.',
'deleting the file with private key',
path_to_key
)
# deletion of stack
self.verify(
20, self.heat_client.stacks.delete,
13, 'Can not delete stack.',
'deleting stack',
stack.id
)
self.verify(
    100, self.wait_for_stack_deleted,
    14, 'Stack was not deleted.',
    'waiting for the stack to be deleted',
    stack.id
)
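
# --- Illustrative sketch (editorial, not from the removed file):
# `wait_for_autoscaling` is an OSTF helper; the idea is to poll Nova
# until the number of servers matching the stack name mask reaches the
# expected count. `nova` is assumed to be a novaclient handle.
def _sketch_wait_for_autoscaling(nova, name_mask, expected_count,
                                 timeout, interval):
    import time

    deadline = time.time() + timeout
    while time.time() < deadline:
        matched = [s for s in nova.servers.list() if name_mask in s.name]
        if len(matched) == expected_count:
            return matched
        time.sleep(interval)
    raise AssertionError('expected %d instances matching %r'
                         % (expected_count, name_mask))
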
def test_rollback(self):
"""Check stack rollback
Target component: Heat


@@ -229,15 +229,11 @@ def _get_cluster_attrs(cluster_id, token=None):
     additional_components = \
         response['editable'].get('additional_components', dict())
 
-    use_vcenter = response['editable']['common'].get('use_vcenter', None)
     libvrt_data = response['editable']['common'].get('libvirt_type', None)
 
-    if use_vcenter and use_vcenter.get('value'):
-        deployment_tags.add('use_vcenter')
-
     additional_depl_tags = set()
 
-    comp_names = ['murano', 'sahara', 'heat', 'ceilometer', 'ironic']
+    comp_names = ['murano', 'sahara', 'heat', 'ironic']
 
     def processor(comp):
         if comp in comp_names:
@@ -285,8 +281,7 @@ def _get_cluster_attrs(cluster_id, token=None):
 
     storage_components = response['editable'].get('storage', dict())
     storage_comp = ['volumes_ceph', 'images_ceph', 'ephemeral_ceph',
-                    'objects_ceph', 'osd_pool_size', 'volumes_lvm',
-                    'volumes_vmdk', 'images_vcenter']
+                    'objects_ceph', 'osd_pool_size', 'volumes_lvm']
 
     storage_depl_tags = set()


@@ -1,6 +1,5 @@
 aodhclient>=0.5.0 # Apache-2.0
 python-cinderclient>=1.6.0,!=1.7.0,!=1.7.1 # Apache-2.0
-python-ceilometerclient>=2.5.0 # Apache-2.0
 python-keystoneclient>=2.0.0,!=2.1.0 # Apache-2.0
 python-muranoclient>=0.8.2 # Apache-2.0
 python-novaclient>=2.29.0,!=2.33.0 # Apache-2.0


@@ -21,7 +21,6 @@ Requires: python-aodhclient >= 0.1.0
 Requires: python-amqplib >= 1.0.2
 Requires: python-anyjson >= 0.3.3
 Requires: python-oslo-config >= 1.1.1
-Requires: python-ceilometerclient >= 1.0.9
 Requires: python-cinderclient >= 1.0.6
 Requires: python-ironicclient >= 0.3.3
 Requires: python-keystoneclient >= 0.11