diff --git a/fuel_health/config.py b/fuel_health/config.py
index 12edb5b4..390b66b3 100644
--- a/fuel_health/config.py
+++ b/fuel_health/config.py
@@ -258,6 +258,9 @@ VolumeGroup = [
     cfg.BoolOpt('cinder_node_exist',
                 default=True,
                 help="Allow to run tests if cinder exist"),
+    cfg.BoolOpt('cinder_vmware_node_exist',
+                default=True,
+                help="Allow to run tests if cinder-vmware exist"),
     cfg.BoolOpt('ceph_exist',
                 default=True,
                 help="Allow to run tests if ceph exist"),
@@ -270,6 +273,9 @@ VolumeGroup = [
     cfg.StrOpt('backend2_name',
               default='BACKEND_2',
               help="Name of the backend2 (must be declared in cinder.conf)"),
+    cfg.StrOpt('cinder_vmware_storage_az',
+               default='vcenter',
+               help="Name of storage availability zone for cinder-vmware."),
 ]
 
 
@@ -537,6 +543,8 @@ class NailgunConfig(object):
             LOG.info('set proxy successful')
             self._parse_cluster_generated_data()
             LOG.info('parse generated successful')
+            self._parse_vmware_attributes()
+            LOG.info('parse vmware attributes successful')
         except exceptions.SetProxy as exc:
             raise exc
         except Exception:
@@ -604,6 +612,8 @@ class NailgunConfig(object):
                                     node['online'] is True, data)
         cinder_nodes = filter(lambda node: 'cinder' in node['roles'],
                               data)
+        cinder_vmware_nodes = filter(lambda node: 'cinder-vmware' in
+                                     node['roles'], data)
         controller_ips = []
         conntroller_names = []
         public_ips = []
@@ -625,6 +635,8 @@ class NailgunConfig(object):
             self.compute.online_controllers = online_controllers_ips
         if not cinder_nodes:
             self.volume.cinder_node_exist = False
+        if not cinder_vmware_nodes:
+            self.volume.cinder_vmware_node_exist = False
 
         compute_nodes = filter(lambda node: 'compute' in node['roles'],
                                data)
@@ -680,6 +692,13 @@ class NailgunConfig(object):
         self.identity.url = data['horizon_url'] + 'dashboard'
         self.identity.uri = data['keystone_url'] + 'v2.0/'
 
+    def _parse_vmware_attributes(self):
+        if self.volume.cinder_vmware_node_exist:
+            api_url = '/api/clusters/%s/vmware_attributes' % self.cluster_id
+            data = self.req_session.get(self.nailgun_url + api_url).json()
+            az = data['editable']['value']['availability_zones'][0]['az_name']
+            self.volume.cinder_vmware_storage_az = "{0}-cinder".format(az)
+
     def find_proxy(self, ip):
         endpoint = self.network.raw_data.get(
diff --git a/fuel_health/nmanager.py b/fuel_health/nmanager.py
index 872541f9..e1ff8472 100644
--- a/fuel_health/nmanager.py
+++ b/fuel_health/nmanager.py
@@ -967,13 +967,15 @@ class SmokeChecksTest(OfficialClientTest):
         self.set_resource(name, role)
         return role
 
-    def _create_boot_volume(self, client):
+    def _create_boot_volume(self, client, img_name=None, **kwargs):
         display_name = rand_name('ost1_test-bootable-volume')
-        imageRef = self.get_image_from_name()
+
+        imageRef = self.get_image_from_name(img_name=img_name)
+
         LOG.debug(
             'Image ref is {0} for volume {1}'.format(imageRef, display_name))
         return self._create_volume(
-            client, display_name=display_name, imageRef=imageRef)
+            client, display_name=display_name, imageRef=imageRef, **kwargs)
 
     def create_instance_from_volume(self, client, volume):
         if not self.find_micro_flavor():
@@ -1012,12 +1014,14 @@ class SmokeChecksTest(OfficialClientTest):
         self.set_resource(name, server)
         return server
 
-    def _create_server(self, client):
+    def _create_server(self, client, img_name=None):
         if not self.find_micro_flavor():
             self.fail("m1.micro flavor was not created.")
 
         name = rand_name('ost1_test-volume-instance')
-        base_image_id = self.get_image_from_name()
+
+        base_image_id = self.get_image_from_name(img_name=img_name)
+
         if 'neutron' in self.config.network.network_provider:
             network = [net.id for net in
                        self.compute_client.networks.list()
diff --git a/fuel_health/tests/smoke/test_vcenter.py b/fuel_health/tests/smoke/test_vcenter.py
index e7641d1a..84eb19e7 100644
--- a/fuel_health/tests/smoke/test_vcenter.py
+++ b/fuel_health/tests/smoke/test_vcenter.py
@@ -405,3 +405,123 @@ class TestVcenterImageAction(nmanager.SmokeChecksTest):
         self.verify(30, self._delete_server, 7,
                     "Server can not be deleted.",
                     "server deletion", server)
+
+
+class VcenterVolumesTest(nmanager.SmokeChecksTest):
+
+    @classmethod
+    def setUpClass(cls):
+        super(VcenterVolumesTest, cls).setUpClass()
+        if cls.manager.clients_initialized:
+            cls.micro_flavors = cls.find_micro_flavor()
+
+    def setUp(self):
+        super(VcenterVolumesTest, self).setUp()
+        self.check_clients_state()
+        if not self.config.volume.cinder_vmware_node_exist:
+            self.skipTest('There are no cinder-vmware nodes')
+        self.check_image_exists()
+
+    @classmethod
+    def tearDownClass(cls):
+        super(VcenterVolumesTest, cls).tearDownClass()
+
+    def _wait_for_volume_status(self, volume, status):
+        self.status_timeout(self.volume_client.volumes, volume.id, status)
+
+    def _wait_for_instance_status(self, server, status):
+        self.status_timeout(self.compute_client.servers, server.id, status)
+
+    def test_5_vcenter_volume_create(self):
+        """vCenter: Create volume and attach it to instance
+        Target component: Compute
+
+        Scenario:
+            1. Create a new small-size volume.
+            2. Wait for volume status to become "available".
+            3. Check volume has correct name.
+            4. Create new instance.
+            5. Wait for instance status to become "ACTIVE".
+            6. Attach volume to the instance.
+            7. Check volume status is "in-use".
+            8. Get information on the created volume by its id.
+            9. Detach volume from the instance.
+            10. Check volume has "available" status.
+            11. Delete volume.
+            12. Verify that volume is deleted.
+            13. Delete server.
+
+        Duration: 350 s.
+        Available since release: 2014.2-6.1
+        Deployment tags: nova_network, use_vcenter
+        """
+        msg_s1 = 'Volume was not created.'
+        img_name = 'TestVM-VMDK'
+        az = self.config.volume.cinder_vmware_storage_az
+        # Create volume
+        volume = self.verify(120, self._create_volume, 1,
+                             msg_s1,
+                             "volume creation",
+                             self.volume_client, None, availability_zone=az)
+
+        self.verify(200, self._wait_for_volume_status, 2,
+                    msg_s1,
+                    "volume becoming 'available'",
+                    volume, 'available')
+
+        self.verify_response_true(
+            volume.display_name.startswith('ostf-test-volume'),
+            'Step 3 failed: {msg}'.format(msg=msg_s1))
+
+        # Create instance
+        instance = self.verify(200, self._create_server, 4,
+                               "Instance creation failed. ",
+                               "server creation",
+                               self.compute_client, img_name)
+
+        self.verify(200, self._wait_for_instance_status, 5,
+                    'Instance status did not become "ACTIVE".',
+                    "instance becoming 'ACTIVE'",
+                    instance, 'ACTIVE')
+
+        # Attach volume
+        self.verify(120, self._attach_volume_to_instance, 6,
+                    'Volume could not be attached.',
+                    'volume attachment',
+                    volume, instance.id)
+
+        self.verify(180, self._wait_for_volume_status, 7,
+                    'Attached volume status did not become "in-use".',
+                    "volume becoming 'in-use'",
+                    volume, 'in-use')
+
+        # Get volume details
+        self.verify(20, self.volume_client.volumes.get, 8,
+                    "Can not retrieve volume details. ",
+                    "retrieving volume details", volume.id)
+
+        # Detach volume
+        self.verify(50, self._detach_volume, 9,
+                    'Can not detach volume. ',
+                    "volume detachment",
+                    instance.id, volume.id)
+
+        self.verify(120, self._wait_for_volume_status, 10,
+                    'Volume status did not become "available".',
+                    "volume becoming 'available'",
+                    volume, 'available')
+
+        self.verify(50, self.volume_client.volumes.delete, 11,
+                    'Can not delete volume. ',
+                    "volume deletion",
+                    volume)
+
+        self.verify(50, self.verify_volume_deletion, 12,
+                    'Can not delete volume. ',
+                    "volume deletion",
+                    volume)
+
+        self.verify(30, self._delete_server, 13,
+                    "Can not delete server. ",
+                    "server deletion",
+                    instance)