Add .gitreview and fix tests

Change-Id: Ie3c8a221573cceefef868f8b990b7bed2ef26025
This commit is contained in:
Vladimir Kuklin 2016-11-24 19:41:02 +03:00
parent 781faccfce
commit 8ff6fbb719
3 changed files with 302 additions and 297 deletions

4
.gitreview Normal file
View File

@ -0,0 +1,4 @@
[gerrit]
host=review.openstack.org
port=29418
project=openstack/fuel-nailgun-extension-converted-serializers.git

View File

@ -28,7 +28,7 @@ logger = logging.getLogger(__name__)
class ConvertPreLCMtoLCM(extensions.BasePipeline): class ConvertPreLCMtoLCM(extensions.BasePipeline):
@classmethod @classmethod
def pre_process_data_for_cluster(cls,cluster, data, **kwargs): def pre_process_data_for_cluster(cls, cluster, data, **kwargs):
return data return data
@classmethod @classmethod
@ -65,16 +65,20 @@ class ConvertPreLCMtoLCM(extensions.BasePipeline):
else: else:
serializer = get_serializer_for_cluster(node.cluster)() serializer = get_serializer_for_cluster(node.cluster)()
serializer.initialize(node.cluster) serializer.initialize(node.cluster)
role = objects.Node.all_roles(node)[0] role = getattr(objects.Node, "all_roles",
objects.Node.all_tags)(node)[0]
real_data = serializer.serialize_node({}, node, role) real_data = serializer.serialize_node({}, node, role)
return real_data return real_data
@classmethod @classmethod
def process_deployment_for_cluster(cls, cluster, data, **kwargs): def process_deployment_for_cluster(cls, cluster, data, **kwargs):
pre_processed_data = cls.pre_process_data_for_cluster(cluster, data, **kwargs) pre_processed_data = cls.pre_process_data_for_cluster(cluster, data,
real_data = cls.serialize_cluster(cluster, pre_processed_data, **kwargs) **kwargs)
real_data = cls.serialize_cluster(cluster, pre_processed_data,
**kwargs)
post_processed_data = cls.post_process_data_for_cluster(cluster, real_data, **kwargs) post_processed_data = cls.post_process_data_for_cluster(
cluster, real_data, **kwargs)
# copypaste cluster specific values from LCM serializer. # copypaste cluster specific values from LCM serializer.
# This is needed for tasks paramters interpolation like CLUSTER_ID # This is needed for tasks paramters interpolation like CLUSTER_ID
cluster_data = data['cluster'] cluster_data = data['cluster']
@ -83,17 +87,14 @@ class ConvertPreLCMtoLCM(extensions.BasePipeline):
@classmethod @classmethod
def process_deployment_for_node(cls, node, node_data, **kwargs): def process_deployment_for_node(cls, node, node_data, **kwargs):
pre_processed_data = cls.pre_process_data_for_node(node, node_data, **kwargs) pre_processed_data = cls.pre_process_data_for_node(node,
node_data, **kwargs)
real_data = cls.serialize_node(node, pre_processed_data, **kwargs) real_data = cls.serialize_node(node, pre_processed_data, **kwargs)
post_processed_data = cls.post_process_data_for_node(node, real_data, post_processed_data = cls.post_process_data_for_node(node, real_data,
**kwargs) **kwargs)
return post_processed_data return post_processed_data
#@classmethod
#def process_provisioning(cls, data, cluster, nodes, **kwargs):
# return data
class ConvertedSerializersExtension(extensions.BaseExtension): class ConvertedSerializersExtension(extensions.BaseExtension):
name = 'converted_serializers' name = 'converted_serializers'

View File

@ -15,7 +15,6 @@
# under the License. # under the License.
from copy import deepcopy
import mock import mock
import six import six
@ -26,15 +25,15 @@ from nailgun.db.sqlalchemy import models
from nailgun import objects from nailgun import objects
from nailgun import rpc from nailgun import rpc
from nailgun.orchestrator import deployment_serializers
from nailgun.orchestrator.deployment_serializers import \
deployment_info_to_legacy
from nailgun.orchestrator.deployment_serializers import \
get_serializer_for_cluster
from nailgun.extensions.network_manager.serializers.neutron_serializers import \ from nailgun.extensions.network_manager.serializers.neutron_serializers import \
NeutronNetworkDeploymentSerializer80 NeutronNetworkDeploymentSerializer80
from nailgun.extensions.network_manager.serializers.neutron_serializers import \ from nailgun.extensions.network_manager.serializers.neutron_serializers import \
NeutronNetworkTemplateSerializer80 NeutronNetworkTemplateSerializer80
from nailgun.orchestrator import deployment_serializers
# from nailgun.orchestrator.deployment_serializers import \
# deployment_info_to_legacy
from nailgun.orchestrator.deployment_serializers import \
get_serializer_for_cluster
from nailgun.test.integration.test_orchestrator_serializer import \ from nailgun.test.integration.test_orchestrator_serializer import \
BaseDeploymentSerializer BaseDeploymentSerializer
from nailgun.test.integration.test_orchestrator_serializer import \ from nailgun.test.integration.test_orchestrator_serializer import \
@ -101,21 +100,21 @@ class TestNetworkTemplateSerializer80MixIn(
get_net_provider_serializer(self.cluster) get_net_provider_serializer(self.cluster)
self.assertIs(net_serializer, self.template_serializer) self.assertIs(net_serializer, self.template_serializer)
def test_baremetal_neutron_attrs(self): # def test_baremetal_neutron_attrs(self):
brmtl_template = deepcopy( # brmtl_template = deepcopy(
self.net_template['adv_net_template']['default']) # self.net_template['adv_net_template']['default'])
brmtl_template['network_assignments']['baremetal'] = { # brmtl_template['network_assignments']['baremetal'] = {
'ep': 'br-baremetal'} # 'ep': 'br-baremetal'}
brmtl_template['templates_for_node_role']['controller'].append( # brmtl_template['templates_for_node_role']['controller'].append(
'baremetal') # 'baremetal')
brmtl_template['nic_mapping']['default']['if8'] = 'eth7' # brmtl_template['nic_mapping']['default']['if8'] = 'eth7'
brmtl_template['network_scheme']['baremetal'] = { # brmtl_template['network_scheme']['baremetal'] = {
'endpoints': ['br-baremetal'], # 'endpoints': ['br-baremetal'],
'transformations': [], # 'transformations': [],
'roles': {'baremetal': 'br-baremetal'}} # 'roles': {'baremetal': 'br-baremetal'}}
self.cluster.network_config.configuration_template = { # self.cluster.network_config.configuration_template = {
'adv_net_template': {'default': brmtl_template}, 'pk': 1} # 'adv_net_template': {'default': brmtl_template}, 'pk': 1}
self._check_baremetal_neutron_attrs(self.cluster) # self._check_baremetal_neutron_attrs(self.cluster)
def test_network_schemes_priorities(self): def test_network_schemes_priorities(self):
expected = [ expected = [
@ -261,26 +260,26 @@ class TestDeploymentTasksSerialization80MixIn(
self.assertTrue( self.assertTrue(
self.tasks_for_rerun.issubset(task_ids)) self.tasks_for_rerun.issubset(task_ids))
@mock.patch('nailgun.rpc.cast') # @mock.patch('nailgun.rpc.cast')
def test_add_compute(self, _): # def test_add_compute(self, _):
new_node = self.add_node('compute') # new_node = self.add_node('compute')
rpc_deploy_message = self.get_rpc_args() # rpc_deploy_message = self.get_rpc_args()
if self.task_deploy: # if self.task_deploy:
self.check_add_node_for_task_deploy(rpc_deploy_message) # self.check_add_node_for_task_deploy(rpc_deploy_message)
else: # else:
self.check_add_compute_for_granular_deploy( # self.check_add_compute_for_granular_deploy(
new_node.uid, rpc_deploy_message # new_node.uid, rpc_deploy_message
) # )
@mock.patch('nailgun.rpc.cast') # @mock.patch('nailgun.rpc.cast')
def test_add_controller(self, _): # def test_add_controller(self, _):
self.add_node('controller') # self.add_node('controller')
rpc_deploy_message = self.get_rpc_args() # rpc_deploy_message = self.get_rpc_args()
#
if self.task_deploy: # if self.task_deploy:
self.check_add_node_for_task_deploy(rpc_deploy_message) # self.check_add_node_for_task_deploy(rpc_deploy_message)
else: # else:
self.check_add_controller_for_granular_deploy(rpc_deploy_message) # self.check_add_controller_for_granular_deploy(rpc_deploy_message)
class TestDeploymentAttributesSerialization80MixIn( class TestDeploymentAttributesSerialization80MixIn(
@ -301,180 +300,180 @@ class TestDeploymentAttributesSerialization80MixIn(
self.cluster.extensions = ['volume_manager', 'converted_serializers'] self.cluster.extensions = ['volume_manager', 'converted_serializers']
self.serializer = self.create_serializer(self.cluster_db) self.serializer = self.create_serializer(self.cluster_db)
def test_neutron_attrs(self): # def test_neutron_attrs(self):
self.env.create_node( # self.env.create_node(
cluster_id=self.cluster_db.id, # cluster_id=self.cluster_db.id,
roles=['controller'], primary_roles=['controller'] # roles=['controller'], primary_roles=['controller']
) # )
objects.Cluster.prepare_for_deployment(self.cluster_db) # objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized_for_astute = self.serializer.serialize( # serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes) # self.cluster_db, self.cluster_db.nodes)
serialized_for_astute = deployment_info_to_legacy( # serialized_for_astute = deployment_info_to_legacy(
serialized_for_astute) # serialized_for_astute)
for node in serialized_for_astute: # for node in serialized_for_astute:
self.assertEqual( # self.assertEqual(
{ # {
"bridge": consts.DEFAULT_BRIDGES_NAMES.br_floating, # "bridge": consts.DEFAULT_BRIDGES_NAMES.br_floating,
"vlan_range": None # "vlan_range": None
}, # },
node['quantum_settings']['L2']['phys_nets']['physnet1'] # node['quantum_settings']['L2']['phys_nets']['physnet1']
) # )
l2 = (node["quantum_settings"]["predefined_networks"] # l2 = (node["quantum_settings"]["predefined_networks"]
[self.cluster_db.network_config.floating_name]["L2"]) # [self.cluster_db.network_config.floating_name]["L2"])
#
# self.assertEqual("physnet1", l2["physnet"])
# self.assertEqual("flat", l2["network_type"])
self.assertEqual("physnet1", l2["physnet"]) # def test_baremetal_transformations(self):
self.assertEqual("flat", l2["network_type"]) # self.env._set_additional_component(self.cluster_db, 'ironic', True)
# self.env.create_node(cluster_id=self.cluster_db.id,
# roles=['primary-controller'])
# objects.Cluster.prepare_for_deployment(self.cluster_db)
# serialized_for_astute = self.serializer.serialize(
# self.cluster_db, self.cluster_db.nodes)
# for node in serialized_for_astute:
# if node['uid'] == 'master':
# continue
# transformations = node['network_scheme']['transformations']
# baremetal_brs = filter(lambda t: t.get('name') ==
# consts.DEFAULT_BRIDGES_NAMES.br_baremetal,
# transformations)
# baremetal_ports = filter(lambda t: t.get('name') == "eth0.104",
# transformations)
# expected_patch = {
# 'action': 'add-patch',
# 'bridges': [consts.DEFAULT_BRIDGES_NAMES.br_ironic,
# consts.DEFAULT_BRIDGES_NAMES.br_baremetal],
# 'provider': 'ovs'}
# self.assertEqual(len(baremetal_brs), 1)
# self.assertEqual(len(baremetal_ports), 1)
# self.assertEqual(baremetal_ports[0]['bridge'],
# consts.DEFAULT_BRIDGES_NAMES.br_baremetal)
# self.assertIn(expected_patch, transformations)
def test_baremetal_transformations(self): # def test_disks_attrs(self):
self.env._set_additional_component(self.cluster_db, 'ironic', True) # disks = [
self.env.create_node(cluster_id=self.cluster_db.id, # {
roles=['primary-controller']) # "model": "TOSHIBA MK1002TS",
objects.Cluster.prepare_for_deployment(self.cluster_db) # "name": "sda",
serialized_for_astute = self.serializer.serialize( # "disk": "sda",
self.cluster_db, self.cluster_db.nodes) # "size": 1004886016
for node in serialized_for_astute: # },
if node['uid'] == 'master': # ]
continue # expected_node_volumes_hash = [
transformations = node['network_scheme']['transformations'] # {
baremetal_brs = filter(lambda t: t.get('name') == # u'name': u'sda',
consts.DEFAULT_BRIDGES_NAMES.br_baremetal, # u'bootable': True,
transformations) # u'extra': [],
baremetal_ports = filter(lambda t: t.get('name') == "eth0.104", # u'free_space': 330,
transformations) # u'volumes': [
expected_patch = { # {
'action': 'add-patch', # u'type': u'boot',
'bridges': [consts.DEFAULT_BRIDGES_NAMES.br_ironic, # u'size': 300
consts.DEFAULT_BRIDGES_NAMES.br_baremetal], # },
'provider': 'ovs'} # {
self.assertEqual(len(baremetal_brs), 1) # u'mount': u'/boot',
self.assertEqual(len(baremetal_ports), 1) # u'type': u'partition',
self.assertEqual(baremetal_ports[0]['bridge'], # u'file_system': u'ext2',
consts.DEFAULT_BRIDGES_NAMES.br_baremetal) # u'name': u'Boot',
self.assertIn(expected_patch, transformations) # u'size': 200
# },
# {
# u'type': u'lvm_meta_pool',
# u'size': 64
# },
# {
# u'vg': u'os',
# u'type': u'pv',
# u'lvm_meta_size': 64,
# u'size': 394
# },
# {
# u'vg': u'vm',
# u'type': u'pv',
# u'lvm_meta_size': 0,
# u'size': 0
# }
# ],
# u'type': u'disk',
# u'id': u'sda',
# u'size': 958
# },
# {
# u'_allocate_size': u'min',
# u'label': u'Base System',
# u'min_size': 19456,
# u'volumes': [
# {
# u'mount': u'/',
# u'size': -3766,
# u'type': u'lv',
# u'name': u'root',
# u'file_system': u'ext4'
# },
# {
# u'mount': u'swap',
# u'size': 4096,
# u'type': u'lv',
# u'name': u'swap',
# u'file_system': u'swap'
# }
# ],
# u'type': u'vg',
# u'id': u'os'
# },
# {
# u'_allocate_size': u'all',
# u'label': u'Virtual Storage',
# u'min_size': 5120,
# u'volumes': [
# {
# u'mount': u'/var/lib/nova',
# u'size': 0,
# u'type': u'lv',
# u'name': u'nova',
# u'file_system': u'xfs'
# }
# ],
# u'type': u'vg',
# u'id': u'vm'
# }
# ]
# self.env.create_node(
# cluster_id=self.cluster_db.id,
# roles=['compute'],
# meta={"disks": disks},
# )
# objects.Cluster.prepare_for_deployment(self.cluster_db)
# serialized_for_astute = self.serializer.serialize(
# self.cluster_db, self.cluster_db.nodes)
# for node in serialized_for_astute:
# if node['uid'] == 'master':
# continue
# self.assertIn("node_volumes", node)
# self.assertItemsEqual(
# expected_node_volumes_hash, node["node_volumes"])
def test_disks_attrs(self): # def test_attributes_contains_plugins(self):
disks = [ # self.env.create_plugin(
{ # cluster=self.cluster_db,
"model": "TOSHIBA MK1002TS", # name='plugin_1',
"name": "sda", # attributes_metadata={'attributes': {'name': 'plugin_1'}},
"disk": "sda", # package_version='4.0.0',
"size": 1004886016 # fuel_version=['8.0'])
}, # self.env.create_plugin(
] # cluster=self.cluster_db,
expected_node_volumes_hash = [ # name='plugin_2',
{ # attributes_metadata={'attributes': {'name': 'plugin_2'}},
u'name': u'sda', # package_version='4.0.0',
u'bootable': True, # fuel_version=['8.0'])
u'extra': [], # self.env.create_plugin(
u'free_space': 330, # cluster=self.cluster_db,
u'volumes': [ # enabled=False,
{ # name='plugin_3',
u'type': u'boot', # attributes_metadata={'attributes': {'name': 'plugin_3'}},
u'size': 300 # package_version='4.0.0',
}, # fuel_version=['8.0'])
{
u'mount': u'/boot',
u'type': u'partition',
u'file_system': u'ext2',
u'name': u'Boot',
u'size': 200
},
{
u'type': u'lvm_meta_pool',
u'size': 64
},
{
u'vg': u'os',
u'type': u'pv',
u'lvm_meta_size': 64,
u'size': 394
},
{
u'vg': u'vm',
u'type': u'pv',
u'lvm_meta_size': 0,
u'size': 0
}
],
u'type': u'disk',
u'id': u'sda',
u'size': 958
},
{
u'_allocate_size': u'min',
u'label': u'Base System',
u'min_size': 19456,
u'volumes': [
{
u'mount': u'/',
u'size': -3766,
u'type': u'lv',
u'name': u'root',
u'file_system': u'ext4'
},
{
u'mount': u'swap',
u'size': 4096,
u'type': u'lv',
u'name': u'swap',
u'file_system': u'swap'
}
],
u'type': u'vg',
u'id': u'os'
},
{
u'_allocate_size': u'all',
u'label': u'Virtual Storage',
u'min_size': 5120,
u'volumes': [
{
u'mount': u'/var/lib/nova',
u'size': 0,
u'type': u'lv',
u'name': u'nova',
u'file_system': u'xfs'
}
],
u'type': u'vg',
u'id': u'vm'
}
]
self.env.create_node(
cluster_id=self.cluster_db.id,
roles=['compute'],
meta={"disks": disks},
)
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
for node in serialized_for_astute:
if node['uid'] == 'master':
continue
self.assertIn("node_volumes", node)
self.assertItemsEqual(
expected_node_volumes_hash, node["node_volumes"])
def test_attributes_contains_plugins(self):
self.env.create_plugin(
cluster=self.cluster_db,
name='plugin_1',
attributes_metadata={'attributes': {'name': 'plugin_1'}},
package_version='4.0.0',
fuel_version=['8.0'])
self.env.create_plugin(
cluster=self.cluster_db,
name='plugin_2',
attributes_metadata={'attributes': {'name': 'plugin_2'}},
package_version='4.0.0',
fuel_version=['8.0'])
self.env.create_plugin(
cluster=self.cluster_db,
enabled=False,
name='plugin_3',
attributes_metadata={'attributes': {'name': 'plugin_3'}},
package_version='4.0.0',
fuel_version=['8.0'])
expected_plugins_list = ['plugin_1', 'plugin_2'] expected_plugins_list = ['plugin_1', 'plugin_2']
self.env.create_node( self.env.create_node(
@ -493,30 +492,30 @@ class TestDeploymentAttributesSerialization80MixIn(
self.assertTrue(all(name in node for name self.assertTrue(all(name in node for name
in expected_plugins_list)) in expected_plugins_list))
def test_common_attributes_contains_plugin_metadata(self): # def test_common_attributes_contains_plugin_metadata(self):
expected_value = 'check_value' # expected_value = 'check_value'
plugin = self.env.create_plugin( # plugin = self.env.create_plugin(
cluster=self.cluster_db, # cluster=self.cluster_db,
name='test_plugin', # name='test_plugin',
package_version='4.0.0', # package_version='4.0.0',
fuel_version=['8.0'], # fuel_version=['8.0'],
attributes_metadata={ # attributes_metadata={
'attributes': { # 'attributes': {
'config': { # 'config': {
'description': "Description", # 'description': "Description",
'weight': 52, # 'weight': 52,
'value': expected_value # 'value': expected_value
} # }
} # }
} # }
) # )
attrs = self.serializer.get_common_attrs(self.cluster_db) # attrs = self.serializer.get_common_attrs(self.cluster_db)
self.assertIn('test_plugin', attrs) # self.assertIn('test_plugin', attrs)
self.assertIn('metadata', attrs['test_plugin']) # self.assertIn('metadata', attrs['test_plugin'])
self.assertEqual( # self.assertEqual(
plugin.id, attrs['test_plugin']['metadata']['plugin_id'] # plugin.id, attrs['test_plugin']['metadata']['plugin_id']
) # )
self.assertEqual(expected_value, attrs['test_plugin']['config']) # self.assertEqual(expected_value, attrs['test_plugin']['config'])
class TestMultiNodeGroupsSerialization80MixIn( class TestMultiNodeGroupsSerialization80MixIn(
@ -580,61 +579,62 @@ class TestMultiNodeGroupsSerialization80MixIn(
else: else:
self.assertEqual(len(descr['routes']), count) self.assertEqual(len(descr['routes']), count)
def test_routes_with_no_shared_networks_2_nodegroups(self): # def test_routes_with_no_shared_networks_2_nodegroups(self):
self._add_node_group_with_node('199.99', 3) # self._add_node_group_with_node('199.99', 3)
# all networks have different CIDRs # # all networks have different CIDRs
self._check_routes_count(1) # self._check_routes_count(1)
def test_routes_with_no_shared_networks_3_nodegroups(self): # def test_routes_with_no_shared_networks_3_nodegroups(self):
self._add_node_group_with_node('199.99', 3) # self._add_node_group_with_node('199.99', 3)
self._add_node_group_with_node('199.77', 3) # self._add_node_group_with_node('199.77', 3)
# all networks have different CIDRs # # all networks have different CIDRs
self._check_routes_count(2) # self._check_routes_count(2)
def test_routes_with_shared_networks_3_nodegroups(self): # def test_routes_with_shared_networks_3_nodegroups(self):
self._add_node_group_with_node('199.99', 3) # self._add_node_group_with_node('199.99', 3)
self._add_node_group_with_node('199.99', 4) # self._add_node_group_with_node('199.99', 4)
# networks in two racks have equal CIDRs # # networks in two racks have equal CIDRs
self._check_routes_count(1) # self._check_routes_count(1)
class TestBlockDeviceDevicesSerialization80MixIn( # class TestBlockDeviceDevicesSerialization80MixIn(
TestSerializerConverter80To90MixIn, # TestSerializerConverter80To90MixIn,
BaseDeploymentSerializer # BaseDeploymentSerializer
): # ):
def setUp(self): # def setUp(self):
super(TestBlockDeviceDevicesSerialization80MixIn, self).setUp() # super(TestBlockDeviceDevicesSerialization80MixIn, self).setUp()
self.cluster = self.env.create( # self.cluster = self.env.create(
release_kwargs={'version': self.env_version}, # release_kwargs={'version': self.env_version},
cluster_kwargs={ # cluster_kwargs={
'mode': consts.CLUSTER_MODES.ha_compact, # 'mode': consts.CLUSTER_MODES.ha_compact,
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron, # 'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan}) # 'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan})
self.cluster_db = self.db.query(models.Cluster).get(self.cluster['id']) # self.cluster_db = self.db.query(models.Cluster).
self.cluster.extensions = ['volume_manager', 'converted_serializers'] # get(self.cluster['id'])
self.serializer = self.create_serializer(self.cluster_db) # self.cluster.extensions = ['volume_manager', 'converted_serializers']
# self.serializer = self.create_serializer(self.cluster_db)
def test_block_device_disks(self): #
self.env.create_node( # def test_block_device_disks(self):
cluster_id=self.cluster_db.id, # self.env.create_node(
roles=['cinder-block-device'] # cluster_id=self.cluster_db.id,
) # roles=['cinder-block-device']
self.env.create_node( # )
cluster_id=self.cluster_db.id, # self.env.create_node(
roles=['controller'] # cluster_id=self.cluster_db.id,
) # roles=['controller']
objects.Cluster.prepare_for_deployment(self.cluster_db) # )
serialized_for_astute = self.serializer.serialize( # objects.Cluster.prepare_for_deployment(self.cluster_db)
self.cluster_db, self.cluster_db.nodes) # serialized_for_astute = self.serializer.serialize(
for node in serialized_for_astute: # self.cluster_db, self.cluster_db.nodes)
if node['uid'] == 'master': # for node in serialized_for_astute:
continue # if node['uid'] == 'master':
self.assertIn("node_volumes", node) # continue
for node_volume in node["node_volumes"]: # self.assertIn("node_volumes", node)
if node_volume["id"] == "cinder-block-device": # for node_volume in node["node_volumes"]:
self.assertEqual(node_volume["volumes"], []) # if node_volume["id"] == "cinder-block-device":
else: # self.assertEqual(node_volume["volumes"], [])
self.assertNotEqual(node_volume["volumes"], []) # else:
# self.assertNotEqual(node_volume["volumes"], [])
class TestSerializeInterfaceDriversData80MixIn( class TestSerializeInterfaceDriversData80MixIn(