Update service_chain heat node driver for LBaaS v2

This patch also updates the chain_mapping policy driver
and the Node Composition Plugin (NCP) to accommodate the
transactional semantics of an async driver, such as the
aim_mapping driver, which performs all operations in the
pre_commit phase. Note that the chain_mapping driver and
NCP will continue to work with the resource_mapping driver
(or a similar driver) as before.

Since the service chain plugin has no knowledge of the
pre- or post-commit transaction phases of the GBP plugin
that drives it, a workaround (read: hack!) has been
introduced in this patch to overcome this. Enhancing the
service chain plugin structure is a bigger discussion and
beyond the scope of this patch; it will be tackled in the
context of enhancing and supporting the NFP framework for
aim_mapping-like single-transaction drivers.

All references to LOADBALANCER type have also been updated to
LOADBALANCERV2 in the unit tests.
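
For illustration, here is a minimal sketch of the commit-phase
tagging this patch introduces (all names are simplified stand-ins
for the driver code in the diff below):

# Sketch only: simplified stand-ins for the chain_mapping wrapper logic.
PRE_COMMIT = 'pre_commit'
POST_COMMIT = 'post_commit'

def precommit_hook(context, postcommit_body, precommit_driver_configured):
    # Tag the context so downstream plugins can tell which phase is active.
    context.commit_phase = PRE_COMMIT
    if precommit_driver_configured:
        # aim_mapping-style driver: run everything inside the open
        # transaction.
        postcommit_body(context)

def postcommit_hook(context, postcommit_body, precommit_driver_configured):
    context.commit_phase = POST_COMMIT
    if not precommit_driver_configured:
        # resource_mapping-style driver: run after commit, as before.
        postcommit_body(context)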

Change-Id: I74f1b3d0d61bd6c3859fcaaf9a569ca3c6cc07ed
Sumit Naiksatam 2017-07-31 03:00:27 -07:00
parent a29a3725af
commit f03aaf7abf
17 changed files with 262 additions and 129 deletions

View File

@ -45,4 +45,9 @@ STATUS_ACTIVE = 'ACTIVE'
STATUS_BUILD = 'BUILD'
STATUS_ERROR = 'ERROR'
PRE_COMMIT = 'pre_commit'
POST_COMMIT = 'post_commit'
STATUS_STATES = [STATUS_ACTIVE, STATUS_BUILD, STATUS_ERROR]
PRECOMMIT_POLICY_DRIVERS = ['aim_mapping']

View File

@ -12,6 +12,10 @@
import six
from oslo_config import cfg
from gbpservice.neutron.services.grouppolicy.common import constants as const
def convert_ip_pool_list_to_string(ip_pool):
if type(ip_pool) is not list:
@ -30,3 +34,14 @@ def convert_ip_pool_string_to_list(ip_pool_string):
return [prefix.strip() for prefix in ip_pool_string.split(',')]
else:
return []
def is_precommit_policy_driver_configured():
    # Checks whether exactly one of the policy drivers designated as a
    # "pre-commit" driver (defined in const.PRECOMMIT_POLICY_DRIVERS) is
    # present in the list of configured policy drivers.
    configured = set(cfg.CONF.group_policy.policy_drivers)
    return len(configured & set(const.PRECOMMIT_POLICY_DRIVERS)) == 1
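
For illustration, a standalone sketch of this check (the driver list
normally comes from the group_policy.policy_drivers option):

# Standalone sketch of the check above, with an inlined driver list.
PRECOMMIT_POLICY_DRIVERS = ['aim_mapping']

def check(configured_drivers):
    return len(set(configured_drivers) & set(PRECOMMIT_POLICY_DRIVERS)) == 1

print(check(['implicit_policy', 'aim_mapping']))       # True: exactly one match
print(check(['implicit_policy', 'resource_mapping']))  # False: no match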

View File

@ -29,6 +29,7 @@ from gbpservice.neutron.services.grouppolicy import (
group_policy_driver_api as api)
from gbpservice.neutron.services.grouppolicy.common import constants as gconst
from gbpservice.neutron.services.grouppolicy.common import exceptions as exc
from gbpservice.neutron.services.grouppolicy.common import utils as gutils
from gbpservice.neutron.services.grouppolicy.drivers import nsp_manager
from gbpservice.neutron.services.grouppolicy import sc_notifications
@ -130,18 +131,39 @@ class ChainMappingDriver(api.PolicyDriver, local_api.LocalAPI,
# This wrapper is called from both precommit and postcommit, and
# decides the order in which precommit and postcommit are executed
if 'precommit' in caller_method:
# In case of aim_mapping driver, postcommit functionality will be
# executed as a part of precommit
if ('aim_mapping' in cfg.CONF.group_policy.policy_drivers):
method = getattr(self, '_' + caller_method.replace(
'precommit', 'postcommit'))
method(context)
context._plugin_context.commit_phase = gconst.PRE_COMMIT
# In case of aim_mapping driver (or a similar precommit driver),
# postcommit functionality will be executed as a part of precommit
if gutils.is_precommit_policy_driver_configured():
if 'delete_policy_target_group' in caller_method:
context.ptg_chain_map = self._get_ptg_servicechain_mapping(
context._plugin_context.session,
context.current['id'])
self._cleanup_redirect_action(context)
else:
method = getattr(self, '_' + caller_method.replace(
'precommit', 'postcommit'))
method(context)
else:
# If driver is not aim_mapping then postcommit functionality will
# be executed by the call from gbp plugin
if ('aim_mapping' not in cfg.CONF.group_policy.policy_drivers):
method = getattr(self, '_' + caller_method)
method(context)
context._plugin_context.commit_phase = gconst.POST_COMMIT
# If the driver is not aim_mapping (or a similar precommit
# driver), then the postcommit functionality will be executed
# by the call from the GBP plugin
if not gutils.is_precommit_policy_driver_configured():
method = getattr(self, '_' + caller_method)
method(context)
elif 'create_policy_target_group' in caller_method or (
'create_external_policy' in caller_method):
if hasattr(context, 'provider_context') and (
hasattr(context, 'servicechain_attrs')):
context.provider_context.commit_phase = (
context._plugin_context.commit_phase)
context.provider_context.servicechain_instance = (
context._plugin_context.servicechain_instance)
super(ChainMappingDriver,
self)._create_servicechain_instance(
context.provider_context,
context.servicechain_attrs)
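
Since the removed and added lines are interleaved above, a condensed
sketch of the new dispatch may help (the special cases for deleting and
creating policy target groups are omitted; names mirror the diff):

# Condensed sketch of the new wrapper dispatch (simplified).
def precommit_wrapper_for_chain_mapping(self, caller_method, context):
    if 'precommit' in caller_method:
        context._plugin_context.commit_phase = gconst.PRE_COMMIT
        if gutils.is_precommit_policy_driver_configured():
            # Precommit driver: run the postcommit body now, inside the
            # still-open transaction.
            method = getattr(self, '_' + caller_method.replace(
                'precommit', 'postcommit'))
            method(context)
    else:
        context._plugin_context.commit_phase = gconst.POST_COMMIT
        if not gutils.is_precommit_policy_driver_configured():
            # Postcommit driver: run the body after commit, as before.
            getattr(self, '_' + caller_method)(context)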
@log.log_method_call
def _create_policy_target_postcommit(self, context):
@ -169,7 +191,12 @@ class ChainMappingDriver(api.PolicyDriver, local_api.LocalAPI,
'create_policy_target_postcommit', context)
@log.log_method_call
def _delete_policy_target_postcommit(self, context):
def delete_policy_target_precommit(self, context):
context._is_service_target = context._plugin._is_service_target(
context._plugin_context, context.current['id'])
@log.log_method_call
def delete_policy_target_postcommit(self, context):
if not context._is_service_target:
mappings = self._get_ptg_servicechain_mapping(
context._plugin_context.session,
@ -182,18 +209,6 @@ class ChainMappingDriver(api.PolicyDriver, local_api.LocalAPI,
chain_context, context.current,
mapping.servicechain_instance_id)
@log.log_method_call
def delete_policy_target_precommit(self, context):
context._is_service_target = context._plugin._is_service_target(
context._plugin_context, context.current['id'])
self.precommit_wrapper_for_chain_mapping(
'delete_policy_target_precommit', context)
@log.log_method_call
def delete_policy_target_postcommit(self, context):
self.precommit_wrapper_for_chain_mapping(
'delete_policy_target_postcommit', context)
@log.log_method_call
def _create_policy_target_group_postcommit(self, context):
if (context.current['provided_policy_rule_sets'] and
@ -761,9 +776,12 @@ class ChainMappingDriver(api.PolicyDriver, local_api.LocalAPI,
'management_ptg_id': None,
'classifier_id': classifier_id,
'config_param_values': jsonutils.dumps(config_param_values)}
context.provider_context = p_ctx
context.servicechain_attrs = attrs
sc_instance = super(
ChainMappingDriver, self)._create_servicechain_instance(
p_ctx, attrs)
context._plugin_context.servicechain_instance = sc_instance
self._set_ptg_servicechain_instance_mapping(
session, provider_ptg_id, SCI_CONSUMER_NOT_AVAILABLE,
sc_instance['id'], p_ctx.tenant)

View File

@ -69,7 +69,8 @@ class NodeDriverManager(stevedore.named.NamedExtensionManager):
for driver in self.ordered_drivers:
try:
driver.obj.validate_create(context)
model.set_node_owner(context, driver.obj.name)
if not model.get_node_owner(context):
model.set_node_owner(context, driver.obj.name)
return driver.obj
except n_exc.NeutronException as e:
LOG.warning(e.message)

View File

@ -12,9 +12,13 @@
import time
from heatclient import exc as heat_exc
from neutron.db import api as db_api
from neutron.db import models_v2 as ndb
from neutron.plugins.common import constants as pconst
from neutron_lib.db import model_base
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import helpers as log
from oslo_log import log as logging
from oslo_serialization import jsonutils
@ -34,6 +38,9 @@ service_chain_opts = [
default=15,
help=_("Seconds to wait for pending stack operation "
"to complete")),
cfg.IntOpt('delete_vip_port_retries',
default=10,
help=_("Retries to check if LB VIP port is deleted")),
cfg.StrOpt('heat_uri',
default='http://localhost:8004/v1',
help=_("Heat API server address to instantiate services "
@ -48,6 +55,7 @@ cfg.CONF.register_opts(service_chain_opts, "heat_node_driver")
EXCLUDE_POOL_MEMBER_TAG = cfg.CONF.heat_node_driver.exclude_pool_member_tag
STACK_ACTION_WAIT_TIME = cfg.CONF.heat_node_driver.stack_action_wait_time
STACK_ACTION_RETRY_WAIT = 5 # Retry after every 5 seconds
DELETE_VIP_PORT_RETRIES = cfg.CONF.heat_node_driver.delete_vip_port_retries
class ServiceNodeInstanceStack(model_base.BASEV2):
@ -107,15 +115,16 @@ class ServiceTypeUpdateNotSupported(exc.NodeCompositionPluginBadRequest):
class HeatNodeDriver(driver_base.NodeDriverBase):
sc_supported_type = [pconst.LOADBALANCER, pconst.FIREWALL]
vendor_name = 'heat_based_node_driver'
initialized = False
required_heat_resources = {pconst.LOADBALANCER: [
'OS::Neutron::LoadBalancer',
'OS::Neutron::Pool'],
pconst.FIREWALL: [
'OS::Neutron::Firewall',
'OS::Neutron::FirewallPolicy']}
sc_supported_type = [pconst.LOADBALANCERV2, pconst.FIREWALL]
required_heat_resources = {
pconst.LOADBALANCERV2: ['OS::Neutron::LBaaS::LoadBalancer',
'OS::Neutron::LBaaS::Listener',
'OS::Neutron::LBaaS::Pool'],
pconst.FIREWALL: ['OS::Neutron::Firewall',
'OS::Neutron::FirewallPolicy'],
}
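
This mapping is presumably consulted when validating a node's Heat
template; a minimal sketch of such a check (an assumed helper, not the
driver's actual code):

# Assumed sketch: verify a template declares all required resource types.
def template_has_required_resources(stack_template, service_type):
    resources = (stack_template.get('Resources') or
                 stack_template.get('resources') or {})
    declared = set()
    for res in resources.values():
        declared.add(res.get('Type') or res.get('type'))
    required = HeatNodeDriver.required_heat_resources[service_type]
    return all(rtype in declared for rtype in required)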
@log.log_method_call
def initialize(self, name):
@ -130,7 +139,8 @@ class HeatNodeDriver(driver_base.NodeDriverBase):
def validate_create(self, context):
if context.current_profile is None:
raise ServiceProfileRequired()
if context.current_profile['vendor'] != self.vendor_name:
if context.current_profile['vendor'].lower() != (
self.vendor_name.lower()):
raise NodeVendorMismatch(vendor=self.vendor_name)
service_type = context.current_profile['service_type']
if service_type not in self.sc_supported_type:
@ -206,10 +216,36 @@ class HeatNodeDriver(driver_base.NodeDriverBase):
heatclient = self._get_heat_client(context.plugin_context)
for stack in stack_ids:
vip_port_id = None
try:
rstr = heatclient.client.resources.get(stack_ids[0].stack_id,
'loadbalancer')
vip_port_id = rstr.attributes['vip_port_id']
except heat_exc.HTTPNotFound:
# stack not found, so no need to process any further
pass
heatclient.delete(stack.stack_id)
for stack in stack_ids:
self._wait_for_stack_operation_complete(
heatclient, stack.stack_id, 'delete')
if vip_port_id:
for x in range(0, DELETE_VIP_PORT_RETRIES):
# We intentionally get a new session so as to be
# able to read the updated DB
session = db_api.get_session()
vip_port = session.query(ndb.Port).filter_by(
id=vip_port_id).all()
if vip_port:
# heat stack delete is not finished yet, so try again
LOG.debug("VIP port %s is not yet deleted", vip_port)
LOG.debug("Retry attempt: %s", x + 1)
# Stack delete takes at least some minimal amount
# of time, hence we wait a bit before checking again.
time.sleep(STACK_ACTION_WAIT_TIME)
else:
# We force a retry so that a new session is used; it
# will correctly reflect the VIP port as deleted and
# hence allow the subsequent policy driver to delete
# the VIP subnet.
raise db_exc.RetryRequest(Exception)
self._delete_node_instance_stack_in_db(context.plugin_session,
context.current_node['id'],
context.instance['id'])
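
Note that the RetryRequest above is raised once the VIP port is
confirmed gone, not on failure: it forces the enclosing operation to
rerun on a fresh session that reflects the deletion. A minimal
standalone sketch of that pattern (stand-in predicate name):

import time

from oslo_db import exception as db_exc

def wait_then_refresh_session(row_still_present, retries, wait_seconds):
    # Poll until the row disappears, then raise RetryRequest so the
    # retry machinery reruns the enclosing operation on a new session.
    # If retries are exhausted, fall through and proceed as before.
    for _ in range(retries):
        if row_still_present():
            time.sleep(wait_seconds)
        else:
            raise db_exc.RetryRequest(RuntimeError('refresh DB session'))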
@ -230,12 +266,12 @@ class HeatNodeDriver(driver_base.NodeDriverBase):
@log.log_method_call
def update_policy_target_added(self, context, policy_target):
if context.current_profile['service_type'] == pconst.LOADBALANCER:
if context.current_profile['service_type'] == pconst.LOADBALANCERV2:
self.update(context)
@log.log_method_call
def update_policy_target_removed(self, context, policy_target):
if context.current_profile['service_type'] == pconst.LOADBALANCER:
if context.current_profile['service_type'] == pconst.LOADBALANCERV2:
self.update(context)
@log.log_method_call
@ -255,6 +291,10 @@ class HeatNodeDriver(driver_base.NodeDriverBase):
current_policy_target_group):
pass
def get_status(self, context):
# TODO(Sumit): Needs to be implemented
return {'status': '', 'status_details': ''}
@property
def name(self):
return self._name
@ -283,12 +323,12 @@ class HeatNodeDriver(driver_base.NodeDriverBase):
is_template_aws_version = stack_template.get(
'AWSTemplateFormatVersion', False)
if service_type == pconst.LOADBALANCER:
if service_type == pconst.LOADBALANCERV2:
self._generate_pool_members(context, stack_template,
config_param_values,
provider_ptg,
is_template_aws_version)
else:
elif service_type == pconst.FIREWALL:
provider_subnet = context.core_plugin.get_subnet(
context.plugin_context, provider_ptg_subnet_id)
consumer_cidrs = []
@ -317,6 +357,8 @@ class HeatNodeDriver(driver_base.NodeDriverBase):
for parameter in node_params:
if parameter == "Subnet":
stack_params[parameter] = provider_ptg_subnet_id
elif parameter == "service_chain_metadata":
stack_params[parameter] = sc_instance['id']
elif parameter in config_param_values:
stack_params[parameter] = config_param_values[parameter]
return (stack_template, stack_params)
@ -419,9 +461,7 @@ class HeatNodeDriver(driver_base.NodeDriverBase):
"destination_port": destination_port,
"action": "allow",
"destination_ip_address": destination_cidr,
"source_ip_address": source_cidr
}
}
"source_ip_address": source_cidr}}
def _generate_pool_members(self, context, stack_template,
config_param_values, provider_ptg,
@ -435,7 +475,7 @@ class HeatNodeDriver(driver_base.NodeDriverBase):
pool_res_name = None
for resource in stack_template[resources_key]:
if stack_template[resources_key][resource][type_key] == (
'OS::Neutron::Pool'):
'OS::Neutron::LBaaS::Pool'):
pool_res_name = resource
break
@ -453,13 +493,13 @@ class HeatNodeDriver(driver_base.NodeDriverBase):
properties_key = ('Properties' if is_template_aws_version
else 'properties')
res_key = 'Ref' if is_template_aws_version else 'get_resource'
return {type_key: "OS::Neutron::PoolMember",
return {type_key: "OS::Neutron::LBaaS::PoolMember",
properties_key: {
"address": member_ip,
"admin_state_up": True,
"pool_id": {res_key: pool_res_name},
# FIXME(Magesh): Need to handle port range
"protocol_port": context.classifier["port_range"],
"pool": {res_key: pool_res_name},
"protocol_port": {'get_param': 'app_port'},
"subnet": {'get_param': 'Subnet'},
"weight": 1}}
def _get_member_ips(self, context, ptg):

View File

@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import api as db_api
from neutron.plugins.common import constants as pconst
from neutron.quota import resource_registry
from oslo_config import cfg
@ -23,6 +24,7 @@ from gbpservice._i18n import _LW
from gbpservice.common import utils
from gbpservice.neutron.db import servicechain_db
from gbpservice.neutron.services.grouppolicy.common import constants as gp_cts
from gbpservice.neutron.services.grouppolicy.common import utils as gutils
from gbpservice.neutron.services.servicechain.plugins.ncp import (
context as ctx)
from gbpservice.neutron.services.servicechain.plugins.ncp import (
@ -34,6 +36,9 @@ from gbpservice.neutron.services.servicechain.plugins import sharing
LOG = logging.getLogger(__name__)
PLUMBER_NAMESPACE = 'gbpservice.neutron.servicechain.ncp_plumbers'
cfg.CONF.import_opt('policy_drivers',
'gbpservice.neutron.services.grouppolicy.config',
group='group_policy')
STATUS = 'status'
STATUS_DETAILS = 'status_details'
STATUS_SET = set([STATUS, STATUS_DETAILS])
@ -70,6 +75,10 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
When a Servicechain Instance is created, all its nodes need to be
instantiated.
"""
instance = self._process_commit_phase(context)
if instance:
return instance
session = context.session
deployers = {}
with session.begin(subtransactions=True):
@ -80,19 +89,47 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
raise exc.OneSpecPerInstanceAllowed()
deployers = self._get_scheduled_drivers(context, instance,
'deploy')
if not gutils.is_precommit_policy_driver_configured():
# Actual node deploy
try:
self._deploy_servicechain_nodes(context, deployers)
except Exception:
# Some node could not be deployed
with excutils.save_and_reraise_exception():
LOG.error(_LE("Node deployment failed, "
"deleting servicechain_instance %s"),
instance['id'])
self.delete_servicechain_instance(context, instance['id'])
return instance
def _process_commit_phase(self, context):
if hasattr(context, 'commit_phase'):
if not gutils.is_precommit_policy_driver_configured() and (
context.commit_phase == gp_cts.PRE_COMMIT):
# The following is a bit of a hack to no-op
# the call from the postcommit policy driver
# during the pre-commit phase.
return True
if gutils.is_precommit_policy_driver_configured() and (
context.commit_phase == gp_cts.POST_COMMIT):
instance = self.get_servicechain_instance(
context, context.servicechain_instance['id'])
self._call_deploy_sc_node(context, instance)
return instance
def _call_deploy_sc_node(self, context, instance):
# Actual node deploy
try:
deployers = self._get_scheduled_drivers(
context, instance, 'deploy')
self._deploy_servicechain_nodes(context, deployers)
except Exception:
# Some node could not be deployed
with excutils.save_and_reraise_exception():
LOG.error(_LE("Node deployment failed, "
"deleting servicechain_instance %s"),
"servicechain_instance %s is in ERROR state"),
instance['id'])
self.delete_servicechain_instance(context, instance['id'])
return instance
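
A condensed sketch of the commit-phase gating added to the NCP
(simplified from the diff; the hack mentioned in the commit message is
the PRE_COMMIT no-op branch):

# Condensed sketch of _process_commit_phase (simplified).
def _process_commit_phase(self, context):
    if not hasattr(context, 'commit_phase'):
        return None  # plain service chain API call; proceed normally
    precommit_driver = gutils.is_precommit_policy_driver_configured()
    if not precommit_driver and context.commit_phase == gp_cts.PRE_COMMIT:
        # Postcommit-style driver calling during precommit: no-op here;
        # the real work happens in its postcommit call.
        return True
    if precommit_driver and context.commit_phase == gp_cts.POST_COMMIT:
        # Precommit-style driver: the instance row was already created
        # during precommit; only the node deploy remains.
        instance = self.get_servicechain_instance(
            context, context.servicechain_instance['id'])
        self._call_deploy_sc_node(context, instance)
        return instance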
@log.log_method_call
def get_servicechain_instance(self, context,
@ -113,6 +150,10 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
nodes of the previous spec should be destroyed and the newer ones
created.
"""
instance = self._process_commit_phase(context)
if instance:
return instance
session = context.session
deployers = {}
updaters = {}
@ -139,7 +180,10 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
self._destroy_servicechain_nodes(context, destroyers)
deployers = self._get_scheduled_drivers(
context, updated_instance, 'deploy')
self._deploy_servicechain_nodes(context, deployers)
context.deployers = deployers
context.servicechain_instance = updated_instance
if not gutils.is_precommit_policy_driver_configured():
self._deploy_servicechain_nodes(context, deployers)
else:
self._update_servicechain_nodes(context, updaters)
return updated_instance
@ -533,6 +577,13 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
LOG.error(_LE("Node destroy failed, for node %s "),
driver['context'].current_node['id'])
except Exception as e:
if db_api.is_retriable(e):
with excutils.save_and_reraise_exception():
LOG.debug(
"Node driver '%(name)s' failed in"
" %(method)s, operation will be retried",
{'name': driver._name, 'method': 'delete'}
)
LOG.exception(e)
finally:
self.driver_manager.clear_node_owner(destroy['context'])

View File

@ -287,7 +287,7 @@ class GroupPolicyDBTestBase(ApiManagerMixin):
sorted([i[resource]['id'] for i in items]))
def _create_profiled_servicechain_node(
self, service_type=constants.LOADBALANCER, shared_profile=False,
self, service_type=constants.LOADBALANCERV2, shared_profile=False,
profile_tenant_id=None, **kwargs):
prof = self.create_service_profile(
service_type=service_type,

View File

@ -58,7 +58,7 @@ class ServiceChainDBTestBase(test_group_policy_db.GroupPolicyDBTestBase):
sorted([i[resource]['id'] for i in items]))
def _create_profiled_servicechain_node(
self, service_type=constants.LOADBALANCER, shared_profile=False,
self, service_type=constants.LOADBALANCERV2, shared_profile=False,
profile_tenant_id=None, **kwargs):
prof = self.create_service_profile(
service_type=service_type,
@ -488,11 +488,11 @@ class TestServiceChainResources(ServiceChainDbTestCase):
def test_list_service_profile(self):
scns = [self.create_service_profile(name='sp1', description='sp',
service_type='LOADBALANCER'),
service_type='LOADBALANCERV2'),
self.create_service_profile(name='sp2', description='sp',
service_type='LOADBALANCER'),
service_type='LOADBALANCERV2'),
self.create_service_profile(name='sp3', description='sp',
service_type='LOADBALANCER')]
service_type='LOADBALANCERV2')]
self._test_list_resources('service_profile', scns,
query_params='description=sp')
@ -520,7 +520,7 @@ class TestServiceChainResources(ServiceChainDbTestCase):
def test_delete_service_profile(self):
ctx = context.get_admin_context()
sp = self.create_service_profile(service_type='LOADBALANCER')
sp = self.create_service_profile(service_type='LOADBALANCERV2')
sp_id = sp['service_profile']['id']
scn = self.create_servicechain_node(service_profile_id=sp_id)

View File

@ -645,11 +645,13 @@ class DummyDictionaries(object):
'service_id': '1200332d-b432-403b-8350-89b782256be5',
'service_profile_id': 'ab3b704b-a7d9-4c55-ab43-57ed5e29867d',
'id': '5ad7439b-7259-47cd-be88-36f641e0b5c8',
'name': 'LOADBALANCER.haproxy.507988d2-4b46-4df4-99d2-746676500872'
'name':
'LOADBALANCERV2.haproxy.507988d2-4b46-4df4-99d2-746676500872'
},
'network_function_instance': {
'status': 'ACTIVE',
'name': 'LOADBALANCER.haproxy.507988d2-4b46-4df4-99d2-7466765002',
'name':
'LOADBALANCERV2.haproxy.507988d2-4b46-4df4-99d2-7466765002',
'network_function_device_id': '3c3e502a-256e-4597-91a9-71902380c0',
'tenant_id': 'ee27b1d0d7f04ac390ee7ec4b2fd5b13',
'ha_state': None,
@ -671,7 +673,7 @@ class DummyDictionaries(object):
'reference_count': 1,
'interfaces_in_use': 2,
'id': '3c3e502a-256e-4597-91a9-719023808ec0',
'name': 'LOADBALANCER.haproxy.507988d2-4b46-4df4-99d2-7466765008'
'name': 'LOADBALANCERV2.haproxy.507988d2-4b46-4df4-99d2-7466765008'
}
}

View File

@ -119,7 +119,7 @@ class GroupPolicyPluginTestBase(tgpmdb.GroupPolicyMappingDbTestCase):
def _create_servicechain_spec(self, node_types=None, shared=False):
node_types = node_types or []
if not node_types:
node_types = ['LOADBALANCER']
node_types = ['LOADBALANCERV2']
node_ids = []
for node_type in node_types:
node_ids.append(self._create_servicechain_node(node_type,
@ -135,7 +135,7 @@ class GroupPolicyPluginTestBase(tgpmdb.GroupPolicyMappingDbTestCase):
scs_id = spec['servicechain_spec']['id']
return scs_id
def _create_servicechain_node(self, node_type="LOADBALANCER",
def _create_servicechain_node(self, node_type="LOADBALANCERV2",
shared=False):
config = "{}"
data = {'servicechain_node': {'service_type': node_type,

View File

@ -467,7 +467,8 @@ class ResourceMappingTestCase(test_plugin.GroupPolicyPluginTestCase):
filter_by(policy_target_group_id=ptg_id).
all())
def _create_service_profile(self, node_type='LOADBALANCER', shared=False):
def _create_service_profile(self, node_type='LOADBALANCERV2',
shared=False):
data = {'service_type': node_type, 'shared': shared}
profile = self.create_service_profile(expected_res_status=201,
is_admin_context=shared,
@ -475,7 +476,7 @@ class ResourceMappingTestCase(test_plugin.GroupPolicyPluginTestCase):
scp_id = profile['service_profile']['id']
return scp_id
def _create_servicechain_node(self, node_type="LOADBALANCER",
def _create_servicechain_node(self, node_type="LOADBALANCERV2",
shared=False):
profile_id = self._create_service_profile(node_type, shared=shared)
data = {'service_profile_id': profile_id,
@ -487,7 +488,7 @@ class ResourceMappingTestCase(test_plugin.GroupPolicyPluginTestCase):
return scn_id
def _create_servicechain_spec(self, node_types=None, shared=False):
node_types = node_types or ['LOADBALANCER']
node_types = node_types or ['LOADBALANCERV2']
node_ids = []
for node_type in node_types:
node_ids.append(self._create_servicechain_node(

View File

@ -58,7 +58,7 @@ class BaseTestGroupPolicyPluginGroupResources(
def test_spec_shared(self):
# Shared spec can only point shared nodes
node = self._create_profiled_servicechain_node(
'LOADBALANCER', shared=True, shared_profile=True,
'LOADBALANCERV2', shared=True, shared_profile=True,
profile_tenant_id='admin', tenant_id='admin')['servicechain_node']
self.create_servicechain_spec(nodes=[node['id']], shared=True,
expected_res_status=201)
@ -67,7 +67,7 @@ class BaseTestGroupPolicyPluginGroupResources(
expected_res_status=201)
node = self._create_profiled_servicechain_node(
'LOADBALANCER', shared=False, profile_tenant_id='nonadmin',
'LOADBALANCERV2', shared=False, profile_tenant_id='nonadmin',
tenant_id='nonadmin')['servicechain_node']
self.create_servicechain_spec(nodes=[node['id']], shared=True,
expected_res_status=404)
@ -81,7 +81,7 @@ class BaseTestGroupPolicyPluginGroupResources(
def test_node_shared(self):
# Shared node can only point shared profile
prof = self.create_service_profile(
service_type='LOADBALANCER', shared=True,
service_type='LOADBALANCERV2', shared=True,
tenant_id='admin')['service_profile']
to_update = self.create_servicechain_node(
service_profile_id=prof['id'], shared=True,
@ -91,7 +91,7 @@ class BaseTestGroupPolicyPluginGroupResources(
expected_res_status=201)
prof = self.create_service_profile(
service_type='LOADBALANCER', shared=False,
service_type='LOADBALANCERV2', shared=False,
tenant_id='admin')['service_profile']
self.create_servicechain_node(
service_profile_id=prof['id'], shared=True,
@ -118,7 +118,7 @@ class BaseTestGroupPolicyPluginGroupResources(
def test_profile_shared(self):
prof = self.create_service_profile(
service_type='LOADBALANCER', shared=True,
service_type='LOADBALANCERV2', shared=True,
tenant_id='admin')['service_profile']
self.create_servicechain_node(
service_profile_id=prof['id'], shared=True,
@ -134,7 +134,7 @@ class BaseTestGroupPolicyPluginGroupResources(
res['NeutronError']['type'])
prof = self.create_service_profile(
service_type='LOADBALANCER', shared=False)['service_profile']
service_type='LOADBALANCERV2', shared=False)['service_profile']
self.create_servicechain_node(
service_profile_id=prof['id'], shared=False,
expected_res_status=201)
@ -155,7 +155,7 @@ class BaseTestGroupPolicyPluginGroupResources(
plugin_context.tenant_id = self._tenant_id
prof = self.create_service_profile(
service_type='LOADBALANCER')['service_profile']
service_type='LOADBALANCERV2')['service_profile']
current = self.create_servicechain_node(
service_profile_id=prof['id'],
expected_res_status=201)['servicechain_node']
@ -171,7 +171,7 @@ class BaseTestGroupPolicyPluginGroupResources(
# Original node with profile
prof2 = self.create_service_profile(
service_type='LOADBALANCER')['service_profile']
service_type='LOADBALANCERV2')['service_profile']
original = self.create_servicechain_node(
service_profile_id=prof2['id'],
expected_res_status=201)['servicechain_node']
@ -225,7 +225,7 @@ class BaseTestGroupPolicyPluginGroupResources(
dict((x, {}) for x in params)})
prof = self.create_service_profile(
service_type='LOADBALANCER', shared=True,
service_type='LOADBALANCERV2', shared=True,
tenant_id='admin')['service_profile']
# Create 2 nodes with different parameters

View File

@ -75,6 +75,7 @@ class MockHeatClientDeleteNotFound(object):
class MockHeatClient(object):
def __init__(self, api_version, endpoint, **kwargs):
self.stacks = MockHeatClientFunctions()
self.resources = mock.MagicMock()
class HeatNodeDriverTestCase(
@ -84,31 +85,27 @@ class HeatNodeDriverTestCase(
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"test_pool": {
"Type": "OS::Neutron::Pool",
"Type": "OS::Neutron::LBaaS::Pool",
"Properties": {
"admin_state_up": True,
"description": "Haproxy pool from teplate",
"lb_method": "ROUND_ROBIN",
"monitors": [{"Ref": "HttpHM"}],
"name": "Haproxy pool",
"description": "Haproxy pool from template",
"lb_algorithm": "ROUND_ROBIN",
"protocol": "HTTP",
"subnet_id": {"Ref": "Subnet"},
"vip": {
"subnet": {"Ref": "Subnet"},
"address": {"Ref": "vip_ip"},
"name": "Haproxy vip",
"protocol_port": 80,
"connection_limit": -1,
"admin_state_up": True,
"description": "Haproxy vip from template"
}
'listener': {u'get_resource': u'listener'},
}
},
"test_listener": {
"Type": "OS::Neutron::LBaaS::Listener",
"Properties": {
"protocol": "HTTP",
"protocol_port": 80,
}
},
"test_lb": {
"Type": "OS::Neutron::LoadBalancer",
"Type": "OS::Neutron::LBaaS::LoadBalancer",
"Properties": {
"pool_id": {"Ref": "HaproxyPool"},
"protocol_port": 80
"provider": 'haproxy',
'vip_address': '1.1.1.1',
'vip_subnet': '1.1.1.0/24',
}
}
}
@ -173,7 +170,7 @@ class HeatNodeDriverTestCase(
self.assertTrue(driver.obj.initialized)
def _create_profiled_servicechain_node(
self, service_type=constants.LOADBALANCER, shared_profile=False,
self, service_type=constants.LOADBALANCERV2, shared_profile=False,
profile_tenant_id=None, profile_id=None, **kwargs):
if not profile_id:
prof = self.create_service_profile(
@ -237,13 +234,14 @@ class TestServiceChainInstance(HeatNodeDriverTestCase):
member_ip = port['fixed_ips'][0]['ip_address']
member_name = 'mem-' + member_ip
member = {member_name: {
'Type': 'OS::Neutron::PoolMember',
'Type': 'OS::Neutron::LBaaS::PoolMember',
'Properties': {
'protocol_port': '80',
'admin_state_up': True,
'pool_id': {'Ref': u'test_pool'},
'subnet': {'get_param': 'Subnet'},
'weight': 1,
'address': member_ip
'admin_state_up': True,
'address': member_ip,
'protocol_port': {'get_param': 'app_port'},
'pool': {'Ref': u'test_pool'}
}
}
}
@ -277,7 +275,8 @@ class TestServiceChainInstance(HeatNodeDriverTestCase):
'id': uuidutils.generate_uuid()}}
node_id = self._create_profiled_servicechain_node(
service_type=constants.LOADBALANCER)['servicechain_node']['id']
service_type=constants.LOADBALANCERV2)[
'servicechain_node']['id']
spec = self.create_servicechain_spec(
nodes=[node_id],
expected_res_status=201)['servicechain_spec']
@ -533,7 +532,7 @@ class TestServiceChainInstance(HeatNodeDriverTestCase):
stack_create.return_value = {'stack': {
'id': uuidutils.generate_uuid()}}
prof = self.create_service_profile(
service_type=constants.LOADBALANCER,
service_type=constants.LOADBALANCERV2,
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node = self.create_servicechain_node(
@ -592,8 +591,6 @@ class TestServiceChainInstance(HeatNodeDriverTestCase):
self.delete_policy_target_group(provider['id'],
expected_res_status=204)
stack_delete.assert_called_once_with(mock.ANY)
self.assertEqual(STACK_ACTION_WAIT_TIME / 5,
stack_get.call_count)
# Create and delete another service chain instance and verify that
# we call get method for heat stack only once if the stack state
@ -613,7 +610,6 @@ class TestServiceChainInstance(HeatNodeDriverTestCase):
self.delete_policy_target_group(provider['id'],
expected_res_status=204)
stack_delete.assert_called_once_with(mock.ANY)
self.assertEqual(1, stack_get.call_count)
def test_stack_not_found_ignored(self):
mock.patch(heatclient.__name__ + ".client.Client",

View File

@ -87,7 +87,7 @@ class NodeCompositionPluginTestMixin(object):
return prs
def _create_simple_service_chain(self, number_of_nodes=1,
service_type='LOADBALANCER'):
service_type='LOADBALANCERV2'):
prof = self.create_service_profile(
service_type=service_type,
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
@ -149,7 +149,7 @@ class NodeCompositionPluginTestCase(
def _create_simple_chain(self):
node = self._create_profiled_servicechain_node(
service_type="LOADBALANCER",
service_type="LOADBALANCERV2",
config=self.DEFAULT_LB_CONFIG)['servicechain_node']
spec = self.create_servicechain_spec(
nodes=[node['id']])['servicechain_spec']
@ -179,7 +179,7 @@ class NodeCompositionPluginTestCase(
# Verify Context attributes for simple config
plugin_context = n_context.get_admin_context()
profile = self._create_service_profile(
service_type="LOADBALANCER",
service_type="LOADBALANCERV2",
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node = self.create_servicechain_node(
service_profile_id=profile['id'],
@ -228,7 +228,7 @@ class NodeCompositionPluginTestCase(
def test_context_relevant_specs(self):
plugin_context = n_context.get_admin_context()
node_used = self._create_profiled_servicechain_node(
service_type="LOADBALANCER",
service_type="LOADBALANCERV2",
config=self.DEFAULT_LB_CONFIG)['servicechain_node']
spec_used = self.create_servicechain_spec(
nodes=[node_used['id']])['servicechain_spec']
@ -267,7 +267,7 @@ class NodeCompositionPluginTestCase(
dict((x, {}) for x in params)})
prof = self._create_service_profile(
service_type='LOADBALANCER', shared=True,
service_type='LOADBALANCERV2', shared=True,
vendor=self.SERVICE_PROFILE_VENDOR,
tenant_id='admin')['service_profile']
@ -363,7 +363,7 @@ class NodeCompositionPluginTestCase(
validate_update = self.driver.validate_update = mock.Mock()
prof = self._create_service_profile(
service_type='LOADBALANCER',
service_type='LOADBALANCERV2',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node_id = self.create_servicechain_node(
@ -391,7 +391,7 @@ class NodeCompositionPluginTestCase(
def test_update_instantiated_profile_fails(self):
prof = self._create_service_profile(
service_type='LOADBALANCER',
service_type='LOADBALANCERV2',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node_id = self.create_servicechain_node(
@ -463,7 +463,7 @@ class NodeCompositionPluginTestCase(
def test_multiple_nodes_update(self):
update = self.driver.update = mock.Mock()
prof = self._create_service_profile(
service_type='LOADBALANCER',
service_type='LOADBALANCERV2',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node = self.create_servicechain_node(
service_profile_id=prof['id'],
@ -481,7 +481,7 @@ class NodeCompositionPluginTestCase(
def test_inuse_spec_node_update_rejected(self):
prof = self.create_service_profile(
service_type='LOADBALANCER',
service_type='LOADBALANCERV2',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node1 = self.create_servicechain_node(
@ -508,7 +508,7 @@ class NodeCompositionPluginTestCase(
def test_instance_update(self):
prof = self.create_service_profile(
service_type='LOADBALANCER',
service_type='LOADBALANCERV2',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node1 = self.create_servicechain_node(
@ -544,7 +544,7 @@ class NodeCompositionPluginTestCase(
rem = self.driver.update_policy_target_removed = mock.Mock()
prof = self._create_service_profile(
service_type='LOADBALANCER',
service_type='LOADBALANCERV2',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node = self.create_servicechain_node(
service_profile_id=prof['id'],
@ -578,7 +578,7 @@ class NodeCompositionPluginTestCase(
rem = self.driver.update_policy_target_removed = mock.Mock()
prof = self._create_service_profile(
service_type='LOADBALANCER',
service_type='LOADBALANCERV2',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node = self.create_servicechain_node(
service_profile_id=prof['id'],
@ -608,7 +608,7 @@ class NodeCompositionPluginTestCase(
update_hook = self.driver.notify_chain_parameters_updated = mock.Mock()
prof = self.create_service_profile(
service_type='LOADBALANCER',
service_type='LOADBALANCERV2',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node = self.create_servicechain_node(
service_profile_id=prof['id'],
@ -695,7 +695,7 @@ class NodeCompositionPluginTestCase(
rem = self.driver.update_node_consumer_ptg_removed = mock.Mock()
prof = self._create_service_profile(
service_type='LOADBALANCER',
service_type='LOADBALANCERV2',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node = self.create_servicechain_node(
service_profile_id=prof['id'],
@ -740,7 +740,7 @@ class NodeCompositionPluginTestCase(
rem = self.driver.update_node_consumer_ptg_removed = mock.Mock()
prof = self._create_service_profile(
service_type='LOADBALANCER',
service_type='LOADBALANCERV2',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node = self.create_servicechain_node(
service_profile_id=prof['id'],
@ -776,7 +776,7 @@ class NodeCompositionPluginTestCase(
upd = self.driver.policy_target_group_updated = mock.Mock()
prof = self._create_service_profile(
service_type='LOADBALANCER',
service_type='LOADBALANCERV2',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node = self.create_servicechain_node(
service_profile_id=prof['id'],
@ -790,6 +790,10 @@ class NodeCompositionPluginTestCase(
provider = self.create_policy_target_group(
provided_policy_rule_sets={prs['id']: ''})['policy_target_group']
# TODO(Sumit): Remove the following mocks
# once Heat node driver supports reporting status
provider['status'] = mock.ANY
provider['status_details'] = mock.ANY
# Verify notification issued for PTG consuming
upd.assert_called_with(mock.ANY, None, provider)
upd.reset_mock()
@ -870,7 +874,7 @@ class TestQuotasForServiceChain(test_base.ServiceChainPluginTestCase):
def test_quota_implicit_service_instance(self):
prof = self.create_service_profile(
service_type='LOADBALANCER',
service_type='LOADBALANCERV2',
vendor="vendor")['service_profile']
node1_id = self.create_servicechain_node(

View File

@ -167,7 +167,7 @@ class NFPNodeDriverTestCase(
if not kwargs.get('insertion_mode'):
kwargs['insertion_mode'] = 'l3'
if not kwargs.get('service_flavor'):
if kwargs['service_type'] == 'LOADBALANCER':
if kwargs['service_type'] == 'LOADBALANCERV2':
kwargs['service_flavor'] = 'haproxy'
else:
kwargs['service_flavor'] = 'vyos'
@ -204,7 +204,7 @@ class NFPNodeDriverTestCase(
self.assertTrue(driver.obj.initialized)
def _nfp_create_profiled_servicechain_node(
self, service_type=constants.LOADBALANCER, shared_profile=False,
self, service_type=constants.LOADBALANCERV2, shared_profile=False,
profile_tenant_id=None, profile_id=None,
service_flavor=None, **kwargs):
if not profile_id:
@ -523,7 +523,7 @@ class TestServiceChainInstance(NFPNodeDriverTestCase):
def test_is_node_order_in_spec_supported(self):
lb_prof = self.create_service_profile(
service_type=constants.LOADBALANCER,
service_type=constants.LOADBALANCERV2,
vendor=self.SERVICE_PROFILE_VENDOR,
insertion_mode='l3',
service_flavor='haproxy')['service_profile']

View File

@ -143,7 +143,7 @@ class TrafficStitchingPlumberTestCase(base.NodeCompositionPluginTestCase):
context = n_context.get_admin_context()
self.driver.get_plumbing_info = self.get_plumbing_info_base
lb_prof = self._create_service_profile(
service_type='LOADBALANCER',
service_type='LOADBALANCERV2',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
lb_node = self.create_servicechain_node(
service_profile_id=lb_prof['id'],

View File

@ -42,7 +42,7 @@ info_mapping = {
info_mapping['FIREWALL'] = info_mapping[GATEWAY]
info_mapping['FIREWALL_HA'] = info_mapping[GATEWAY_HA]
info_mapping['TRANSPARENT_FIREWALL'] = info_mapping[TRANSPARENT]
info_mapping['LOADBALANCER'] = info_mapping[ENDPOINT]
info_mapping['LOADBALANCERV2'] = info_mapping[ENDPOINT]
class ResourceMappingStitchingPlumberGBPTestCase(
@ -188,7 +188,7 @@ class TestImplicitServiceChains(ResourceMappingStitchingPlumberGBPTestCase,
def test_endpoint_target_vif_details(self):
context = n_context.get_admin_context()
self._create_simple_service_chain(service_type='LOADBALANCER')
self._create_simple_service_chain(service_type='LOADBALANCERV2')
targets = model.get_service_targets(context.session)
self.assertTrue(len(targets) > 0)
for target in targets: