Heat Based Node driver for Node Composition plugin

This patch adds a Heat-API-based node driver for the NCP plugin,
which instantiates advanced services using Heat.

Partially implements blueprint node-centric-chain-plugin

Change-Id: Ie177bdf220ae8259afee3319e0fb37eb12f03ee3
This commit is contained in:
Magesh GV 2015-06-09 16:01:45 +05:30
parent 3d6a35322c
commit b8dc86daab
15 changed files with 1333 additions and 61 deletions

View File

@ -0,0 +1,43 @@
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""ncp_node_instance_stacks
Revision ID: 5358a28fb97d
Revises: d08627f64e37
"""
# revision identifiers, used by Alembic.
revision = '5358a28fb97d'
down_revision = 'd08627f64e37'
from alembic import op
import sqlalchemy as sa
def upgrade(active_plugins=None, options=None):
    """Create the ncp_node_instance_stacks mapping table.

    Maps a (servicechain instance, node) pair to the Heat stack(s) the
    heat node driver created for it; the composite primary key allows
    several stacks per pair.
    """
    table_columns = (
        sa.Column('sc_instance_id', sa.String(length=36), nullable=False),
        sa.Column('sc_node_id', sa.String(length=36), nullable=False),
        sa.Column('stack_id', sa.String(length=36), nullable=False),
        sa.PrimaryKeyConstraint('sc_instance_id', 'sc_node_id', 'stack_id'),
    )
    op.create_table('ncp_node_instance_stacks', *table_columns)
def downgrade(active_plugins=None, options=None):
    # Intentionally a no-op: the mapping table is left in place on downgrade.
    pass

View File

@ -1 +1 @@
d08627f64e37
5358a28fb97d

View File

@ -14,7 +14,6 @@ from neutron import context as n_context
from neutron import manager
from neutron.plugins.common import constants as pconst
from gbpservice.neutron.extensions import group_policy
from gbpservice.neutron.services.servicechain.plugins.ncp import model
@ -28,8 +27,12 @@ def get_node_driver_context(sc_plugin, context, sc_instance,
specs = sc_plugin.get_servicechain_specs(
context, filters={'id': sc_instance['servicechain_specs']})
position = _calculate_node_position(specs, current_node['id'])
provider = _get_ptg_or_ep(context, sc_instance['provider_ptg_id'])
consumer = _get_ptg_or_ep(context, sc_instance['consumer_ptg_id'])
provider, _ = _get_ptg_or_ep(
context, sc_instance['provider_ptg_id'])
consumer, is_consumer_external = _get_ptg_or_ep(
context, sc_instance['consumer_ptg_id'])
classifier = get_gbp_plugin().get_policy_classifier(
context, sc_instance['classifier_id'])
current_profile = sc_plugin.get_service_profile(
context, current_node['service_profile_id'])
original_profile = sc_plugin.get_service_profile(
@ -52,19 +55,23 @@ def get_node_driver_context(sc_plugin, context, sc_instance,
original_service_chain_node=original_node,
original_service_profile=original_profile,
service_targets=service_targets,
position=position)
position=position,
classifier=classifier,
is_consumer_external=is_consumer_external)
def _get_ptg_or_ep(context, group_id):
    """Resolve group_id to a Policy Target Group or an External Policy.

    Returns a (group, is_group_external) tuple; is_group_external is True
    only when the id resolved to an external policy. Returns (None, False)
    when group_id is falsy.
    """
    # NOTE(review): the rendered diff left the superseded "return group"
    # line in the body, making the tuple return unreachable; removed.
    group = None
    is_group_external = False
    if group_id:
        try:
            group = get_gbp_plugin().get_policy_target_group(context, group_id)
        except group_policy.PolicyTargetGroupNotFound:
            # Could be EP. The failed lookup may have poisoned the
            # transaction, so roll back before querying again.
            context.session.rollback()
            groups = get_gbp_plugin().get_policy_target_groups(
                context, filters={'id': [group_id]})
            if groups:
                group = groups[0]
            else:
                group = get_gbp_plugin().get_external_policy(context, group_id)
                is_group_external = True
    return (group, is_group_external)
def _calculate_node_position(specs, node_id):
@ -83,7 +90,8 @@ class NodeDriverContext(object):
service_chain_specs, current_service_chain_node, position,
current_service_profile, provider_group, consumer_group=None,
management_group=None, original_service_chain_node=None,
original_service_profile=None, service_targets=None):
original_service_profile=None, service_targets=None,
classifier=None, is_consumer_external=False):
self._gbp_plugin = get_gbp_plugin()
self._sc_plugin = sc_plugin
self._plugin_context = context
@ -98,6 +106,8 @@ class NodeDriverContext(object):
self._provider_group = provider_group
self._consumer_group = consumer_group
self._management_group = management_group
self._classifier = classifier
self._is_consumer_external = is_consumer_external
self._relevant_specs = None
self._core_plugin = manager.NeutronManager.get_plugin()
self._l3_plugin = manager.NeutronManager.get_service_plugins().get(
@ -166,6 +176,10 @@ class NodeDriverContext(object):
def original_profile(self):
return self._original_service_profile
@property
def is_consumer_external(self):
return self._is_consumer_external
@property
def relevant_specs(self):
"""Get specs on the SCI containing this particular Node."""
@ -186,6 +200,10 @@ class NodeDriverContext(object):
def management(self):
return self._management_group
@property
def classifier(self):
return self._classifier
def get_service_targets(self, update=False):
""" Returns the service targets assigned for this service if any.
The result looks like the following:

View File

@ -57,3 +57,8 @@ class NotAvailablePTGForTargetRequest(PlumbingException):
message = _("PTG of type %(ptg_type)s doesn't exist for service chain "
"instance %(instance)s. However, it is required by the "
"scheduled Node Driver in order to deploy Node %(node)s")
class InuseSpecNodeUpdateNotAllowed(NodeCompositionPluginBadRequest):
    # Raised when a spec's node list is changed while instances exist.
    message = _("The Node Composition Plugin does not support updating the "
                "nodes in an instantiated servicechain spec.")

View File

@ -78,7 +78,7 @@ class NodeDriverManager(stevedore.named.NamedExtensionManager):
Given a NodeContext, this method returns the driver capable of
destroying the specific node.
"""
return self._get_owning_driver(context)
return self.get_owning_driver(context)
def schedule_update(self, context):
"""Schedule Node Driver for Node Update.
@ -86,12 +86,12 @@ class NodeDriverManager(stevedore.named.NamedExtensionManager):
Given a NodeContext, this method returns the driver capable of updating
the specific node.
"""
driver = self._get_owning_driver(context)
driver = self.get_owning_driver(context)
if driver:
driver.validate_update(context)
return driver
def _get_owning_driver(self, context):
def get_owning_driver(self, context):
owner = model.get_node_owner(context)
if owner:
driver = self.drivers.get(owner[0].driver_name)

View File

@ -0,0 +1,463 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from neutron.common import log
from neutron.db import model_base
from neutron.plugins.common import constants as pconst
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import sqlalchemy as sa
from gbpservice.neutron.services.servicechain.plugins.ncp import (
exceptions as exc)
from gbpservice.neutron.services.servicechain.plugins.ncp import driver_base
from gbpservice.neutron.services.servicechain.plugins.ncp.node_drivers import (
openstack_heat_api_client as heat_api_client)
LOG = logging.getLogger(__name__)

# Tunables for the heat node driver, registered under [heat_node_driver].
service_chain_opts = [
    cfg.IntOpt('stack_action_wait_time',
               default=15,
               help=_("Seconds to wait for pending stack operation "
                      "to complete")),
    cfg.StrOpt('heat_uri',
               default='http://localhost:8004/v1',
               help=_("Heat API server address to instantiate services "
                      "specified in the service chain.")),
    cfg.StrOpt('exclude_pool_member_tag',
               default='ExcludePoolMember',
               help=_("Policy Targets created for the LB Pool Members should "
                      "have this tag in their description")),
]

cfg.CONF.register_opts(service_chain_opts, "heat_node_driver")

# Snapshot the configured values at import time.
EXCLUDE_POOL_MEMBER_TAG = cfg.CONF.heat_node_driver.exclude_pool_member_tag
STACK_ACTION_WAIT_TIME = cfg.CONF.heat_node_driver.stack_action_wait_time
STACK_ACTION_RETRY_WAIT = 5  # Retry after every 5 seconds
class ServiceNodeInstanceStack(model_base.BASEV2):
    """ServiceChainInstance stacks owned by the Node driver."""
    __tablename__ = 'ncp_node_instance_stacks'
    # Composite primary key: a (chain instance, node) pair may own
    # several Heat stacks.
    sc_instance_id = sa.Column(sa.String(36),
                               nullable=False, primary_key=True)
    sc_node_id = sa.Column(sa.String(36),
                           nullable=False, primary_key=True)
    stack_id = sa.Column(sa.String(36),
                         nullable=False, primary_key=True)
class InvalidServiceType(exc.NodeCompositionPluginBadRequest):
    # Raised at validate time for service types other than LB/Firewall.
    message = _("The Heat Node driver only supports the services "
                "Firewall and LB in a Service Chain")


class ServiceProfileRequired(exc.NodeCompositionPluginBadRequest):
    # The driver cannot schedule a node without a service profile.
    message = _("A Service profile is required in Service node")


class NodeVendorMismatch(exc.NodeCompositionPluginBadRequest):
    # Profile vendor must match the driver's vendor_name.
    message = _("The Heat Node driver only handles nodes which have service "
                "profile with vendor name %(vendor)s")


class ServiceConfigNotJsonString(exc.NodeCompositionPluginBadRequest):
    # Node config must be JSON-parseable.
    message = _("Service config should be a json string for the Heat Node "
                "driver")


class HeatTemplateVersionNotSupported(exc.NodeCompositionPluginBadRequest):
    # Template must declare AWSTemplateFormatVersion or heat_template_version.
    message = _("The Heat Node driver only supports AWS and HOT template "
                "formats for service node config")


class ServiceResourceDefinitionsMissing(exc.NodeCompositionPluginBadRequest):
    # Template must contain a non-empty Resources/resources section.
    message = _("The Service template does not have service resources defined")


class HeatResourceMissing(exc.NodeCompositionPluginBadRequest):
    # A per-service-type mandatory resource is absent from the template.
    message = _("The service template requires the Resource %(resource)s for "
                "service type %(servicetype)s")


class ProfileUpdateNotSupported(exc.NodeCompositionPluginBadRequest):
    message = _("The Heat Node driver does not allow updating the "
                "service profile used by a Node")


class ServiceTypeUpdateNotSupported(exc.NodeCompositionPluginBadRequest):
    message = _("The Heat Node driver does not allow updating the "
                "service type used by a Node")
class HeatNodeDriver(driver_base.NodeDriverBase):
    """Node driver that deploys LB/Firewall chain nodes as Heat stacks.

    The node's config blob is treated as a Heat template (AWS or HOT
    format) and instantiated via the Heat API.
    """

    # Service types this driver is able to deploy.
    sc_supported_type = [pconst.LOADBALANCER, pconst.FIREWALL]
    # A node's service profile must carry this vendor for the driver to
    # own the node.
    vendor_name = 'heat_based_node_driver'
    initialized = False
    # Resource types that must appear in the node template, per service type.
    required_heat_resources = {pconst.LOADBALANCER: [
                                   'OS::Neutron::LoadBalancer',
                                   'OS::Neutron::Pool'],
                               pconst.FIREWALL: [
                                   'OS::Neutron::Firewall',
                                   'OS::Neutron::FirewallPolicy']}
@log.log
def initialize(self, name):
    """Driver-manager hook; records the registered driver name."""
    self.initialized = True
    self._name = name

@log.log
def get_plumbing_info(self, context):
    # This driver requests no plumbing (no service targets created).
    pass
@log.log
def validate_create(self, context):
    """Reject nodes this driver cannot deploy.

    Requires a service profile with the driver's vendor name, a
    supported service type, and a valid Heat template as node config.
    """
    profile = context.current_profile
    if profile is None:
        raise ServiceProfileRequired()
    if profile['vendor'] != self.vendor_name:
        raise NodeVendorMismatch(vendor=self.vendor_name)
    svc_type = profile['service_type']
    if svc_type not in self.sc_supported_type:
        raise InvalidServiceType()
    self._validate_service_config(context.current_node['config'], svc_type)
@log.log
def validate_update(self, context):
    """Reject updates that change the profile or service type of a node.

    A missing original node means this is a PT create/delete
    notification, which needs no validation.
    """
    if not context.original_node:  # PT create/delete notifications
        return
    if context.current_profile != context.original_profile:
        raise ProfileUpdateNotSupported()
    current_type = context.current_node['service_type']
    if current_type != context.original_node['service_type']:
        raise ServiceTypeUpdateNotSupported()
    # Type unchanged (the branch above raises otherwise): re-validate
    # the (possibly updated) config against the profile's service type.
    self._validate_service_config(context.current_node['config'],
                                  context.current_profile['service_type'])
def _validate_service_config(self, service_template, service_type):
    """Validate the node config is a JSON Heat template (AWS or HOT)
    containing the resources required for service_type.

    Raises one of the driver's BadRequest subclasses on any violation.
    """
    if not service_template:
        raise ServiceResourceDefinitionsMissing()
    try:
        service_template = jsonutils.loads(service_template)
    except Exception:
        raise ServiceConfigNotJsonString()
    if (not service_template.get('AWSTemplateFormatVersion') and
            not service_template.get('heat_template_version')):
        raise HeatTemplateVersionNotSupported()
    # AWS and HOT formats use different key capitalization throughout.
    is_template_aws_version = service_template.get(
        'AWSTemplateFormatVersion', False)
    resources_key = 'Resources' if is_template_aws_version else 'resources'
    if not service_template.get(resources_key):
        raise ServiceResourceDefinitionsMissing()
    for resource_name in self.required_heat_resources[service_type]:
        param_key = self._get_heat_resource_key(
            service_template[resources_key],
            is_template_aws_version,
            resource_name)
        if not param_key:
            raise HeatResourceMissing(resource=resource_name,
                                      servicetype=service_type)
@log.log
def create(self, context):
    """Deploy the node as a new Heat stack and record the mapping.

    The stack name embeds the instance and node names plus 8-char id
    prefixes so it is unique and recognizable in Heat.
    """
    heatclient = self._get_heat_client(context.plugin_context)
    stack_template, stack_params = self._fetch_template_and_params(context)
    stack_name = ("stack_" + context.instance['name'] +
                  context.current_node['name'] +
                  context.instance['id'][:8] +
                  context.current_node['id'][:8])
    # Heat does not accept space in stack name
    stack_name = stack_name.replace(" ", "")
    stack = heatclient.create(stack_name, stack_template, stack_params)
    self._insert_node_instance_stack_in_db(
        context.plugin_session, context.current_node['id'],
        context.instance['id'], stack['stack']['id'])
@log.log
def delete(self, context):
    """Tear down the node's Heat stack(s) and drop the DB mappings."""
    stack_ids = self._get_node_instance_stacks(context.plugin_session,
                                               context.current_node['id'],
                                               context.instance['id'])
    heatclient = self._get_heat_client(context.plugin_context)
    # Issue all deletes first, then wait, so stacks delete concurrently.
    for stack in stack_ids:
        heatclient.delete(stack.stack_id)
    for stack in stack_ids:
        self._wait_for_stack_operation_complete(
            heatclient, stack.stack_id, 'delete')
    self._delete_node_instance_stack_in_db(context.plugin_session,
                                           context.current_node['id'],
                                           context.instance['id'])
@log.log
def update(self, context):
    """Re-render the node template and push it to each owned stack."""
    heatclient = self._get_heat_client(context.plugin_context)
    stack_template, stack_params = self._fetch_template_and_params(context)
    stack_ids = self._get_node_instance_stacks(context.plugin_session,
                                               context.current_node['id'],
                                               context.instance['id'])
    for stack in stack_ids:
        # Heat rejects updates while another operation is in progress,
        # so wait for the stack to settle before updating it.
        self._wait_for_stack_operation_complete(
            heatclient, stack.stack_id, 'update')
        heatclient.update(stack.stack_id, stack_template, stack_params)
@log.log
def update_policy_target_added(self, context, policy_target):
    # New PTs become LB pool members, so re-render and update the stack.
    if context.current_profile['service_type'] == pconst.LOADBALANCER:
        self.update(context)

@log.log
def update_policy_target_removed(self, context, policy_target):
    # Removed PTs must leave the LB pool, so re-render and update.
    if context.current_profile['service_type'] == pconst.LOADBALANCER:
        self.update(context)
@property
def name(self):
    # Name assigned by the driver manager at initialize() time.
    return self._name

def _get_heat_client(self, plugin_context):
    """Build a tenant-scoped HeatClient using the configured heat_uri."""
    return heat_api_client.HeatClient(
        plugin_context,
        cfg.CONF.heat_node_driver.heat_uri)
def _fetch_template_and_params(self, context):
    """Build the Heat template and parameter dict for this node.

    Starts from the node's config blob. For LB nodes, injects a pool
    member per provider policy target; otherwise injects firewall rules
    allowing each consumer CIDR to reach the provider CIDR. Returns a
    (stack_template, stack_params) tuple.
    """
    sc_instance = context.instance
    provider_ptg = context.provider
    # TODO(Magesh): Handle multiple subnets
    provider_ptg_subnet_id = provider_ptg['subnets'][0]
    consumer = context.consumer
    service_type = context.current_profile['service_type']
    stack_template = jsonutils.loads(context.current_node.get('config'))
    config_param_values = sc_instance.get('config_param_values', {})
    stack_params = {}
    if config_param_values:
        config_param_values = jsonutils.loads(config_param_values)
    is_template_aws_version = stack_template.get(
        'AWSTemplateFormatVersion', False)
    if service_type == pconst.LOADBALANCER:
        self._generate_pool_members(context, stack_template,
                                    config_param_values,
                                    provider_ptg,
                                    is_template_aws_version)
    else:
        provider_subnet = context.core_plugin.get_subnet(
            context.plugin_context, provider_ptg_subnet_id)
        if context.is_consumer_external:
            # REVISIT(Magesh): Allowing the first destination which is 0/0
            # Validate and skip adding FW rule in case routes is not set
            es = context.gbp_plugin.get_external_segment(
                context.plugin_context, consumer['external_segments'][0])
            consumer_cidrs = [route['destination']
                              for route in es['external_routes']]
        else:
            # Fixed: use the public plugin_context accessor instead of
            # the private _plugin_context attribute used before.
            consumer_subnet = context.core_plugin.get_subnet(
                context.plugin_context, consumer['subnets'][0])
            consumer_cidrs = [consumer_subnet['cidr']]
        provider_cidr = provider_subnet['cidr']
        self._update_template_with_firewall_rules(
            context, provider_ptg, provider_cidr, consumer_cidrs,
            stack_template, is_template_aws_version)
    node_params = (stack_template.get('Parameters')
                   or stack_template.get('parameters')
                   or [])
    # Only "Subnet" and values supplied on the instance are filled in;
    # other template parameters keep their template defaults.
    for parameter in node_params:
        if parameter == "Subnet":
            stack_params[parameter] = provider_ptg_subnet_id
        elif parameter in config_param_values:
            stack_params[parameter] = config_param_values[parameter]
    return (stack_template, stack_params)
def _wait_for_stack_operation_complete(self, heatclient, stack_id, action):
    """Poll until the stack leaves UPDATE_IN_PROGRESS/PENDING_DELETE.

    A DELETE_FAILED stack is re-deleted on each poll. Gives up with an
    error log after STACK_ACTION_WAIT_TIME seconds; any failure while
    fetching the stack also ends the wait (best-effort semantics).
    """
    time_waited = 0
    while True:
        try:
            stack = heatclient.get(stack_id)
            if stack.stack_status == 'DELETE_FAILED':
                # Retry the delete, then fall through to the sleep below.
                heatclient.delete(stack_id)
            elif stack.stack_status not in ['UPDATE_IN_PROGRESS',
                                            'PENDING_DELETE']:
                return
        except Exception:
            LOG.exception(_("Retrieving the stack %(stack)s failed."),
                          {'stack': stack_id})
            return
        else:
            time.sleep(STACK_ACTION_RETRY_WAIT)
            time_waited = time_waited + STACK_ACTION_RETRY_WAIT
            if time_waited >= STACK_ACTION_WAIT_TIME:
                LOG.error(_("Stack %(action)s not completed within "
                            "%(wait)s seconds"),
                          {'action': action,
                           'wait': STACK_ACTION_WAIT_TIME,
                           'stack': stack_id})
                return
def _delete_node_instance_stack_in_db(self, session, sc_node_id,
                                      sc_instance_id):
    """Remove every stack mapping row for the given (node, instance)."""
    with session.begin(subtransactions=True):
        mappings = (session.query(ServiceNodeInstanceStack).
                    filter_by(sc_node_id=sc_node_id,
                              sc_instance_id=sc_instance_id).
                    all())
        for mapping in mappings:
            session.delete(mapping)
def _insert_node_instance_stack_in_db(self, session, sc_node_id,
                                      sc_instance_id, stack_id):
    """Persist the (instance, node) -> Heat stack ownership mapping."""
    with session.begin(subtransactions=True):
        chainstack = ServiceNodeInstanceStack(
            sc_node_id=sc_node_id,
            sc_instance_id=sc_instance_id,
            stack_id=stack_id)
        session.add(chainstack)
def _get_node_instance_stacks(self, session, sc_node_id=None,
                              sc_instance_id=None):
    """Fetch stack mappings, optionally narrowed by node and/or instance."""
    filters = {}
    if sc_node_id:
        filters['sc_node_id'] = sc_node_id
    if sc_instance_id:
        filters['sc_instance_id'] = sc_instance_id
    with session.begin(subtransactions=True):
        lookup = session.query(ServiceNodeInstanceStack).filter_by(**filters)
        return lookup.all()
def _update_template_with_firewall_rules(self, context, provider_ptg,
                                         provider_cidr, consumer_cidrs,
                                         stack_template,
                                         is_template_aws_version):
    """Insert one allow-rule per consumer CIDR and wire the rule list
    into the template's firewall policy resource."""
    resources_key = ('Resources' if is_template_aws_version
                     else 'resources')
    properties_key = ('Properties' if is_template_aws_version
                      else 'properties')
    ref_key = 'Ref' if is_template_aws_version else 'get_resource'
    rule_list = []
    for index, consumer_cidr in enumerate(consumer_cidrs, start=1):
        rule_name = "Rule_" + str(index)
        stack_template[resources_key][rule_name] = (
            self._generate_firewall_rule(
                is_template_aws_version, context.classifier["protocol"],
                context.classifier["port_range"],
                provider_cidr, consumer_cidr))
        rule_list.append({ref_key: rule_name})
    fw_policy_key = self._get_heat_resource_key(
        stack_template[resources_key],
        is_template_aws_version,
        'OS::Neutron::FirewallPolicy')
    stack_template[resources_key][fw_policy_key][properties_key][
        'firewall_rules'] = rule_list
def _generate_firewall_rule(self, is_template_aws_version, protocol,
destination_port, destination_cidr,
source_cidr):
type_key = 'Type' if is_template_aws_version else 'type'
properties_key = ('Properties' if is_template_aws_version
else 'properties')
return {type_key: "OS::Neutron::FirewallRule",
properties_key: {
"protocol": protocol,
"enabled": True,
"destination_port": destination_port,
"action": "allow",
"destination_ip_address": destination_cidr,
"source_ip_address": source_cidr
}
}
def _generate_pool_members(self, context, stack_template,
                           config_param_values, provider_ptg,
                           is_template_aws_version):
    """Add an OS::Neutron::PoolMember resource per provider PT IP.

    No-op when the provider group yields no eligible member IPs.
    """
    resources_key = 'Resources' if is_template_aws_version else 'resources'
    type_key = 'Type' if is_template_aws_version else 'type'
    member_ips = self._get_member_ips(context, provider_ptg)
    if not member_ips:
        return
    pool_res_name = None
    # Find the first pool resource; validate_create requires one for
    # LB nodes, so pool_res_name should not stay None in practice.
    for resource in stack_template[resources_key]:
        if stack_template[resources_key][resource][type_key] == (
                'OS::Neutron::Pool'):
            pool_res_name = resource
            break
    for member_ip in member_ips:
        member_name = 'mem-' + member_ip
        stack_template[resources_key][member_name] = (
            self._generate_pool_member_template(
                context, is_template_aws_version,
                pool_res_name, member_ip))
def _generate_pool_member_template(self, context,
is_template_aws_version,
pool_res_name, member_ip):
type_key = 'Type' if is_template_aws_version else 'type'
properties_key = ('Properties' if is_template_aws_version
else 'properties')
res_key = 'Ref' if is_template_aws_version else 'get_resource'
return {type_key: "OS::Neutron::PoolMember",
properties_key: {
"address": member_ip,
"admin_state_up": True,
"pool_id": {res_key: pool_res_name},
# FIXME(Magesh): Need to handle port range
"protocol_port": context.classifier["port_range"],
"weight": 1}}
def _get_member_ips(self, context, ptg):
    """Return fixed IPs of the PTG's policy targets eligible as LB pool
    members.

    Policy targets carrying EXCLUDE_POOL_MEMBER_TAG in their description
    are skipped, as are PTs without a port.
    """
    member_addresses = []
    # Fixed: the local previously named "policy_target_groups" actually
    # holds policy targets; also use the public plugin_context accessor
    # instead of the private _plugin_context attribute.
    policy_targets = context.gbp_plugin.get_policy_targets(
        context.plugin_context,
        filters={'id': ptg.get("policy_targets")})
    for policy_target in policy_targets:
        if EXCLUDE_POOL_MEMBER_TAG not in policy_target['description']:
            port_id = policy_target.get("port_id")
            if port_id:
                port = context.core_plugin.get_port(
                    context.plugin_context, port_id)
                ip = port.get('fixed_ips')[0].get("ip_address")
                member_addresses.append(ip)
    return member_addresses
def _get_heat_resource_key(self, template_resource_dict,
is_template_aws_version, resource_name):
type_key = 'Type' if is_template_aws_version else 'type'
for key in template_resource_dict:
if template_resource_dict[key].get(type_key) == resource_name:
return key

View File

@ -0,0 +1,62 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient import client as heat_client
from heatclient import exc as heat_exc
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class HeatClient(object):
    """Thin wrapper around python-heatclient's v1 stacks API.

    Scopes the endpoint to the caller's tenant and forwards the caller's
    token/credentials. (Now a new-style class; `class HeatClient:` was
    old-style under Python 2.)
    """

    def __init__(self, context, heat_uri, password=None,
                 auth_token=None):
        api_version = "1"
        # Heat v1 endpoints are tenant-scoped: <heat_uri>/<tenant_id>.
        endpoint = "%s/%s" % (heat_uri, context.tenant)
        kwargs = {
            'token': auth_token or context.auth_token,
            'username': context.user_name,
            'password': password
        }
        self.client = heat_client.Client(api_version, endpoint, **kwargs)
        self.stacks = self.client.stacks

    def create(self, name, data, parameters=None):
        """Create a stack named `name` from template dict `data`."""
        fields = {
            'stack_name': name,
            'timeout_mins': 30,
            'disable_rollback': True,
            'password': data.get('password'),
            'template': data,
            'parameters': parameters,
        }
        return self.stacks.create(**fields)

    def update(self, stack_id, data, parameters=None):
        """Push a new template/parameters to an existing stack."""
        fields = {
            'password': data.get('password'),
            'template': data,
            'parameters': parameters,
        }
        return self.stacks.update(stack_id, **fields)

    def delete(self, stack_id):
        """Delete a stack, tolerating stacks that are already gone."""
        try:
            self.stacks.delete(stack_id)
        except heat_exc.HTTPNotFound:
            # warning() replaces the deprecated warn() alias.
            LOG.warning(_("Stack %(stack)s created by service chain driver "
                          "is not found at cleanup"), {'stack': stack_id})

    def get(self, stack_id):
        """Fetch the current state of a stack."""
        return self.stacks.get(stack_id)

View File

@ -205,6 +205,10 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
servicechain_spec, set_params=False)
self._validate_shared_update(context, original_sc_spec,
updated_sc_spec, 'servicechain_spec')
# REVISIT(Magesh): Handle this update in a proper way
if (original_sc_spec['nodes'] != updated_sc_spec['nodes'] and
original_sc_spec['instances']):
raise exc.InuseSpecNodeUpdateNotAllowed()
return updated_sc_spec
@log.log

View File

@ -0,0 +1,616 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import copy
import heatclient
import mock
from neutron import context as neutron_context
from neutron.extensions import external_net as external_net
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from oslo_serialization import jsonutils
import webob
from gbpservice.neutron.services.servicechain.plugins.ncp import config
from gbpservice.neutron.services.servicechain.plugins.ncp.node_drivers import (
heat_node_driver as heat_node_driver)
from gbpservice.neutron.services.servicechain.plugins.ncp.node_drivers import (
openstack_heat_api_client as heatClient)
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_resource_mapping as test_gp_driver)
from gbpservice.neutron.tests.unit.services.servicechain.ncp import (
test_ncp_plugin as test_ncp_plugin)
STACK_ACTION_WAIT_TIME = 15
class MockStackObject(object):
    """Minimal stand-in for a heatclient stack exposing only stack_status."""
    def __init__(self, status):
        self.stack_status = status
class MockHeatClientFunctionsDeleteNotFound(object):
    """Fake stacks API whose delete always raises HTTPNotFound."""
    def delete(self, stack_id):
        raise heatclient.exc.HTTPNotFound()

    def create(self, **fields):
        return {'stack': {'id': uuidutils.generate_uuid()}}

    def get(self, stack_id):
        return MockStackObject('DELETE_COMPLETE')


class MockHeatClientFunctions(object):
    """Fake stacks API with no-op delete and canned create/get results."""
    def delete(self, stack_id):
        pass

    def create(self, **fields):
        return {'stack': {'id': uuidutils.generate_uuid()}}

    def get(self, stack_id):
        return MockStackObject('DELETE_COMPLETE')
class MockHeatClientDeleteNotFound(object):
    """Drop-in for heatclient.client.Client using the not-found stacks API."""
    def __init__(self, api_version, endpoint, **kwargs):
        self.stacks = MockHeatClientFunctionsDeleteNotFound()


class MockHeatClient(object):
    """Drop-in for heatclient.client.Client using the happy-path stacks API."""
    def __init__(self, api_version, endpoint, **kwargs):
        self.stacks = MockHeatClientFunctions()
class HeatNodeDriverTestCase(
        test_ncp_plugin.NodeCompositionPluginTestCase):
    """Base test case wiring the NCP plugin to the heat node driver,
    with the real heatclient replaced by MockHeatClient."""

    # Canonical AWS-format loadbalancer template used as node config.
    DEFAULT_LB_CONFIG_DICT = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {
            "test_pool": {
                "Type": "OS::Neutron::Pool",
                "Properties": {
                    "admin_state_up": True,
                    "description": "Haproxy pool from teplate",
                    "lb_method": "ROUND_ROBIN",
                    "monitors": [{"Ref": "HttpHM"}],
                    "name": "Haproxy pool",
                    "protocol": "HTTP",
                    "subnet_id": {"Ref": "Subnet"},
                    "vip": {
                        "subnet": {"Ref": "Subnet"},
                        "address": {"Ref": "vip_ip"},
                        "name": "Haproxy vip",
                        "protocol_port": 80,
                        "connection_limit": -1,
                        "admin_state_up": True,
                        "description": "Haproxy vip from template"
                    }
                }
            },
            "test_lb": {
                "Type": "OS::Neutron::LoadBalancer",
                "Properties": {
                    "pool_id": {"Ref": "HaproxyPool"},
                    "protocol_port": 80
                }
            }
        }
    }
    DEFAULT_LB_CONFIG = jsonutils.dumps(DEFAULT_LB_CONFIG_DICT)
    # Canonical HOT-format firewall template used as node config.
    DEFAULT_FW_CONFIG_DICT = {
        "heat_template_version": "2013-05-23",
        "resources": {
            'test_fw': {
                "type": "OS::Neutron::Firewall",
                "properties": {
                    "admin_state_up": True,
                    "firewall_policy_id": {
                        "get_resource": "Firewall_policy"},
                    "name": "testFirewall",
                    "description": "test Firewall"
                }
            },
            'test_fw_policy': {
                "type": "OS::Neutron::FirewallPolicy",
                "properties": {
                    "shared": False,
                    "description": "test firewall policy",
                    "name": "testFWPolicy",
                    "firewall_rules": [{
                        "get_resource": "Rule_1"}],
                    "audited": True
                }
            }
        }
    }
    DEFAULT_FW_CONFIG = jsonutils.dumps(DEFAULT_FW_CONFIG_DICT)
    # Must match HeatNodeDriver.vendor_name for the driver to own nodes.
    SERVICE_PROFILE_VENDOR = 'heat_based_node_driver'

    def setUp(self):
        """Shorten the stack wait, mock heatclient, and load the driver."""
        config.cfg.CONF.set_override('stack_action_wait_time',
                                     STACK_ACTION_WAIT_TIME,
                                     group='heat_node_driver')
        mock.patch(heatclient.__name__ + ".client.Client",
                   new=MockHeatClient).start()
        super(HeatNodeDriverTestCase, self).setUp(
            node_drivers=['heat_node_driver'],
            node_plumber='agnostic_plumber',
            core_plugin=test_gp_driver.CORE_PLUGIN)

    def _create_network(self, fmt, name, admin_state_up, **kwargs):
        """Override the routine for allowing the router:external attribute."""
        # attributes containing a colon should be passed with
        # a double underscore
        new_args = dict(itertools.izip(map(lambda x: x.replace('__', ':'),
                                           kwargs),
                                       kwargs.values()))
        arg_list = new_args.pop('arg_list', ()) + (external_net.EXTERNAL,)
        return super(HeatNodeDriverTestCase, self)._create_network(
            fmt, name, admin_state_up, arg_list=arg_list, **new_args)

    def test_manager_initialized(self):
        """The heat node driver is loaded first and initialized."""
        mgr = self.plugin.driver_manager
        self.assertIsInstance(mgr.ordered_drivers[0].obj,
                              heat_node_driver.HeatNodeDriver)
        for driver in mgr.ordered_drivers:
            self.assertTrue(driver.obj.initialized)

    def _create_profiled_servicechain_node(
            self, service_type=constants.LOADBALANCER, shared_profile=False,
            profile_tenant_id=None, profile_id=None, **kwargs):
        """Create a node backed by a (new or existing) service profile,
        defaulting the config to the canonical LB/FW template."""
        if not profile_id:
            prof = self.create_service_profile(
                service_type=service_type,
                shared=shared_profile,
                vendor=self.SERVICE_PROFILE_VENDOR,
                tenant_id=profile_tenant_id or self._tenant_id)[
                    'service_profile']
        else:
            prof = self.get_service_profile(profile_id)
        service_config = kwargs.get('config')
        if not service_config or service_config == '{}':
            if service_type == constants.FIREWALL:
                kwargs['config'] = self.DEFAULT_FW_CONFIG
            else:
                kwargs['config'] = self.DEFAULT_LB_CONFIG
        return self.create_servicechain_node(
            service_profile_id=prof['id'], **kwargs)
class TestServiceChainInstance(HeatNodeDriverTestCase):
def _get_node_instance_stacks(self, sc_node_id):
context = neutron_context.get_admin_context()
with context.session.begin(subtransactions=True):
return (context.session.query(
heat_node_driver.ServiceNodeInstanceStack).
filter_by(sc_node_id=sc_node_id).
all())
def test_invalid_service_type_rejected(self):
node_used = self._create_profiled_servicechain_node(
service_type="test")['servicechain_node']
spec_used = self.create_servicechain_spec(
nodes=[node_used['id']])['servicechain_spec']
provider = self.create_policy_target_group()['policy_target_group']
classifier = self.create_policy_classifier()['policy_classifier']
res = self.create_servicechain_instance(
provider_ptg_id=provider['id'],
classifier_id=classifier['id'],
servicechain_specs=[spec_used['id']],
expected_res_status=webob.exc.HTTPBadRequest.code)
self.assertEqual('NoDriverAvailableForAction',
res['NeutronError']['type'])
def test_node_create(self):
with mock.patch.object(heatClient.HeatClient,
'create') as stack_create:
stack_create.return_value = {'stack': {
'id': uuidutils.generate_uuid()}}
self._create_simple_service_chain()
expected_stack_name = mock.ANY
expected_stack_params = mock.ANY
stack_create.assert_called_once_with(
expected_stack_name,
self.DEFAULT_LB_CONFIG_DICT,
expected_stack_params)
def _get_pool_member_resource_dict(self, port):
member_ip = port['fixed_ips'][0]['ip_address']
member_name = 'mem-' + member_ip
member = {member_name: {
'Type': 'OS::Neutron::PoolMember',
'Properties': {
'protocol_port': '80',
'admin_state_up': True,
'pool_id': {'Ref': u'test_pool'},
'weight': 1,
'address': member_ip
}
}
}
return member
def _create_policy_target_port(self, policy_target_group_id):
pt = self.create_policy_target(
policy_target_group_id=policy_target_group_id)['policy_target']
req = self.new_show_request('ports', pt['port_id'], fmt=self.fmt)
port = self.deserialize(self.fmt,
req.get_response(self.api))['port']
return (pt, port)
def _create_external_policy(self, consumed_prs, routes=None):
with self.network(router__external=True, shared=True) as net:
with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
if not routes:
routes = [{'destination': '172.0.0.0/22', 'nexthop': None}]
self.create_external_segment(
shared=True,
name="default",
external_routes=routes,
subnet_id=sub['subnet']['id'])['external_segment']
return self.create_external_policy(
consumed_policy_rule_sets={consumed_prs: ''})
def _test_lb_node_create(self, consumer_external=False):
    """Instantiate an LB chain and verify the heat stack create call.

    Returns (expected_stack_template, provider_ptg, stack_id) so callers
    can continue with member add/delete and cleanup verification.
    """
    with mock.patch.object(heatClient.HeatClient,
                           'create') as stack_create:
        stack_create.return_value = {'stack': {
            'id': uuidutils.generate_uuid()}}
        node_id = self._create_profiled_servicechain_node(
            service_type=constants.LOADBALANCER)['servicechain_node']['id']
        spec = self.create_servicechain_spec(
            nodes=[node_id],
            expected_res_status=201)['servicechain_spec']
        prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
        provider = self.create_policy_target_group(
            provided_policy_rule_sets={prs['id']: ''})[
                'policy_target_group']

        # Two PTs exist before the chain is instantiated; both must show
        # up as pool members in the created stack template.
        _, port1 = self._create_policy_target_port(provider['id'])
        _, port2 = self._create_policy_target_port(provider['id'])

        # The consuming side is either an external policy (north-south)
        # or a plain PTG (east-west); either triggers instantiation.
        if consumer_external:
            self._create_external_policy(prs['id'])
        else:
            self.create_policy_target_group(
                consumed_policy_rule_sets={prs['id']: ''})

        created_stacks_map = self._get_node_instance_stacks(node_id)
        self.assertEqual(1, len(created_stacks_map))

        pool_member1 = self._get_pool_member_resource_dict(port1)
        pool_member2 = self._get_pool_member_resource_dict(port2)
        # Instantiating the chain invokes stack create
        expected_stack_template = copy.deepcopy(
            self.DEFAULT_LB_CONFIG_DICT)
        expected_stack_template['Resources'].update(pool_member1)
        expected_stack_template['Resources'].update(pool_member2)
        expected_stack_name = mock.ANY
        # TODO(Magesh): Verify expected_stack_params with IP address from
        # Network Service Policy
        expected_stack_params = {}
        stack_create.assert_called_once_with(
            expected_stack_name,
            expected_stack_template,
            expected_stack_params)
        return (expected_stack_template, provider,
                created_stacks_map[0].stack_id)
def _test_lb_dynamic_pool_member_add(self, expected_stack_template,
                                     provider, stack_id):
    """Add a PT to *provider* and verify the stack update adds a member.

    Returns (pt, pool_member_dict) for a subsequent delete check.
    """
    with mock.patch.object(heatClient.HeatClient,
                           'update') as stack_update:
        stack_update.return_value = {'stack': {'id': stack_id}}
        # Creating a PT updates the node, which should add the PT as an
        # LB pool member through a heat stack update.
        pt, port = self._create_policy_target_port(provider['id'])
        new_member = self._get_pool_member_resource_dict(port)
        expected_stack_template['Resources'].update(new_member)
        stack_update.assert_called_once_with(
            stack_id, expected_stack_template, {})
        return pt, new_member
def _test_dynamic_lb_pool_member_delete(self, pt, pool_member,
                                        expected_stack_template,
                                        stack_id):
    """Delete PT *pt* and verify its pool member leaves the heat stack."""
    # Deleting PT will update the node, thereby removing the Pool
    # Member from heat stack
    with mock.patch.object(heatClient.HeatClient,
                           'update') as stack_update:
        self.delete_policy_target(pt['id'])

        template_on_delete_pt = copy.deepcopy(expected_stack_template)
        # next(iter(...)) instead of .keys()[0]: dict views are not
        # indexable on Python 3, and pool_member has exactly one key.
        template_on_delete_pt['Resources'].pop(next(iter(pool_member)))
        expected_stack_id = stack_id
        expected_stack_params = {}
        stack_update.assert_called_once_with(
            expected_stack_id,
            template_on_delete_pt,
            expected_stack_params)
def _test_node_cleanup(self, ptg, stack_id):
    """Delete *ptg* after detaching its PRSs; expect one stack delete."""
    with mock.patch.object(heatClient.HeatClient,
                           'delete') as stack_delete:
        # Detaching the consumed PRSs makes the PTG deletable; deleting
        # it tears down the chain instance, which must delete the stack.
        self.update_policy_target_group(
            ptg['id'], consumed_policy_rule_sets={},
            expected_res_status=200)
        self.delete_policy_target_group(ptg['id'], expected_res_status=204)
        stack_delete.assert_called_once_with(stack_id)
def test_lb_node_operations(self):
    """LB node lifecycle: create, member add, member remove, cleanup."""
    template, provider, stack_id = self._test_lb_node_create()
    pt, member = self._test_lb_dynamic_pool_member_add(
        template, provider, stack_id)
    self._test_dynamic_lb_pool_member_delete(
        pt, member, template, stack_id)
    self._test_node_cleanup(provider, stack_id)
def test_lb_redirect_from_external(self):
    """Same LB lifecycle as above, but consumed via an external policy."""
    template, provider, stack_id = self._test_lb_node_create(
        consumer_external=True)
    pt, member = self._test_lb_dynamic_pool_member_add(
        template, provider, stack_id)
    self._test_dynamic_lb_pool_member_delete(
        pt, member, template, stack_id)
    self._test_node_cleanup(provider, stack_id)
def _create_fwredirect_ruleset(self, classifier_port, classifier_protocol):
    """Create a FW node, spec and a REDIRECT PRS for the given classifier.

    Returns (policy_rule_set, servicechain_node_id).
    """
    node_id = self._create_profiled_servicechain_node(
        service_type=constants.FIREWALL)['servicechain_node']['id']
    spec = self.create_servicechain_spec(
        nodes=[node_id],
        expected_res_status=201)['servicechain_spec']
    redirect = self.create_policy_action(
        action_type='REDIRECT',
        action_value=spec['id'])['policy_action']
    classifier = self.create_policy_classifier(
        port_range=classifier_port, protocol=classifier_protocol,
        direction='bi')['policy_classifier']
    rule = self.create_policy_rule(
        policy_actions=[redirect['id']],
        policy_classifier_id=classifier['id'])['policy_rule']
    prs = self.create_policy_rule_set(policy_rules=[rule['id']])
    return prs['policy_rule_set'], node_id
def _get_ptg_cidr(self, ptg):
    """Return the CIDR of the PTG's first subnet."""
    show_req = self.new_show_request(
        'subnets', ptg['subnets'][0], fmt=self.fmt)
    subnet = self.deserialize(
        self.fmt, show_req.get_response(self.api))['subnet']
    return subnet['cidr']
def _get_firewall_rule_dict(self, rule_name, protocol, port, provider_cidr,
consumer_cidr):
fw_rule = {rule_name: {'type': "OS::Neutron::FirewallRule",
'properties': {
"protocol": protocol,
"enabled": True,
"destination_port": port,
"action": "allow",
"destination_ip_address": provider_cidr,
"source_ip_address": consumer_cidr
}
}
}
return fw_rule
def test_fw_node_east_west(self):
    """East-west FW chain: one stack create with a provider/consumer rule."""
    classifier_port = '66'
    classifier_protocol = 'udp'
    with mock.patch.object(heatClient.HeatClient,
                           'create') as stack_create:
        stack_create.return_value = {'stack': {
            'id': uuidutils.generate_uuid()}}
        prs, node_id = self._create_fwredirect_ruleset(
            classifier_port, classifier_protocol)
        provider = self.create_policy_target_group(
            provided_policy_rule_sets={prs['id']: ''})[
                'policy_target_group']
        consumer = self.create_policy_target_group(
            consumed_policy_rule_sets={prs['id']: ''})[
                'policy_target_group']

        # Creating provider+consumer instantiates the chain: exactly one
        # stack is recorded for the node.
        created_stacks_map = self._get_node_instance_stacks(node_id)
        self.assertEqual(1, len(created_stacks_map))
        stack_id = created_stacks_map[0].stack_id

        # The driver is expected to derive the allow rule from the
        # classifier plus the two PTG subnet CIDRs.
        provider_cidr = self._get_ptg_cidr(provider)
        consumer_cidr = self._get_ptg_cidr(consumer)
        fw_rule = self._get_firewall_rule_dict(
            'Rule_1', classifier_protocol, classifier_port,
            provider_cidr, consumer_cidr)
        expected_stack_template = copy.deepcopy(
            self.DEFAULT_FW_CONFIG_DICT)
        expected_stack_template['resources'].update(fw_rule)
        expected_stack_name = mock.ANY
        expected_stack_params = {}
        stack_create.assert_called_once_with(
            expected_stack_name,
            expected_stack_template,
            expected_stack_params)
        self._test_node_cleanup(consumer, stack_id)
def _test_fw_node_north_south(self, consumer_cidrs):
    """N-S FW chain: expect one allow rule per external route CIDR."""
    classifier_port = '66'
    classifier_protocol = 'udp'
    with mock.patch.object(heatClient.HeatClient,
                           'create') as stack_create:
        stack_create.return_value = {'stack': {
            'id': uuidutils.generate_uuid()}}
        prs, node_id = self._create_fwredirect_ruleset(
            classifier_port, classifier_protocol)
        provider = self.create_policy_target_group(
            provided_policy_rule_sets={prs['id']: ''})[
                'policy_target_group']

        # Consumption comes from an external policy whose segment routes
        # cover each requested consumer CIDR.
        routes = []
        for consumer_cidr in consumer_cidrs:
            routes.append({'destination': consumer_cidr, 'nexthop': None})
        self._create_external_policy(
            prs['id'], routes=routes)['external_policy']

        created_stacks_map = self._get_node_instance_stacks(node_id)
        self.assertEqual(1, len(created_stacks_map))
        stack_id = created_stacks_map[0].stack_id

        # Rebuild the expected template: the base FW policy starts with
        # no rules, then gains one numbered rule per consumer CIDR, each
        # referenced from the policy's firewall_rules list.
        expected_stack_template = copy.deepcopy(
            self.DEFAULT_FW_CONFIG_DICT)
        expected_stack_template['resources']['test_fw_policy'][
            'properties']['firewall_rules'] = []
        provider_cidr = self._get_ptg_cidr(provider)
        rule_num = 1
        for consumer_cidr in consumer_cidrs:
            rule_name = 'Rule_' + str(rule_num)
            fw_rule = self._get_firewall_rule_dict(
                rule_name, classifier_protocol, classifier_port,
                provider_cidr, consumer_cidr)
            rule_num = rule_num + 1
            expected_stack_template['resources'].update(fw_rule)
            expected_stack_template['resources']['test_fw_policy'][
                'properties']['firewall_rules'].append(
                    {'get_resource': rule_name})
        expected_stack_name = mock.ANY
        expected_stack_params = {}
        stack_create.assert_called_once_with(
            expected_stack_name,
            expected_stack_template,
            expected_stack_params)
        self._test_node_cleanup(provider, stack_id)
def test_fw_node_north_south_single_external_cidr(self):
    """North-south firewall chain with a single external route CIDR."""
    self._test_fw_node_north_south(['172.0.0.0/22'])
def test_fw_node_north_south_multiple_external_cidr(self):
    """North-south firewall chain with multiple external route CIDRs."""
    self._test_fw_node_north_south(['172.0.0.0/22', '20.0.0.0/16'])
def test_node_update(self):
    """A node name-only update still results in one heat stack update."""
    with mock.patch.object(heatClient.HeatClient,
                           'create') as stack_create:
        stack_create.return_value = {'stack': {
            'id': uuidutils.generate_uuid()}}
        prof = self.create_service_profile(
            service_type=constants.LOADBALANCER,
            vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
        node = self.create_servicechain_node(
            service_profile_id=prof['id'],
            config=self.DEFAULT_LB_CONFIG,
            expected_res_status=201)['servicechain_node']
        self._create_chain_with_nodes(node_ids=[node['id']])
        with mock.patch.object(heatClient.HeatClient,
                               'update') as stack_update:
            self.update_servicechain_node(
                node['id'],
                name='newname',
                expected_res_status=200)
            # NOTE(review): the driver issues a stack update even for a
            # name-only node change ("Name update should not update
            # stack ??") — confirm whether this should be a heat no-op.
            stack_update.assert_called_once_with(
                mock.ANY, mock.ANY, mock.ANY)
def test_node_delete(self):
    """Provider PTG delete succeeds once the provided PRSs are removed."""
    with mock.patch.object(heatClient.HeatClient,
                           'create') as stack_create:
        stack_create.return_value = {
            'stack': {'id': uuidutils.generate_uuid()}}
        provider, _, _ = self._create_simple_service_chain()
        with mock.patch.object(heatClient.HeatClient, 'delete'):
            # Removing the provided PRSs makes the PTG deletable again.
            self.update_policy_target_group(
                provider['id'],
                provided_policy_rule_sets={},
                expected_res_status=200)
            self.delete_policy_target_group(
                provider['id'], expected_res_status=204)
def test_wait_stack_delete_for_instance_delete(self):
    """Instance delete polls heat until DELETE_COMPLETE or gives up."""
    with mock.patch.object(heatClient.HeatClient,
                           'create') as stack_create:
        stack_create.return_value = {'stack': {
            'id': uuidutils.generate_uuid()}}
        provider, _, _ = self._create_simple_service_chain()

        # Verify that as part of delete service chain instance we call
        # get method for heat stack 5 times before giving up if the state
        # does not become DELETE_COMPLETE
        with mock.patch.object(heatClient.HeatClient,
                               'delete') as stack_delete:
            with mock.patch.object(heatClient.HeatClient,
                                   'get') as stack_get:
                stack_get.return_value = MockStackObject('PENDING_DELETE')
                # Removing the PRSs will make the PTG deletable again
                self.update_policy_target_group(
                    provider['id'],
                    provided_policy_rule_sets={},
                    expected_res_status=200)
                self.delete_policy_target_group(provider['id'],
                                                expected_res_status=204)
                stack_delete.assert_called_once_with(mock.ANY)
                # Poll interval is presumably 5 seconds, hence
                # WAIT_TIME / 5 get calls — TODO confirm against the
                # driver's wait loop.
                self.assertEqual(STACK_ACTION_WAIT_TIME / 5,
                                 stack_get.call_count)

        # Create and delete another service chain instance and verify that
        # we call get method for heat stack only once if the stack state
        # is DELETE_COMPLETE
        provider, _, _ = self._create_simple_service_chain()
        with mock.patch.object(heatClient.HeatClient,
                               'delete') as stack_delete:
            with mock.patch.object(heatClient.HeatClient,
                                   'get') as stack_get:
                stack_get.return_value = MockStackObject(
                    'DELETE_COMPLETE')
                # Removing the PRSs will make the PTG deletable again
                self.update_policy_target_group(
                    provider['id'],
                    provided_policy_rule_sets={},
                    expected_res_status=200)
                self.delete_policy_target_group(provider['id'],
                                                expected_res_status=204)
                stack_delete.assert_called_once_with(mock.ANY)
                self.assertEqual(1, stack_get.call_count)
def test_stack_not_found_ignored(self):
    """A heat 404 on stack delete must not fail PTG/chain deletion."""
    # Keep a handle on the patcher and register its stop as a cleanup:
    # the original `.start()` without a matching stop leaked the patched
    # heat client into every subsequently run test.
    patcher = mock.patch(heatclient.__name__ + ".client.Client",
                         new=MockHeatClientDeleteNotFound)
    patcher.start()
    self.addCleanup(patcher.stop)
    provider, _, _ = self._create_simple_service_chain()
    # Removing the PRSs will make the PTG deletable again
    self.update_policy_target_group(provider['id'],
                                    provided_policy_rule_sets={},
                                    expected_res_status=200)
    self.delete_policy_target_group(provider['id'],
                                    expected_res_status=204)

View File

@ -50,6 +50,9 @@ GP_PLUGIN_KLASS = (
class NodeCompositionPluginTestCase(
test_base.TestGroupPolicyPluginGroupResources):
DEFAULT_LB_CONFIG = '{}'
SERVICE_PROFILE_VENDOR = 'dummy'
def setUp(self, core_plugin=None, gp_plugin=None, node_drivers=None,
node_plumber=None):
if node_drivers:
@ -91,12 +94,14 @@ class NodeCompositionPluginTestCase(
def _create_simple_service_chain(self, number_of_nodes=1):
prof = self.create_service_profile(
service_type='LOADBALANCER')['service_profile']
service_type='LOADBALANCER',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node_ids = []
for x in xrange(number_of_nodes):
node_ids.append(self.create_servicechain_node(
service_profile_id=prof['id'],
config=self.DEFAULT_LB_CONFIG,
expected_res_status=201)['servicechain_node']['id'])
return self._create_chain_with_nodes(node_ids)
@ -134,17 +139,21 @@ class NodeCompositionPluginTestCase(
# Verify Context attributes for simple config
plugin_context = n_context.get_admin_context()
profile = self.create_service_profile(
service_type="TYPE")['service_profile']
service_type="LOADBALANCER",
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node = self.create_servicechain_node(
service_profile_id=profile['id'], config='{}')['servicechain_node']
service_profile_id=profile['id'],
config=self.DEFAULT_LB_CONFIG)['servicechain_node']
spec = self.create_servicechain_spec(
nodes=[node['id']])['servicechain_spec']
provider = self.create_policy_target_group()['policy_target_group']
consumer = self.create_policy_target_group()['policy_target_group']
management = self.create_policy_target_group()['policy_target_group']
classifier = self.create_policy_classifier()['policy_classifier']
instance = self.create_servicechain_instance(
provider_ptg_id=provider['id'], consumer_ptg_id=consumer['id'],
servicechain_specs=[spec['id']])['servicechain_instance']
servicechain_specs=[spec['id']], classifier_id=classifier['id'])[
'servicechain_instance']
# Verify created without errors
ctx = ncp_context.get_node_driver_context(
@ -172,13 +181,16 @@ class NodeCompositionPluginTestCase(
def test_context_relevant_specs(self):
plugin_context = n_context.get_admin_context()
node_used = self._create_profiled_servicechain_node(
service_type="TYPE", config='{}')['servicechain_node']
service_type="LOADBALANCER",
config=self.DEFAULT_LB_CONFIG)['servicechain_node']
spec_used = self.create_servicechain_spec(
nodes=[node_used['id']])['servicechain_spec']
provider = self.create_policy_target_group()['policy_target_group']
classifier = self.create_policy_classifier()['policy_classifier']
instance = self.create_servicechain_instance(
provider_ptg_id=provider['id'],
classifier_id=classifier['id'],
servicechain_specs=[spec_used['id']])['servicechain_instance']
ctx = ncp_context.get_node_driver_context(
@ -208,6 +220,7 @@ class NodeCompositionPluginTestCase(
prof = self.create_service_profile(
service_type='LOADBALANCER', shared=True,
vendor=self.SERVICE_PROFILE_VENDOR,
tenant_id='admin')['service_profile']
# Create 2 nodes with different parameters
@ -275,10 +288,12 @@ class NodeCompositionPluginTestCase(
resource='node', msg='reason')
prof = self.create_service_profile(
service_type='LOADBALANCER')['service_profile']
service_type='LOADBALANCER',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node_id = self.create_servicechain_node(
service_profile_id=prof['id'],
config=self.DEFAULT_LB_CONFIG,
expected_res_status=201)['servicechain_node']['id']
spec = self.create_servicechain_spec(
@ -298,10 +313,12 @@ class NodeCompositionPluginTestCase(
def test_update_instantiated_profile_fails(self):
prof = self.create_service_profile(
service_type='LOADBALANCER')['service_profile']
service_type='LOADBALANCER',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node_id = self.create_servicechain_node(
service_profile_id=prof['id'],
config=self.DEFAULT_LB_CONFIG,
expected_res_status=201)['servicechain_node']['id']
spec = self.create_servicechain_spec(
@ -327,16 +344,20 @@ class NodeCompositionPluginTestCase(
# This happens without error
profile = self.create_service_profile(
service_type="TYPE")['service_profile']
service_type="TYPE",
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node = self.create_servicechain_node(
service_profile_id=profile['id'], config='{}')['servicechain_node']
service_profile_id=profile['id'],
config=self.DEFAULT_LB_CONFIG)['servicechain_node']
spec = self.create_servicechain_spec(
nodes=[node['id']])['servicechain_spec']
provider = self.create_policy_target_group()['policy_target_group']
consumer = self.create_policy_target_group()['policy_target_group']
classifier = self.create_policy_classifier()['policy_classifier']
self.create_servicechain_instance(
provider_ptg_id=provider['id'], consumer_ptg_id=consumer['id'],
servicechain_specs=[spec['id']], expected_res_status=201)
servicechain_specs=[spec['id']], classifier_id=classifier['id'],
expected_res_status=201)
def test_chain_fails_if_no_drivers_available(self):
self._add_node_driver('test')
@ -348,23 +369,29 @@ class NodeCompositionPluginTestCase(
create_2.side_effect = n_exc.NeutronException()
profile = self.create_service_profile(
service_type="TYPE")['service_profile']
service_type="TYPE",
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node = self.create_servicechain_node(
service_profile_id=profile['id'], config='{}')['servicechain_node']
service_profile_id=profile['id'],
config=self.DEFAULT_LB_CONFIG)['servicechain_node']
spec = self.create_servicechain_spec(
nodes=[node['id']])['servicechain_spec']
provider = self.create_policy_target_group()['policy_target_group']
consumer = self.create_policy_target_group()['policy_target_group']
classifier = self.create_policy_classifier()['policy_classifier']
self.create_servicechain_instance(
provider_ptg_id=provider['id'], consumer_ptg_id=consumer['id'],
servicechain_specs=[spec['id']], expected_res_status=400)
servicechain_specs=[spec['id']], classifier_id=classifier['id'],
expected_res_status=400)
def test_multiple_nodes_update(self):
update = self.driver.update = mock.Mock()
prof = self.create_service_profile(
service_type='LOADBALANCER')['service_profile']
service_type='LOADBALANCER',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node = self.create_servicechain_node(
service_profile_id=prof['id'], config='{}')['servicechain_node']
service_profile_id=prof['id'],
config=self.DEFAULT_LB_CONFIG)['servicechain_node']
self._create_chain_with_nodes([node['id']])
self.update_servicechain_node(node['id'], name='somethingelse')
@ -376,14 +403,45 @@ class NodeCompositionPluginTestCase(
self.update_servicechain_node(node['id'], name='somethingelse')
self.assertEqual(3, update.call_count)
def test_instantiated_spec_node_update_rejected(self):
prof = self.create_service_profile(
service_type='LOADBALANCER',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node1_id = self.create_servicechain_node(
service_profile_id=prof['id'],
config=self.DEFAULT_LB_CONFIG,
expected_res_status=201)['servicechain_node']['id']
node2_id = self.create_servicechain_node(
service_profile_id=prof['id'],
config=self.DEFAULT_LB_CONFIG,
expected_res_status=201)['servicechain_node']['id']
spec = self.create_servicechain_spec(
nodes=[node1_id, node2_id],
expected_res_status=201)['servicechain_spec']
prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
self.create_policy_target_group(
provided_policy_rule_sets={prs['id']: ''})
self.create_policy_target_group(
consumed_policy_rule_sets={prs['id']: ''})
res = self.update_servicechain_spec(spec['id'],
nodes=[node1_id],
expected_res_status=400)
self.assertEqual('InuseSpecNodeUpdateNotAllowed',
res['NeutronError']['type'])
def test_relevant_ptg_update(self):
add = self.driver.update_policy_target_added = mock.Mock()
rem = self.driver.update_policy_target_removed = mock.Mock()
prof = self.create_service_profile(
service_type='LOADBALANCER')['service_profile']
service_type='LOADBALANCER',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node = self.create_servicechain_node(
service_profile_id=prof['id'],
config=self.DEFAULT_LB_CONFIG,
expected_res_status=201)['servicechain_node']
spec = self.create_servicechain_spec(
@ -422,9 +480,11 @@ class NodeCompositionPluginTestCase(
rem = self.driver.update_policy_target_removed = mock.Mock()
prof = self.create_service_profile(
service_type='LOADBALANCER')['service_profile']
service_type='LOADBALANCER',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
node = self.create_servicechain_node(
service_profile_id=prof['id'],
config=self.DEFAULT_LB_CONFIG,
expected_res_status=201)['servicechain_node']
spec = self.create_servicechain_spec(
@ -466,7 +526,9 @@ class AgnosticChainPlumberTestCase(NodeCompositionPluginTestCase):
self.driver.get_plumbing_info.return_value = {}
def _create_simple_chain(self):
node = self._create_profiled_servicechain_node()['servicechain_node']
node = self._create_profiled_servicechain_node(
service_type="LOADBALANCER",
config=self.DEFAULT_LB_CONFIG)['servicechain_node']
spec = self.create_servicechain_spec(
nodes=[node['id']])['servicechain_spec']

View File

@ -43,8 +43,11 @@ function confirm_server_active {
fi
}
gbp servicechain-node-create loadbalancer-node --template-file $TOP_DIR/gbp-templates/firewall-lb-servicechain/fw.template --servicetype FIREWALL
gbp servicechain-node-create firewall-node --template-file $TOP_DIR/gbp-templates/firewall-lb-servicechain/lb.template --servicetype LOADBALANCER
gbp service-profile-create --vendor heat_based_node_driver --insertion-mode l3 --servicetype FIREWALL fw-profile
gbp service-profile-create --vendor heat_based_node_driver --insertion-mode l3 --servicetype LOADBALANCER lb-profile
gbp servicechain-node-create loadbalancer-node --template-file $TOP_DIR/gbp-templates/firewall-lb-servicechain/lb.template --service-profile lb-profile
gbp servicechain-node-create firewall-node --template-file $TOP_DIR/gbp-templates/firewall-lb-servicechain/fw.template --service-profile fw-profile
gbp servicechain-spec-create firewall-loadbalancer-spec --description spec --nodes "firewall-node loadbalancer-node"
@ -143,6 +146,9 @@ gbp servicechain-spec-delete firewall-loadbalancer-spec
gbp servicechain-node-delete loadbalancer-node
gbp servicechain-node-delete firewall-node
gbp service-profile-delete lb-profile
gbp service-profile-delete fw-profile
set +o xtrace
echo "*********************************************************************"
echo "SUCCESS: End DevStack Exercise: $0"

View File

@ -7,14 +7,10 @@
"Description": "Pool Subnet CIDR, on which VIP port should be created",
"Type": "String"
},
"PoolMemberIPs": {
"Description": "Pool Member IP Address",
"Type": "String"
},
"vip_ip": {
"Description": "VIP IP Address",
"Type": "String"
}
"vip_ip": {
"Description": "VIP IP Address",
"Type": "String"
}
},
"Resources" : {
@ -42,13 +38,13 @@
"protocol": "HTTP",
"subnet_id": {"Ref":"Subnet"},
"vip": {
"subnet": {"Ref":"Subnet"},
"address": {"Ref":"vip_ip"},
"name": "Haproxy vip",
"protocol_port": 80,
"connection_limit": -1,
"admin_state_up": true,
"description": "Haproxy vip from template"
"subnet": {"Ref":"Subnet"},
"address": {"Ref":"vip_ip"},
"name": "Haproxy vip",
"protocol_port": 80,
"connection_limit": -1,
"admin_state_up": true,
"description": "Haproxy vip from template"
}
}
},
@ -58,16 +54,6 @@
"pool_id": {"Ref":"HaproxyPool"},
"protocol_port": 80
}
},
"Member1": {
"Type": "OS::Neutron::PoolMember",
"Properties": {
"address": {"Ref":"PoolMemberIPs"},
"admin_state_up": true,
"pool_id": {"Ref":"HaproxyPool"},
"protocol_port": 80,
"weight": 1
}
}
}
}

View File

@ -5,7 +5,8 @@ RABBIT_PASSWORD=abc123
SERVICE_PASSWORD=$ADMIN_PASSWORD
SERVICE_TOKEN=abc123
Q_SERVICE_PLUGIN_CLASSES=neutron.services.l3_router.l3_router_plugin.L3RouterPlugin,group_policy,servicechain
Q_SERVICE_PLUGIN_CLASSES=neutron.services.l3_router.l3_router_plugin.L3RouterPlugin,group_policy,ncp
# Using group-policy branches
# ---------------------------
@ -79,6 +80,10 @@ policy_drivers=implicit_policy,resource_mapping
servicechain_drivers = simplechain_driver
#servicechain_drivers = chain_with_two_arm_appliance_driver
[node_composition_plugin]
node_plumber = agnostic_plumber
node_drivers = heat_node_driver
[quotas]
default_quota = -1
quota_network = -1

View File

@ -61,6 +61,7 @@ function check_residual_resources {
gbp servicechain-instance-list
gbp servicechain-node-list
gbp servicechain-spec-list
gbp service-profile-list
gbp network-service-policy-list
gbp nat-pool-list
gbp external-policy-list

View File

@ -59,6 +59,7 @@ gbpservice.neutron.servicechain.servicechain_drivers =
oneconvergence_servicechain_driver = gbpservice.neutron.services.servicechain.plugins.msc.drivers.oneconvergence_servicechain_driver:OneconvergenceServiceChainDriver
gbpservice.neutron.servicechain.ncp_drivers =
node_dummy = gbpservice.neutron.services.servicechain.plugins.ncp.node_drivers.dummy_driver:NoopNodeDriver
heat_node_driver = gbpservice.neutron.services.servicechain.plugins.ncp.node_drivers.heat_node_driver:HeatNodeDriver
gbpservice.neutron.servicechain.ncp_plumbers =
dummy_plumber = gbpservice.neutron.services.servicechain.plugins.ncp.node_plumbers.dummy_plumber:NoopPlumber
agnostic_plumber = gbpservice.neutron.services.servicechain.plugins.ncp.node_plumbers.chain_agnostic_plumber:ChainAgnosticPlumber