Removed legacy service chain code

Change-Id: Ie7bcf691a48bcaedb9f5a8413136592608eb3897
This commit is contained in:
pulkitvajpayee07 2022-04-11 10:49:03 +05:30
parent ceeeb71a0a
commit e170eae3e0
214 changed files with 0 additions and 45606 deletions

View File

@ -1,189 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gbpservice.contrib.nfp.config_orchestrator.common import (
topics as a_topics)
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.lib import transport
from gbpservice.nfp.orchestrator.openstack import openstack_driver
from neutron_lib.agent import topics as n_topics
from neutron_lib import constants as n_constants
from neutron_lib import rpc as n_rpc
import oslo_messaging as messaging
LOG = nfp_logging.getLogger(__name__)
def prepare_request_data(context, resource, resource_type,
                         resource_data, service_vendor=None):
    """Build the configurator request payload for a single resource.

    Returns a dict with an 'info' header (context, service type and
    vendor) and a one-element 'config' list carrying the resource data.
    """
    info = {
        'context': context,
        'service_type': resource_type,
        'service_vendor': service_vendor,
    }
    config_entry = {
        'resource': resource,
        'resource_data': resource_data,
    }
    return {'info': info, 'config': [config_entry]}
def _filter_data(routers, networks, filters):
# filter routers and networks data and formulate
# dictionary of subnets, routers and ports for the
# given tenant.
tenant_id = filters['tenant_id'][0]
_filtered_routers = []
_filtered_subnets = []
_filtered_ports = []
_filtered_networks = []
for router in routers:
if router['tenant_id'] == tenant_id:
_filtered_routers.append({'id': router['id']})
for network in networks:
if network['tenant_id'] == tenant_id:
subnets = network['subnets']
ports = network['ports']
_filtered_networks.append(
{'id': network['id'],
'tenant_id': network['tenant_id'],
'provider:segmentation_id': network[
'provider:segmentation_id'],
'provider:network_type': network[
'provider:network_type'],
'shared': network['shared'],
'router:external': network['router:external']})
for subnet in subnets:
if subnet['tenant_id'] == tenant_id:
_filtered_subnets.append(
{'id': subnet['id'],
'cidr': subnet['cidr'],
'gateway_ip': subnet['gateway_ip'],
'network_id': subnet['network_id']})
for port in ports:
if port['tenant_id'] == tenant_id:
_filtered_ports.append(
{'id': port['id'],
'fixed_ips': port['fixed_ips'],
'binding:host_id': port['binding:host_id'],
'network_id': port['network_id']})
return {'subnets': _filtered_subnets,
'routers': _filtered_routers,
'ports': _filtered_ports,
'networks': _filtered_networks}
def get_core_context(context, filters, config):
    """Collect the tenant's core resources via the DHCP RPC plugin.

    Routers are intentionally passed through as an empty list; only
    network data (with nested subnets/ports) is fetched and filtered.
    """
    networks = get_networks(context, config)
    return _filter_data([], networks, filters)
def get_dhcp_agent_host(config):
    """Return the host of one live DHCP agent, or None.

    Any failure (keystone/neutron unreachable, no live agents) is
    logged and swallowed so callers can degrade gracefully.
    """
    try:
        neutron = openstack_driver.NeutronClient(config)
        keystone = openstack_driver.KeystoneClient(config)
        admin_token = keystone.get_admin_token()
        live_dhcp_filter = {'agent_type': 'DHCP agent', 'alive': True}
        agents = neutron.get_agents(admin_token, live_dhcp_filter)
        if agents:
            return agents[0].get('host', None)
    except Exception as exc:
        LOG.error("Failed to get dhcp agent host : %(exc)s",
                  {'exc': exc})
def get_networks(context, config):
    """Fetch active network info from the DHCP plugin over RPC.

    Returns [] when no live DHCP agent host can be determined.
    """
    host = get_dhcp_agent_host(config)
    if not host:
        return []
    target = messaging.Target(
        topic=n_topics.PLUGIN,
        namespace=n_constants.RPC_NAMESPACE_DHCP_PLUGIN,
        version='1.0')
    cctxt = n_rpc.get_client(target).prepare(version='1.1')
    return cctxt.call(context, 'get_active_networks_info', host=host)
def _prepare_structure(network_function_details, ports_info,
mngmt_port_info, monitor_port_info):
return {'nfi_ports_map': {
network_function_details[
'network_function_instance'][
'id']: ports_info},
'nfi_nfd_map': {
network_function_details[
'network_function_instance'][
'id']: {
'nfd': network_function_details[
'network_function_device'],
'nfd_mgmt_port': mngmt_port_info,
'nfd_monitoring_port': None,
'nfd_monitoring_port_network': network_function_details[
'network_function_device'][
'monitoring_port_network']}},
'nfi': [network_function_details['network_function_instance']],
'nf': network_function_details['network_function']
}
def get_network_function_details(context, network_function_id):
    """Look up a network function via the NSO RPC service.

    Returns the 'network_function' dict, or None if the RPC call or
    the lookup fails (the error is logged).
    """
    try:
        rpc_nso_client = transport.RPCClient(a_topics.NFP_NSO_TOPIC)
        details = rpc_nso_client.cctxt.call(
            context,
            'get_network_function_details',
            network_function_id=network_function_id)
        LOG.debug(" %s " % (details))
        return details['network_function']
    except Exception as e:
        LOG.error("Failed to get network function details of "
                  "network_function_id %(network_function_id)s : %(ex)s ",
                  {'ex': e, 'network_function_id': network_function_id})
def get_network_function_map(context, network_function_id):
    """Build the nfi/nfd request map for a network function.

    Fetches the network function context from the NSO RPC service and
    shapes it with _prepare_structure.  Returns None on failure (the
    error is logged).
    """
    request_data = None
    try:
        rpc_nso_client = transport.RPCClient(a_topics.NFP_NSO_TOPIC)
        nf_context = rpc_nso_client.cctxt.call(
            context,
            'get_network_function_context',
            network_function_id=network_function_id)
        request_data = _prepare_structure(
            nf_context['network_function_details'],
            nf_context['ports_info'],
            nf_context['mngmt_port_info'],
            nf_context['monitor_port_info'])
        LOG.debug(" %s " % (request_data))
        return request_data
    except Exception as e:
        LOG.error("Failed to get network function map of "
                  "network_function_id %(network_function_id)s : %(ex)s ",
                  {'ex': e, 'network_function_id': network_function_id})
        return request_data

View File

@ -1,20 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Constants to extend status strings in neutron.plugins.common.constants
# Operational status values for LBaaS v2 entities.
ONLINE = 'ONLINE'
OFFLINE = 'OFFLINE'
DEGRADED = 'DEGRADED'
DISABLED = 'DISABLED'
NO_MONITOR = 'NO_MONITOR'
# Listener protocol that requires TLS termination at the loadbalancer.
PROTOCOL_TERMINATED_HTTPS = 'TERMINATED_HTTPS'

View File

@ -1,23 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# RPC topics the NFP config-orchestrator agents listen on.
FW_NFP_CONFIGAGENT_TOPIC = 'nfp-firewall-agent'
LBV2_NFP_CONFIGAGENT_TOPIC = 'nfp-lbaasv2-agent'
VPN_NFP_CONFIGAGENT_TOPIC = 'nfp-vpn_agent'
# Topic of the NFP service orchestrator.
NFP_NSO_TOPIC = "nfp-service-orchestrator"
# Topics of the corresponding neutron service plugins.
FW_NFP_PLUGIN_TOPIC = 'q-firewall-plugin'
LBV2_NFP_PLUGIN_TOPIC = 'n-lbaasv2-plugin'
VPN_NFP_PLUGIN_TOPIC = 'vpn_plugin'
# Configurator-side topics for device/service orchestration and
# notification traffic.
DEVICE_ORCH_TOPIC = 'nfp-configurator-ndo'
SERVICE_ORCH_TOPIC = 'nfp-configurator-nso'
CONFIG_ORCH_TOPIC = 'nfp-nco-notification-topic'

View File

@ -1,179 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import copy
from gbpservice.contrib.nfp.config_orchestrator.common import common
from gbpservice.nfp.common import constants as const
from gbpservice.nfp.common import data_formatter as df
from gbpservice.nfp.common import utils
from gbpservice.nfp.core import context as module_context
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.lib import transport
from neutron_fwaas.db.firewall import firewall_db
from oslo_log import helpers as log_helpers
import oslo_messaging as messaging
LOG = nfp_logging.getLogger(__name__)
"""
RPC handler for Firewall service
"""
class FwAgent(firewall_db.Firewall_db_mixin):
    """RPC handler that relays firewall CRUD operations from neutron
    to the NFP configurator, enriching each request with firewall DB
    data and the NFP network function context."""

    RPC_API_VERSION = '1.0'
    target = messaging.Target(version=RPC_API_VERSION)

    def __init__(self, conf, sc):
        super(FwAgent, self).__init__()
        self._conf = conf
        self._sc = sc
        # Shortcut to the Firewall_db_mixin DB accessors.
        self._db_inst = super(FwAgent, self)

    def _get_firewalls(self, context, tenant_id,
                       firewall_policy_id, description):
        """Return the tenant's firewalls for a policy, overwriting each
        firewall's description with *description*."""
        filters = {'tenant_id': [tenant_id],
                   'firewall_policy_id': [firewall_policy_id]}
        args = {'context': context, 'filters': filters}
        firewalls = self._db_inst.get_firewalls(**args)
        for firewall in firewalls:
            firewall['description'] = description
        return firewalls

    def _get_firewall_policies(self, context, tenant_id,
                               firewall_policy_id, description):
        """Return the firewall policy rows matching the given id."""
        filters = {'tenant_id': [tenant_id],
                   'id': [firewall_policy_id]}
        args = {'context': context, 'filters': filters}
        firewall_policies = self._db_inst.get_firewall_policies(**args)
        return firewall_policies

    def _get_firewall_rules(self, context, tenant_id,
                            firewall_policy_id, description):
        """Return the firewall rules bound to the given policy."""
        filters = {'tenant_id': [tenant_id],
                   'firewall_policy_id': [firewall_policy_id]}
        args = {'context': context, 'filters': filters}
        firewall_rules = self._db_inst.get_firewall_rules(**args)
        return firewall_rules

    def _get_firewall_context(self, **kwargs):
        """Aggregate firewalls, policies and rules into one dict."""
        firewalls = self._get_firewalls(**kwargs)
        firewall_policies = self._get_firewall_policies(**kwargs)
        firewall_rules = self._get_firewall_rules(**kwargs)
        return {'firewalls': firewalls,
                'firewall_policies': firewall_policies,
                'firewall_rules': firewall_rules}

    def _context(self, **kwargs):
        """Build the firewall service_info snapshot; an admin context
        is rescoped to its own tenant_id first."""
        context = kwargs.get('context')
        if context.is_admin:
            kwargs['tenant_id'] = context.tenant_id
        db = self._get_firewall_context(**kwargs)
        return db

    def _prepare_resource_context_dicts(self, **kwargs):
        """Return (plain context dict, context dict enriched with
        'service_info' and 'resource_data') for the configurator."""
        # Prepare context_dict
        context = kwargs.get('context')
        context_resource_data = kwargs.pop('context_resource_data')
        ctx_dict = context.to_dict()
        # Collecting db entry required by configurator.
        # Adding service_info to neutron context and sending
        # dictionary format to the configurator.
        db = self._context(**kwargs)
        rsrc_ctx_dict = copy.deepcopy(ctx_dict)
        rsrc_ctx_dict.update({'service_info': db})
        rsrc_ctx_dict.update({'resource_data': context_resource_data})
        return ctx_dict, rsrc_ctx_dict

    def _get_resource_data(self, description, resource_type):
        """Derive network-function info for the resource from the
        parsed description blob."""
        resource_data = df.get_network_function_info(description,
                                                     resource_type)
        return resource_data

    def _update_request_data(self, body, description):
        # Hook for post-processing the request body; intentionally a
        # no-op in this agent.
        pass

    def _data_wrapper(self, context, firewall, host, nf, reason):
        """Assemble the configurator request body for a firewall event.

        NOTE(review): the metadata is parsed from the second line of
        the network function's description field; the position is
        owned by NFP and assumed stable.
        """
        # Hardcoding the position for fetching data since we are owning
        # its positional change
        description = ast.literal_eval((nf['description'].split('\n'))[1])
        description.update({'tenant_id': firewall['tenant_id']})
        context_resource_data = self._get_resource_data(description,
                                                        const.FIREWALL)
        fw_mac = description['provider_ptg_info'][0]
        # REVISIT(dpak): We need to avoid resource description
        # dependency in OTC and instead use neutron context description.
        firewall.update({'description': str(description)})
        kwargs = {'context': context,
                  'context_resource_data': context_resource_data,
                  'firewall_policy_id': firewall[
                      'firewall_policy_id'],
                  'description': str(description),
                  'tenant_id': firewall['tenant_id']}
        ctx_dict, rsrc_ctx_dict = self._prepare_resource_context_dicts(
            **kwargs)
        service_vm_context = utils.get_service_vm_context(
            description['service_vendor'])
        nfp_context = {'network_function_id': nf['id'],
                       'neutron_context': ctx_dict,
                       'fw_mac': fw_mac,
                       'requester': 'nas_service',
                       'logging_context': module_context.get()['log_context'],
                       'service_vm_context': service_vm_context}
        resource = resource_type = 'firewall'
        resource_data = {resource: firewall,
                         'host': host,
                         'neutron_context': rsrc_ctx_dict}
        body = common.prepare_request_data(nfp_context, resource,
                                           resource_type, resource_data,
                                           description['service_vendor'])
        self._update_request_data(body, description)
        return body

    def _fetch_nf_from_resource_desc(self, desc):
        """Extract the network_function_id embedded in a resource
        description string."""
        desc_dict = ast.literal_eval(desc)
        nf_id = desc_dict['network_function_id']
        return nf_id

    @log_helpers.log_method_call
    def create_firewall(self, context, firewall, host):
        """RPC entry point: forward a firewall CREATE to the
        configurator."""
        nfp_context = module_context.init()
        # Fetch nf_id from description of the resource
        nf_id = self._fetch_nf_from_resource_desc(firewall["description"])
        nfp_context['log_context']['meta_id'] = nf_id
        nf = common.get_network_function_details(context, nf_id)
        LOG.info("Received RPC CREATE FIREWALL for "
                 "Firewall: %(firewall)s",
                 {'firewall': firewall})
        body = self._data_wrapper(context, firewall, host, nf, 'CREATE')
        transport.send_request_to_configurator(self._conf,
                                               context, body, "CREATE")

    @log_helpers.log_method_call
    def delete_firewall(self, context, firewall, host):
        """RPC entry point: forward a firewall DELETE to the
        configurator."""
        nfp_context = module_context.init()
        # Fetch nf_id from description of the resource
        nf_id = self._fetch_nf_from_resource_desc(firewall["description"])
        nfp_context['log_context']['meta_id'] = nf_id
        nf = common.get_network_function_details(context, nf_id)
        LOG.info("Received RPC DELETE FIREWALL for "
                 "Firewall: %(firewall)s",
                 {'firewall': firewall})
        body = self._data_wrapper(context, firewall, host, nf, 'DELETE')
        transport.send_request_to_configurator(self._conf,
                                               context, body, "DELETE")

View File

@ -1,523 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import copy
from gbpservice.contrib.nfp.config_orchestrator.common import common
from gbpservice.contrib.nfp.config_orchestrator.common import lbv2_constants
from gbpservice.nfp.common import constants as const
from gbpservice.nfp.common import data_formatter as df
from gbpservice.nfp.common import utils
from gbpservice.nfp.core import context as module_context
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.lib import transport
from neutron_lbaas.common import cert_manager
from neutron_lbaas.common.tls_utils import cert_parser
from neutron_lbaas.db.loadbalancer import loadbalancer_dbv2
from neutron_lbaas.extensions import loadbalancerv2
from oslo_log import helpers as log_helpers
import oslo_messaging as messaging
LOG = nfp_logging.getLogger(__name__)
"""
RPC handler for Loadbalancer service
"""
class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2):
    """RPC handler that relays LBaaS v2 CRUD operations from neutron
    to the NFP configurator, enriching each request with core DB data
    and inlined TLS certificate containers."""

    target = messaging.Target(version=const.LOADBALANCERV2_RPC_API_VERSION)

    def __init__(self, conf, sc):
        super(Lbv2Agent, self).__init__()
        self._conf = conf
        self._sc = sc
        self._cert_manager_plugin = cert_manager.get_backend()
        # Shortcut to the LoadBalancerPluginDbv2 DB accessors.
        self._db_inst = super(Lbv2Agent, self)

    def _filter_service_info_with_resource(self, lb_db, core_db):
        """Trim core DB subnets/ports to those referenced by the
        loadbalancers' VIPs, merging them into lb_db in place."""
        updated_db = {'subnets': [],
                      'ports': []}
        for lb in lb_db['loadbalancers']:
            lb_port_id = lb['vip_port_id']
            lb_subnet_id = lb['vip_subnet_id']
            for subnet in core_db['subnets']:
                if subnet['id'] == lb_subnet_id:
                    updated_db['subnets'].append(subnet)
            for port in core_db['ports']:
                if port['id'] == lb_port_id:
                    updated_db['ports'].append(port)
        lb_db.update(updated_db)
        return lb_db

    def _to_api_dict(self, objs):
        """Convert DB objects to their API dict representation."""
        ret_list = []
        for obj in objs:
            ret_list.append(obj.to_api_dict())
        return ret_list

    def _get_core_context(self, context, tenant_id):
        """Fetch the tenant's core resources; the 'routers' entry is
        dropped from the result."""
        filters = {'tenant_id': [tenant_id]}
        core_context_dict = common.get_core_context(context,
                                                    filters,
                                                    self._conf)
        del core_context_dict['routers']
        return core_context_dict

    def _get_lb_context(self, context, filters):
        """Collect the tenant's LBaaS v2 objects as API dicts."""
        args = {'context': context, 'filters': filters}
        db_data = super(Lbv2Agent, self)
        return {'loadbalancers': self._to_api_dict(
                    db_data.get_loadbalancers(**args)),
                'listeners': self._to_api_dict(
                    db_data.get_listeners(**args)),
                'pools': self._to_api_dict(
                    db_data.get_pools(**args)),
                'pool_members': self._to_api_dict(
                    db_data.get_pool_members(**args)),
                'healthmonitors': self._to_api_dict(
                    db_data.get_healthmonitors(**args))}

    def _context(self, **kwargs):
        """Build the service_info DB snapshot for the configurator; an
        admin context is rescoped to its own tenant_id first."""
        context = kwargs.get('context')
        if context.is_admin:
            kwargs['tenant_id'] = context.tenant_id
        core_db = self._get_core_context(context, kwargs['tenant_id'])
        # REVISIT(jiahao): _get_lb_context() fails for flavor_id, disable it
        # for now. Sent the whole core_db to configurator
        # lb_db = self._get_lb_context(**kwargs)
        # db = self._filter_service_info_with_resource(lb_db, core_db)
        db = core_db
        return db

    def _prepare_resource_context_dicts(self, **kwargs):
        """Return (plain context dict, context dict enriched with
        'service_info' and 'resource_data') for the configurator."""
        # Prepare context_dict
        context = kwargs.get('context')
        context_resource_data = kwargs.pop('context_resource_data')
        ctx_dict = context.to_dict()
        # Collecting db entry required by configurator.
        # Adding service_info to neutron context and sending
        # dictionary format to the configurator.
        db = self._context(**kwargs)
        rsrc_ctx_dict = copy.deepcopy(ctx_dict)
        rsrc_ctx_dict.update({'service_info': db})
        rsrc_ctx_dict.update({'resource_data': context_resource_data})
        return ctx_dict, rsrc_ctx_dict

    def _data_wrapper(self, context, tenant_id, name, reason, nf, **kwargs):
        """Assemble the configurator request body for an LBaaS v2 event.

        NOTE(review): for 'member'/'healthmonitor' resources with no
        attached pool, lb_id is never assigned and the args dict below
        would raise NameError — confirm callers always supply the pool.
        """
        nfp_context = {}
        description = ast.literal_eval((nf['description'].split('\n'))[1])
        description.update({'tenant_id': tenant_id})
        context_resource_data = df.get_network_function_info(
            description, const.LOADBALANCERV2)
        # REVISIT(dpak): We need to avoid resource description
        # dependency in OTC and instead use neutron context description.
        if name.lower() == 'loadbalancer':
            lb_id = kwargs['loadbalancer']['id']
            kwargs['loadbalancer'].update({'description': str(description)})
            nfp_context = {'network_function_id': nf['id'],
                           'loadbalancer_id': kwargs['loadbalancer']['id']}
        elif name.lower() == 'listener':
            lb_id = kwargs['listener'].get('loadbalancer_id')
            kwargs['listener']['description'] = str(description)
        elif name.lower() == 'pool':
            lb_id = kwargs['pool'].get('loadbalancer_id')
            kwargs['pool']['description'] = str(description)
        elif name.lower() == 'member':
            pool = kwargs['member'].get('pool')
            if pool:
                lb_id = pool.get('loadbalancer_id')
            kwargs['member']['description'] = str(description)
        elif name.lower() == 'healthmonitor':
            pool = kwargs['healthmonitor'].get('pool')
            if pool:
                lb_id = pool.get('loadbalancer_id')
            kwargs['healthmonitor']['description'] = str(description)
        else:
            kwargs[name.lower()].update({'description': str(description)})
            lb_id = kwargs[name.lower()].get('loadbalancer_id')
        args = {'tenant_id': tenant_id,
                'lb_id': lb_id,
                'context': context,
                'description': str(description),
                'context_resource_data': context_resource_data}
        ctx_dict, rsrc_ctx_dict = self._prepare_resource_context_dicts(**args)
        service_vm_context = utils.get_service_vm_context(
            description['service_vendor'])
        nfp_context.update({'neutron_context': ctx_dict,
                            'requester': 'nas_service',
                            'logging_context':
                                module_context.get()['log_context'],
                            'service_vm_context': service_vm_context})
        resource_type = 'loadbalancerv2'
        resource = name
        resource_data = {'neutron_context': rsrc_ctx_dict}
        resource_data.update(**kwargs)
        body = common.prepare_request_data(nfp_context, resource,
                                           resource_type, resource_data,
                                           description['service_vendor'])
        return body

    def _post(self, context, tenant_id, name, nf, **kwargs):
        """Send a CREATE request for the named resource."""
        body = self._data_wrapper(context, tenant_id, name,
                                  'CREATE', nf, **kwargs)
        transport.send_request_to_configurator(self._conf,
                                               context, body, "CREATE")

    def _put(self, context, tenant_id, name, nf, **kwargs):
        """Send an UPDATE request for the named resource."""
        body = self._data_wrapper(context, tenant_id, name,
                                  'UPDATE', nf, **kwargs)
        transport.send_request_to_configurator(self._conf,
                                               context, body, "UPDATE")

    def _delete(self, context, tenant_id, name, nf, **kwargs):
        """Send a DELETE request for the named resource."""
        body = self._data_wrapper(context, tenant_id, name,
                                  'DELETE', nf, **kwargs)
        transport.send_request_to_configurator(self._conf,
                                               context, body, "DELETE")

    def _fetch_nf_from_resource_desc(self, desc):
        """Extract the network_function_id embedded in a resource
        description string."""
        desc_dict = ast.literal_eval(desc)
        nf_id = desc_dict['network_function_id']
        return nf_id

    def _get_primary_cn(self, tls_cert):
        """Returns primary CN for Certificate."""
        return cert_parser.get_host_names(tls_cert.get_certificate())['cn']

    @staticmethod
    def _get_listeners_dict_list(resource_type, resource_dict):
        """Locate the listener dicts affected by the given resource."""
        if resource_type.lower() == 'loadbalancer':
            listeners = resource_dict['listeners']
        elif resource_type.lower() == 'listener':
            listeners = [resource_dict]
        elif resource_type.lower() == 'pool':
            listeners = resource_dict['listeners']
        elif resource_type.lower() == 'member':
            listeners = resource_dict['pool']['listeners']
        elif resource_type.lower() == 'healthmonitor':
            listeners = resource_dict['pool']['listeners']
        else:
            listeners = []
        return listeners

    def _update_tls_cert(self, resource_type, resource_dict):
        """Inline TLS certificate container data on every
        TERMINATED_HTTPS listener touched by the resource.

        Raises TLSContainerNotFound, CertManagerError or
        TLSDefaultContainerNotSpecified on certificate problems.
        """
        listeners = self._get_listeners_dict_list(resource_type, resource_dict)
        for listener in listeners:
            if listener['protocol'] != \
                    lbv2_constants.PROTOCOL_TERMINATED_HTTPS:
                continue
            cert_mgr = self._cert_manager_plugin.CertManager()
            lb_id = listener.get('loadbalancer_id')
            tenant_id = listener.get('tenant_id')

            def get_cert(cont_id):
                # Fetch the certificate container; a 404 is mapped to
                # TLSContainerNotFound, anything else to
                # CertManagerError.
                try:
                    cert_cont = cert_mgr.get_cert(
                        project_id=tenant_id,
                        cert_ref=cont_id,
                        resource_ref=cert_mgr.get_service_url(lb_id),
                        check_only=True
                    )
                    return cert_cont
                except Exception as e:
                    if hasattr(e, 'status_code') and e.status_code == 404:
                        raise loadbalancerv2.TLSContainerNotFound(
                            container_id=cont_id)
                    else:
                        # Could be a keystone configuration error...
                        # NOTE(review): e.message does not exist on
                        # Python 3 exceptions — confirm this path.
                        raise loadbalancerv2.CertManagerError(
                            ref=cont_id, reason=e.message
                        )

            def build_container_dict(cont_id, cert_cont):
                # Flatten a certificate container into the dict shape
                # the configurator expects.
                return {
                    "id": cont_id,
                    "primary_cn": self._get_primary_cn(cert_cont),
                    "private_key": cert_cont.get_private_key(),
                    "certificate": cert_cont.get_certificate(),
                    "intermediates": cert_cont.get_intermediates()
                }

            if not listener['default_tls_container_id']:
                raise loadbalancerv2.TLSDefaultContainerNotSpecified()
            else:
                container_id = listener['default_tls_container_id']
                cert_container = get_cert(container_id)
                container_dict = \
                    build_container_dict(container_id, cert_container)
                listener["default_tls_container"] = container_dict
            for container in listener.get("sni_containers"):
                container_id = container["tls_container_id"]
                cert_container = get_cert(container_id)
                container_dict = \
                    build_container_dict(container_id, cert_container)
                container["tls_container"] = container_dict

    # REVISIT(jiahao): Argument allocate_vip and
    # delete_vip_port are not implememnted.
    @log_helpers.log_method_call
    def create_loadbalancer(self, context, loadbalancer, driver_name,
                            allocate_vip=True):
        """RPC entry point: forward a loadbalancer CREATE."""
        nfp_context = module_context.init()
        LOG.info("Received RPC CREATE LOADBALANCER for LB:%(lb)s",
                 {'lb': loadbalancer})
        # Fetch nf_id from description of the resource
        nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"])
        nfp_context['log_context']['meta_id'] = nf_id
        nf = common.get_network_function_details(context, nf_id)
        self._update_tls_cert('loadbalancer', loadbalancer)
        self._post(
            context, loadbalancer['tenant_id'],
            'loadbalancer', nf,
            loadbalancer=loadbalancer, driver_name=driver_name)

    @log_helpers.log_method_call
    def update_loadbalancer(self, context, old_loadbalancer, loadbalancer):
        """RPC entry point: forward a loadbalancer UPDATE."""
        nfp_context = module_context.init()
        # Fetch nf_id from description of the resource
        nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"])
        nfp_context['log_context']['meta_id'] = nf_id
        nf = common.get_network_function_details(context, nf_id)
        self._update_tls_cert('loadbalancer', loadbalancer)
        self._put(
            context, loadbalancer['tenant_id'],
            'loadbalancer', nf,
            old_loadbalancer=old_loadbalancer, loadbalancer=loadbalancer)

    @log_helpers.log_method_call
    def delete_loadbalancer(self, context, loadbalancer,
                            delete_vip_port=True):
        """RPC entry point: forward a loadbalancer DELETE."""
        nfp_context = module_context.init()
        LOG.info("Received RPC DELETE LOADBALANCER for LB:"
                 "%(lb)s", {'lb': loadbalancer})
        # Fetch nf_id from description of the resource
        nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"])
        nfp_context['log_context']['meta_id'] = nf_id
        nf = common.get_network_function_details(context, nf_id)
        self._update_tls_cert('loadbalancer', loadbalancer)
        self._delete(
            context, loadbalancer['tenant_id'],
            'loadbalancer', nf, loadbalancer=loadbalancer)

    @log_helpers.log_method_call
    def create_listener(self, context, listener):
        """RPC entry point: forward a listener CREATE."""
        nfp_context = module_context.init()
        LOG.info("Received RPC CREATE LISTENER for Listener:%(listener)s",
                 {'listener': listener})
        loadbalancer = listener['loadbalancer']
        # Fetch nf_id from description of the resource
        nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"])
        nfp_context['log_context']['meta_id'] = nf_id
        nf = common.get_network_function_details(context, nf_id)
        self._update_tls_cert('listener', listener)
        self._post(
            context, listener['tenant_id'],
            'listener', nf, listener=listener)

    @log_helpers.log_method_call
    def update_listener(self, context, old_listener, listener):
        """RPC entry point: forward a listener UPDATE."""
        nfp_context = module_context.init()
        loadbalancer = listener['loadbalancer']
        # Fetch nf_id from description of the resource
        nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"])
        nfp_context['log_context']['meta_id'] = nf_id
        nf = common.get_network_function_details(context, nf_id)
        self._update_tls_cert('listener', listener)
        self._put(
            context, listener['tenant_id'],
            'listener', nf, old_listener=old_listener, listener=listener)

    @log_helpers.log_method_call
    def delete_listener(self, context, listener):
        """RPC entry point: forward a listener DELETE."""
        nfp_context = module_context.init()
        LOG.info("Received RPC DELETE LISTENER for Listener:%(listener)s",
                 {'listener': listener})
        loadbalancer = listener['loadbalancer']
        # Fetch nf_id from description of the resource
        nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"])
        nfp_context['log_context']['meta_id'] = nf_id
        nf = common.get_network_function_details(context, nf_id)
        self._update_tls_cert('listener', listener)
        self._delete(
            context, listener['tenant_id'],
            'listener', nf, listener=listener)

    @log_helpers.log_method_call
    def create_pool(self, context, pool):
        """RPC entry point: forward a pool CREATE."""
        nfp_context = module_context.init()
        LOG.info("Received RPC CREATE POOL for Pool:%(pool)s",
                 {'pool': pool})
        loadbalancer = pool['loadbalancer']
        # Fetch nf_id from description of the resource
        nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"])
        nfp_context['log_context']['meta_id'] = nf_id
        nf = common.get_network_function_details(context, nf_id)
        self._update_tls_cert('pool', pool)
        self._post(
            context, pool['tenant_id'],
            'pool', nf, pool=pool)

    @log_helpers.log_method_call
    def update_pool(self, context, old_pool, pool):
        """RPC entry point: forward a pool UPDATE."""
        nfp_context = module_context.init()
        loadbalancer = pool['loadbalancer']
        # Fetch nf_id from description of the resource
        nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"])
        nfp_context['log_context']['meta_id'] = nf_id
        nf = common.get_network_function_details(context, nf_id)
        self._update_tls_cert('pool', pool)
        self._put(
            context, pool['tenant_id'],
            'pool', nf, old_pool=old_pool, pool=pool)

    @log_helpers.log_method_call
    def delete_pool(self, context, pool):
        """RPC entry point: forward a pool DELETE."""
        nfp_context = module_context.init()
        LOG.info("Received RPC DELETE POOL for Pool:%(pool)s",
                 {'pool': pool})
        loadbalancer = pool['loadbalancer']
        # Fetch nf_id from description of the resource
        nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"])
        nfp_context['log_context']['meta_id'] = nf_id
        nf = common.get_network_function_details(context, nf_id)
        self._update_tls_cert('pool', pool)
        self._delete(
            context, pool['tenant_id'],
            'pool', nf, pool=pool)

    @log_helpers.log_method_call
    def create_member(self, context, member):
        """RPC entry point: forward a member CREATE."""
        nfp_context = module_context.init()
        LOG.info("Received RPC CREATE MEMBER for Member:%(member)s",
                 {'member': member})
        loadbalancer = member['pool']['loadbalancer']
        # Fetch nf_id from description of the resource
        nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"])
        nfp_context['log_context']['meta_id'] = nf_id
        nf = common.get_network_function_details(context, nf_id)
        self._update_tls_cert('member', member)
        self._post(
            context, member['tenant_id'],
            'member', nf, member=member)

    @log_helpers.log_method_call
    def update_member(self, context, old_member, member):
        """RPC entry point: forward a member UPDATE."""
        nfp_context = module_context.init()
        loadbalancer = member['pool']['loadbalancer']
        # Fetch nf_id from description of the resource
        nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"])
        nfp_context['log_context']['meta_id'] = nf_id
        nf = common.get_network_function_details(context, nf_id)
        self._update_tls_cert('member', member)
        self._put(
            context, member['tenant_id'],
            'member', nf, old_member=old_member, member=member)

    @log_helpers.log_method_call
    def delete_member(self, context, member):
        """RPC entry point: forward a member DELETE."""
        nfp_context = module_context.init()
        LOG.info("Received RPC DELETE MEMBER for Member:%(member)s",
                 {'member': member})
        loadbalancer = member['pool']['loadbalancer']
        # Fetch nf_id from description of the resource
        nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"])
        nfp_context['log_context']['meta_id'] = nf_id
        nf = common.get_network_function_details(context, nf_id)
        self._update_tls_cert('member', member)
        self._delete(
            context, member['tenant_id'],
            'member', nf, member=member)

    @log_helpers.log_method_call
    def create_healthmonitor(self, context, healthmonitor):
        """RPC entry point: forward a healthmonitor CREATE."""
        nfp_context = module_context.init()
        LOG.info("Received RPC CREATE HEALTH MONITOR for HM:%(hm)s",
                 {'hm': healthmonitor})
        loadbalancer = healthmonitor['pool']['loadbalancer']
        # Fetch nf_id from description of the resource
        nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"])
        nfp_context['log_context']['meta_id'] = nf_id
        nf = common.get_network_function_details(context, nf_id)
        self._update_tls_cert('healthmonitor', healthmonitor)
        self._post(
            context, healthmonitor['tenant_id'],
            'healthmonitor', nf, healthmonitor=healthmonitor)

    @log_helpers.log_method_call
    def update_healthmonitor(self, context, old_healthmonitor, healthmonitor):
        """RPC entry point: forward a healthmonitor UPDATE."""
        nfp_context = module_context.init()
        loadbalancer = healthmonitor['pool']['loadbalancer']
        # Fetch nf_id from description of the resource
        nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"])
        nfp_context['log_context']['meta_id'] = nf_id
        nf = common.get_network_function_details(context, nf_id)
        self._update_tls_cert('healthmonitor', healthmonitor)
        self._put(
            context, healthmonitor['tenant_id'],
            'healthmonitor', nf,
            old_healthmonitor=old_healthmonitor, healthmonitor=healthmonitor)

    @log_helpers.log_method_call
    def delete_healthmonitor(self, context, healthmonitor):
        """RPC entry point: forward a healthmonitor DELETE."""
        nfp_context = module_context.init()
        LOG.info("Received RPC DELETE HEALTH MONITOR for HM:%(hm)s",
                 {'hm': healthmonitor})
        loadbalancer = healthmonitor['pool']['loadbalancer']
        # Fetch nf_id from description of the resource
        nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"])
        nfp_context['log_context']['meta_id'] = nf_id
        nf = common.get_network_function_details(context, nf_id)
        self._update_tls_cert('healthmonitor', healthmonitor)
        self._delete(
            context, healthmonitor['tenant_id'],
            'healthmonitor', nf, healthmonitor=healthmonitor)

    # REVISIT(jiahao): L7policy support not implemented
    # disable L7policy
    # def create_l7policy(self, context, l7policy):
    #     self._post(
    #         context, l7policy['tenant_id'],
    #         'l7policy', l7policy=l7policy)
    #
    # def delete_l7policy(self, context, l7policy):
    #     self._delete(
    #         context, l7policy['tenant_id'],
    #         'l7policy', l7policy=l7policy)
    #
    # def create_l7policy_rule(self, context, rule, l7policy_id):
    #     self._post(
    #         context, rule['tenant_id'],
    #         'rule', rule=rule)
    #
    # def delete_l7policy_rule(self, context, rule):
    #     self._delete(
    #         context, rule['tenant_id'],
    #         'rule', rule=rule)
    #
    # def _get_lb_context(self, context, filters):
    #     args = {'context': context, 'filters': filters}
    #     db_data = super(Lbv2Agent, self)
    #     return {'loadbalancers': db_data.get_loadbalancers(**args),
    #             'listeners': db_data.get_listeners(**args),
    #             'pools': db_data.get_pools(**args),
    #             'pool_members': db_data.get_pool_members(**args),
    #             'healthmonitors': db_data.get_healthmonitors(**args),
    #             'l7policies': db_data.get_l7policies(**args),
    #             'l7policy_rules': db_data.get_l7policy_rules(**args)}

View File

@ -1,198 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import copy
from gbpservice.contrib.nfp.config_orchestrator.common import common
from gbpservice.nfp.common import constants as const
from gbpservice.nfp.common import data_formatter as df
from gbpservice.nfp.common import utils
from gbpservice.nfp.core import context as module_context
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.lib import transport
from neutron_vpnaas.db.vpn import vpn_db
from oslo_log import helpers as log_helpers
import oslo_messaging as messaging
LOG = nfp_logging.getLogger(__name__)
"""
RPC handler for VPN service
"""
class VpnAgent(vpn_db.VPNPluginDb, vpn_db.VPNPluginRpcDbMixin):
    """RPC handler that relays VPN resource operations to the configurator.

    Inherits the VPNaaS DB mixins so it can read vpnservice, ikepolicy,
    ipsecpolicy and ipsec-site-connection records directly and ship them
    to the configurator as 'service_info'.
    """

    RPC_API_VERSION = '1.0'
    target = messaging.Target(version=RPC_API_VERSION)

    def __init__(self, conf, sc):
        super(VpnAgent, self).__init__()
        self._conf = conf
        self._sc = sc
        # Shortcut to the inherited DB mixin methods (get_vpnservices etc.).
        self._db_inst = super(VpnAgent, self)

    def _get_vpn_context(self, context, tenant_id, vpnservice_id,
                         ikepolicy_id, ipsecpolicy_id,
                         ipsec_site_conn_id, desc):
        """Collect the DB records needed for one ipsec site connection.

        Returns a dict with the vpnservice, ike/ipsec policies and the
        connection itself, each as a list of DB dicts.
        """
        vpnservices = self._get_vpnservices(context, tenant_id,
                                            vpnservice_id, desc)
        ikepolicies = self._get_ikepolicies(context, tenant_id,
                                            ikepolicy_id)
        ipsecpolicies = self._get_ipsecpolicies(context, tenant_id,
                                                ipsecpolicy_id)
        ipsec_site_conns = self._get_ipsec_site_conns(context, tenant_id,
                                                      ipsec_site_conn_id, desc)

        return {'vpnservices': vpnservices,
                'ikepolicies': ikepolicies,
                'ipsecpolicies': ipsecpolicies,
                'ipsec_site_conns': ipsec_site_conns}

    def _context(self, context, tenant_id, resource, resource_data):
        """Build the 'service_info' DB snapshot for *resource*.

        NOTE(review): for an admin context the tenant_id argument is
        replaced with context.tenant_id — looks deliberate, but confirm.
        Returns None for resource types other than ipsec_site_connection
        and vpn_service.
        """
        if context.is_admin:
            tenant_id = context.tenant_id
        if resource.lower() == 'ipsec_site_connection':
            vpn_ctx_db = self._get_vpn_context(context,
                                               tenant_id,
                                               resource_data[
                                                   'vpnservice_id'],
                                               resource_data[
                                                   'ikepolicy_id'],
                                               resource_data[
                                                   'ipsecpolicy_id'],
                                               resource_data['id'],
                                               resource_data[
                                                   'description'])
            return vpn_ctx_db
        elif resource.lower() == 'vpn_service':
            return {'vpnservices': [resource_data]}
        else:
            # Unknown resource type: nothing to attach.
            return None

    def _prepare_resource_context_dicts(self, context, tenant_id,
                                        resource, resource_data,
                                        context_resource_data):
        """Return (plain context dict, context dict enriched for the
        configurator with 'service_info' and 'resource_data')."""
        # Prepare context_dict
        ctx_dict = context.to_dict()
        # Collecting db entry required by configurator.
        # Adding service_info to neutron context and sending
        # dictionary format to the configurator.
        db = self._context(context, tenant_id, resource,
                           resource_data)
        # Deep copy so the enriched dict does not alias the plain one.
        rsrc_ctx_dict = copy.deepcopy(ctx_dict)
        rsrc_ctx_dict.update({'service_info': db})
        rsrc_ctx_dict.update({'resource_data': context_resource_data})
        return ctx_dict, rsrc_ctx_dict

    def _get_resource_data(self, description, resource_type):
        """Extract network-function info for *resource_type* from the
        parsed resource description."""
        resource_data = df.get_network_function_info(description,
                                                     resource_type)
        return resource_data

    def _update_request_data(self, body, description):
        # Hook for subclasses/future use; intentionally a no-op here.
        pass

    def _data_wrapper(self, context, tenant_id, nf, **kwargs):
        """Assemble the configurator request body for a VPN operation."""
        nfp_context = {}
        description, str_description = (
            utils.get_vpn_description_from_nf(nf))
        description.update({'tenant_id': tenant_id})
        context_resource_data = self._get_resource_data(description,
                                                        const.VPN)
        resource = kwargs['rsrc_type']
        resource_data = kwargs['resource']
        # REVISIT(dpak): We need to avoid resource description
        # dependency in OTC and instead use neutron context description.
        resource_data['description'] = str_description
        if resource.lower() == 'ipsec_site_connection':
            nfp_context = {'network_function_id': nf['id'],
                           'ipsec_site_connection_id': kwargs[
                               'rsrc_id']}

        ctx_dict, rsrc_ctx_dict = self.\
            _prepare_resource_context_dicts(context, tenant_id,
                                            resource, resource_data,
                                            context_resource_data)
        service_vm_context = utils.get_service_vm_context(
            description['service_vendor'])
        nfp_context.update({'neutron_context': ctx_dict,
                            'service_vm_context': service_vm_context,
                            'requester': 'nas_service',
                            'logging_context':
                            module_context.get()['log_context']})
        resource_type = 'vpn'
        kwargs.update({'neutron_context': rsrc_ctx_dict})
        body = common.prepare_request_data(nfp_context, resource,
                                           resource_type, kwargs,
                                           description['service_vendor'])
        self._update_request_data(body, description)
        return body

    def _fetch_nf_from_resource_desc(self, desc):
        """Parse the network_function_id out of a resource description
        that holds a python-literal dict."""
        desc_dict = ast.literal_eval(desc)
        nf_id = desc_dict['network_function_id']
        return nf_id

    @log_helpers.log_method_call
    def vpnservice_updated(self, context, **kwargs):
        """RPC entry point: forward a VPN service update to the
        configurator, tagged with the owning network function."""
        nfp_context = module_context.init()
        LOG.info("Received RPC VPN SERVICE UPDATED with data:%(data)s",
                 {'data': kwargs})
        # Fetch nf_id from description of the resource
        nf_id = self._fetch_nf_from_resource_desc(kwargs[
            'resource']['description'])
        nfp_context['log_context']['meta_id'] = nf_id
        nf = common.get_network_function_details(context, nf_id)
        reason = kwargs['reason']
        body = self._data_wrapper(context, kwargs[
            'resource']['tenant_id'], nf, **kwargs)
        transport.send_request_to_configurator(self._conf,
                                               context, body,
                                               reason)

    def _proxy_subnet_cidr(self, description):
        """Extract the proxy subnet CIDR from a ';'-separated description.

        Assumes the sixth field has the form 'key=<cidr>' — TODO confirm
        the description format against the producer.
        """
        tokens = description.split(';')
        return tokens[5].split('=')[1]

    def _get_vpnservices(self, context, tenant_id, vpnservice_id, desc):
        """Fetch the vpnservice row, overwriting its description with the
        one carried by the notification."""
        filters = {'tenant_id': [tenant_id],
                   'id': [vpnservice_id]}
        args = {'context': context, 'filters': filters}
        vpnservices = self._db_inst.get_vpnservices(**args)
        for vpnservice in vpnservices:
            vpnservice['description'] = desc
        return vpnservices

    def _get_ikepolicies(self, context, tenant_id, ikepolicy_id):
        """Fetch the IKE policy row for this tenant/id."""
        filters = {'tenant_id': [tenant_id],
                   'id': [ikepolicy_id]}
        args = {'context': context, 'filters': filters}
        return self._db_inst.get_ikepolicies(**args)

    def _get_ipsecpolicies(self, context, tenant_id, ipsecpolicy_id):
        """Fetch the IPsec policy row for this tenant/id."""
        filters = {'tenant_id': [tenant_id],
                   'id': [ipsecpolicy_id]}
        args = {'context': context, 'filters': filters}
        return self._db_inst.get_ipsecpolicies(**args)

    def _get_ipsec_site_conns(self, context, tenant_id, ipsec_site_conn_id,
                              desc):
        """Fetch the ipsec site connection row, overwriting its description
        with the one carried by the notification."""
        filters = {'tenant_id': [tenant_id],
                   'id': [ipsec_site_conn_id]}
        args = {'context': context, 'filters': filters}
        ipsec_site_conns = self._db_inst.get_ipsec_site_connections(**args)
        for ipsec_site_conn in ipsec_site_conns:
            ipsec_site_conn['description'] = desc
        return ipsec_site_conns

View File

@ -1,235 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import traceback
from gbpservice.contrib.nfp.config_orchestrator.common import (
lbv2_constants as lbv2_const)
from gbpservice.contrib.nfp.config_orchestrator.common import (
topics as a_topics)
from gbpservice.nfp.common import constants as const
from gbpservice.nfp.core import context as module_context
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.lib import transport
import oslo_messaging as messaging
LOG = nfp_logging.getLogger(__name__)
class RpcHandler(object):
    """Entry point for notification RPCs arriving from the configurator."""

    RPC_API_VERSION = '1.0'
    target = messaging.Target(version=RPC_API_VERSION)

    def __init__(self, conf, sc):
        super(RpcHandler, self).__init__()
        self.conf = conf
        self.sc = sc

    def network_function_notification(self, context, notification_data):
        """Dispatch one notification blob to the service-type handler."""
        module_context.init()
        try:
            LOG.info("Received NETWORK FUNCTION NOTIFICATION:"
                     "%(notification)s",
                     {'notification': notification_data['notification']})
            # Only dispatch when a service type is present in the info.
            if notification_data['info']['service_type'] is not None:
                NaasNotificationHandler(
                    self.conf, self.sc).handle_notification(
                        context, notification_data)
        except Exception as e:
            err_type, err_value, err_tb = sys.exc_info()
            msg = ("Generic exception (%s) while handling message (%s) : %s"
                   % (e, notification_data,
                      traceback.format_exception(err_type, err_value,
                                                 err_tb)))
            LOG.error(msg)
class FirewallNotifier(object):
    """Relays firewall configuration results back to the firewall plugin."""

    def __init__(self, conf, sc):
        self._sc = sc
        self._conf = conf

    def set_firewall_status(self, context, notification_data):
        """Cast 'set_firewall_status' to the plugin after a create completes."""
        ctx = module_context.init()
        notification = notification_data['notification'][0]
        info = notification_data.get('info')
        ctx['log_context'] = info.get('context').get('logging_context', {})

        data = notification['data']
        firewall_id = data['firewall_id']
        status = data['status']
        LOG.info("Received firewall configuration create complete API, "
                 "making an RPC call set firewall status for "
                 "firewall:%(firewall)s and status: %(status)s",
                 {'firewall': firewall_id,
                  'status': status})

        # RPC call to plugin to set firewall status
        rpc_client = transport.RPCClient(a_topics.FW_NFP_PLUGIN_TOPIC)
        rpc_client.cctxt.cast(context, 'set_firewall_status',
                              host=data['host'],
                              firewall_id=firewall_id,
                              status=status)

    def firewall_deleted(self, context, notification_data):
        """Cast 'firewall_deleted' to the plugin after a delete completes."""
        ctx = module_context.init()
        notification = notification_data['notification'][0]
        info = notification_data.get('info')
        ctx['log_context'] = info.get('context').get('logging_context', {})

        data = notification['data']
        firewall_id = data['firewall_id']
        LOG.info("Received firewall_configuration_delete_complete API, "
                 "making an RPC call firewall_deleted for firewall:"
                 "%(firewall)s ",
                 {'firewall': firewall_id})

        # RPC call to plugin to update firewall deleted
        rpc_client = transport.RPCClient(a_topics.FW_NFP_PLUGIN_TOPIC)
        rpc_client.cctxt.cast(context, 'firewall_deleted',
                              host=data['host'],
                              firewall_id=firewall_id)
class LoadbalancerV2Notifier(object):
    """Relays LBaaS v2 object status updates back to the LB plugin."""

    def __init__(self, conf, sc):
        self._sc = sc
        self._conf = conf

    def update_status(self, context, notification_data):
        """Cast 'update_status' to the plugin for the notified object."""
        ctx = module_context.init()
        notification = notification_data['notification'][0]
        info = notification_data.get('info')
        ctx['log_context'] = info.get('context').get('logging_context', {})

        data = notification['data']
        obj_type = data['obj_type']
        obj_id = data['obj_id']
        rpc_client = transport.RPCClient(a_topics.LBV2_NFP_PLUGIN_TOPIC)
        rpc_client.cctxt = rpc_client.client.prepare(
            version=const.LOADBALANCERV2_RPC_API_VERSION)

        obj_p_status = data['provisioning_status']
        obj_o_status = data['operating_status']
        LOG.info("Received LB's update_status API. Making an "
                 "update_status RPC call to plugin for %(obj_type)s:"
                 "%(obj_id)s with status: %(status)s",
                 {'obj_type': obj_type,
                  'obj_id': obj_id,
                  'status': obj_p_status})

        if obj_type == 'loadbalancer':
            # The root loadbalancer's statuses are derived from the
            # provisioning result rather than taken from the payload.
            if obj_p_status == const.ERROR:
                lb_p_status, lb_o_status = const.ERROR, lbv2_const.OFFLINE
            else:
                lb_p_status, lb_o_status = const.ACTIVE, lbv2_const.ONLINE
            rpc_client.cctxt.cast(context, 'update_status',
                                  obj_type='loadbalancer',
                                  obj_id=data['root_lb_id'],
                                  provisioning_status=lb_p_status,
                                  operating_status=lb_o_status)
        else:
            # Health monitors carry no meaningful operating status.
            if obj_type == 'healthmonitor':
                obj_o_status = None
            rpc_client.cctxt.cast(context, 'update_status',
                                  obj_type=obj_type,
                                  obj_id=obj_id,
                                  provisioning_status=obj_p_status,
                                  operating_status=obj_o_status)

    # TODO(jiahao): implement later
    def update_loadbalancer_stats(self, context, loadbalancer_id, stats_data):
        pass
class VpnNotifier(object):
    """Relays VPN status updates from the configurator to the VPN plugin."""

    def __init__(self, conf, sc):
        self._sc = sc
        self._conf = conf

    def update_status(self, context, notification_data):
        """Cast 'update_status' with the notified status to the plugin."""
        ctx = module_context.init()
        payload = notification_data['notification'][0]['data']
        info = notification_data.get('info')
        ctx['log_context'] = info.get('context').get('logging_context', {})

        status = payload['status']
        LOG.info("Received VPN's update_status API. "
                 "Making an update_status RPC cast to plugin for object"
                 "with status: %(status)s",
                 {'status': status})
        rpc_client = transport.RPCClient(a_topics.VPN_NFP_PLUGIN_TOPIC)
        rpc_client.cctxt.cast(context, 'update_status',
                              status=status)

    def ipsec_site_conn_deleted(self, context, notification_data):
        # Deletion completion requires no plugin-side action.
        pass
# Maps the 'service_type' carried in notification_data['info'] to the
# notifier class that relays that service's notifications to its plugin.
ServicetypeToHandlerMap = {'firewall': FirewallNotifier,
                           'loadbalancerv2': LoadbalancerV2Notifier,
                           'vpn': VpnNotifier}
class NaasNotificationHandler(object):
    """Looks up the per-service notifier and invokes the notified method."""

    def __init__(self, conf, sc):
        self.conf = conf
        self.sc = sc

    def handle_notification(self, context, notification_data):
        """Route one notification to ServicetypeToHandlerMap[service_type]."""
        try:
            LOG.debug("Handling Notification with Data:%s",
                      notification_data)
            payload = notification_data['notification'][0]['data']
            service_type = notification_data['info']['service_type']
            notifier = ServicetypeToHandlerMap[service_type](self.conf,
                                                             self.sc)
            # The payload itself names the notifier method to run.
            method = getattr(notifier, payload['notification_type'])
            method(context, notification_data)
        except Exception as e:
            err_type, err_value, err_tb = sys.exc_info()
            msg = ("Generic exception (%s) while handling message (%s) : %s"
                   % (e, notification_data,
                      traceback.format_exception(err_type, err_value,
                                                 err_tb)))
            LOG.error(msg)

View File

@ -1,45 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg as oslo_config
from gbpservice.nfp.core import context
from gbpservice.nfp.orchestrator import context as module_context
context.NfpContext = module_context.NfpContext
# Keystone/Neutron access options used to create and manage service VMs.
openstack_opts = [
    oslo_config.StrOpt('auth_host',
                       default='localhost',
                       help='Openstack controller IP Address'),
    # REVISIT: In future, use nfp_user with admin role instead of admin_user
    oslo_config.StrOpt('admin_user',
                       help='Admin user name to create service VMs'),
    oslo_config.StrOpt('admin_password',
                       help='Admin password to create service VMs'),
    # REVISIT: In future, use nfp_tenant_name instead of admin_tenant_name
    oslo_config.StrOpt('admin_tenant_name',
                       help='Admin tenant name to create service VMs'),
    oslo_config.StrOpt('admin_tenant_id',
                       help='Admin tenant ID to create service VMs'),
    oslo_config.StrOpt('auth_protocol',
                       default='http', help='Auth protocol used.'),
    # BUG FIX: IntOpt defaults were strings ('5000', '9696'); use ints so
    # the options are well-typed.  Help texts below were copy-pasted from
    # auth_protocol and described the wrong thing.
    oslo_config.IntOpt('auth_port',
                       default=5000, help='Auth port used.'),
    oslo_config.IntOpt('bind_port',
                       default=9696, help='Neutron API bind port.'),
    oslo_config.StrOpt('auth_version',
                       default='v2.0', help='Auth version used.'),
    oslo_config.StrOpt('auth_uri',
                       default='', help='Auth URI.'),
]
oslo_config.CONF.register_opts(openstack_opts, "nfp_keystone_authtoken")

View File

@ -1,86 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gbpservice.contrib.nfp.config_orchestrator.common import (
topics as a_topics)
from gbpservice.contrib.nfp.config_orchestrator.handlers.config import (
firewall as fw)
from gbpservice.contrib.nfp.config_orchestrator.handlers.config import (
loadbalancerv2 as lbv2)
from gbpservice.contrib.nfp.config_orchestrator.handlers.config import vpn
from gbpservice.contrib.nfp.config_orchestrator.handlers.notification import (
handler as notif_handler)
from gbpservice.nfp.core.rpc import RpcAgent
from oslo_config import cfg
def rpc_init(sc, conf):
    """Create and register the config-orchestrator RPC agents.

    One RpcAgent per consumed topic: firewall, loadbalancer-v2, vpn and
    the generic configurator-notification handler.  The LB and VPN agents
    additionally report state to their plugins every 'report_interval'
    seconds.
    """
    fwrpcmgr = fw.FwAgent(conf, sc)
    fwagent = RpcAgent(
        sc,
        host=cfg.CONF.host,
        topic=a_topics.FW_NFP_CONFIGAGENT_TOPIC,
        manager=fwrpcmgr
    )

    # Loadbalancer V2 agent with periodic report-state to the LBaaS plugin.
    lbv2_report_state = {
        'binary': 'NCO',
        'host': cfg.CONF.host,
        'topic': a_topics.LBV2_NFP_CONFIGAGENT_TOPIC,
        'plugin_topic': a_topics.LBV2_NFP_PLUGIN_TOPIC,
        'agent_type': 'NFP Loadbalancer V2 agent',
        'configurations': {'device_drivers': ['loadbalancerv2']},
        'start_flag': True,
        'report_interval': 10
    }
    lbv2rpcmgr = lbv2.Lbv2Agent(conf, sc)
    lbv2agent = RpcAgent(
        sc,
        host=cfg.CONF.host,
        topic=a_topics.LBV2_NFP_CONFIGAGENT_TOPIC,
        manager=lbv2rpcmgr,
        report_state=lbv2_report_state
    )

    # VPN agent with periodic report-state to the VPN plugin.
    vpn_report_state = {
        'binary': 'NCO',
        'host': cfg.CONF.host,
        'topic': a_topics.VPN_NFP_CONFIGAGENT_TOPIC,
        'plugin_topic': a_topics.VPN_NFP_PLUGIN_TOPIC,
        'agent_type': 'NFP Vpn agent',
        'configurations': {'device_drivers': ['vpn']},
        'start_flag': True,
        'report_interval': 10
    }
    vpnrpcmgr = vpn.VpnAgent(conf, sc)
    vpnagent = RpcAgent(
        sc,
        host=cfg.CONF.host,
        topic=a_topics.VPN_NFP_CONFIGAGENT_TOPIC,
        manager=vpnrpcmgr,
        report_state=vpn_report_state
    )

    # Handler for notifications coming back from the configurator.
    rpchandler = notif_handler.RpcHandler(conf, sc)
    rpcagent = RpcAgent(
        sc,
        host=cfg.CONF.host,
        topic=a_topics.CONFIG_ORCH_TOPIC,
        manager=rpchandler,
    )

    sc.register_rpc_agents([fwagent, lbv2agent, vpnagent, rpcagent])
def nfp_module_init(sc, conf):
    """NFP module entry point: wire up all config-orchestrator RPC agents."""
    rpc_init(sc, conf)

View File

@ -1,338 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import rpc as n_rpc
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
import oslo_serialization.jsonutils as jsonutils
import pecan
import pika
from gbpservice.nfp.pecan import base_controller
LOG = logging.getLogger(__name__)
n_rpc.init(cfg.CONF)
class Controller(base_controller.BaseController):
    """Implements all the APIs Invoked by HTTP requests.

    Implements following HTTP methods.
    -get
    -post
    -put
    According to the HTTP request received from config-agent this class make
    call/cast to configurator and return response to config-agent
    """

    def __init__(self, method_name):
        try:
            self.method_name = method_name
            # Micro-service topology comes from the pecan deployment config.
            self.services = pecan.conf['cloud_services']
            self.rpc_routing_table = {}
            for service in self.services:
                self._entry_to_rpc_routing_table(service)

            # NOTE(review): notification settings are read from the first
            # configured service only — assumes that one is the configurator.
            configurator_notifications = self.services[0]['notifications']
            self.rmqconsumer = RMQConsumer(configurator_notifications['host'],
                                           configurator_notifications['queue']
                                           )
            super(Controller, self).__init__()
        except Exception as err:
            msg = (
                "Failed to initialize Controller class  %s." %
                str(err).capitalize())
            LOG.error(msg)

    def _entry_to_rpc_routing_table(self, service):
        """Prepares routing table based on the uservice configuration.

        This routing table is used to route the rpcs to all interested
        uservices. Key used for routing is the uservice[apis].

        :param uservice
        e.g uservice = {'service_name': 'configurator',
                        'topic': 'configurator',
                        'reporting_interval': '10',  # in seconds
                        'apis': ['CONFIGURATION', 'EVENT']
                        }

        Returns: None
        Prepares: self.rpc_routing_table
        e.g self.rpc_routing_table = {'CONFIGURATION': [rpc_client, ...],
                                      'EVENT': [rpc_client, ...]
                                      }
        """
        for api in service['apis']:
            if api not in self.rpc_routing_table:
                self.rpc_routing_table[api] = []

            self.rpc_routing_table[api].append(CloudService(**service))

    @pecan.expose(method='GET', content_type='application/json')
    def get(self):
        """Method of REST server to handle request get_notifications.

        This method send an RPC call to configurator and returns Notification
        data to config-agent

        Returns: Dictionary that contains Notification data
        """
        try:
            if self.method_name == 'get_notifications':
                notification_data = self.rmqconsumer.pull_notifications()
                msg = ("NOTIFICATION_DATA sent to config_agent %s"
                       % notification_data)
                LOG.info(msg)
                return jsonutils.dumps(notification_data)

        except Exception as err:
            pecan.response.status = 400
            msg = ("Failed to handle request=%s. Reason=%s."
                   % (self.method_name, str(err).capitalize()))
            LOG.error(msg)
            error_data = self._format_description(msg)
            return jsonutils.dumps(error_data)

    @pecan.expose(method='POST', content_type='application/json')
    def post(self, **body):
        """Method of REST server to handle all the post requests.

        This method sends an RPC cast to configurator according to the
        HTTP request.

        :param body: This method excepts dictionary as a parameter in HTTP
        request and send this dictionary to configurator with RPC cast.

        Returns: None
        """
        try:
            # NOTE(review): the keyword arguments are discarded; the payload
            # is re-read from the raw request body instead.
            body = None
            if pecan.request.is_body_readable:
                body = pecan.request.json_body

            # Default to the CONFIGURATION route when none is supplied.
            routing_key = body.pop("routing_key", "CONFIGURATION")
            for uservice in self.rpc_routing_table[routing_key]:
                uservice.rpcclient.cast(self.method_name, body)
                msg = ('Sent RPC to %s' % (uservice.topic))
                LOG.info(msg)

            msg = ("Successfully served HTTP request %s" % self.method_name)
            LOG.info(msg)

        except Exception as err:
            pecan.response.status = 400
            msg = ("Failed to serve HTTP post request %s %s."
                   % (self.method_name, str(err).capitalize()))
            LOG.error(msg)
            error_data = self._format_description(msg)
            return jsonutils.dumps(error_data)

    @pecan.expose(method='PUT', content_type='application/json')
    def put(self, **body):
        """Method of REST server to handle all the put requests.

        This method sends an RPC cast to configurator according to the
        HTTP request.

        :param body: This method excepts dictionary as a parameter in HTTP
        request and send this dictionary to configurator with RPC cast.

        Returns: None
        """
        try:
            # NOTE(review): as in post(), kwargs are discarded in favour of
            # the raw JSON request body.
            body = None
            if pecan.request.is_body_readable:
                body = pecan.request.json_body

            routing_key = body.pop("routing_key", "CONFIGURATION")
            for uservice in self.rpc_routing_table[routing_key]:
                uservice.rpcclient.cast(self.method_name, body)
                msg = ('Sent RPC to %s' % (uservice.topic))
                LOG.info(msg)

            msg = ("Successfully served HTTP request %s" % self.method_name)
            LOG.info(msg)

        except Exception as err:
            pecan.response.status = 400
            msg = ("Failed to serve HTTP put request %s %s."
                   % (self.method_name, str(err).capitalize()))
            LOG.error(msg)
            error_data = self._format_description(msg)
            return jsonutils.dumps(error_data)

    def _format_description(self, msg):
        """This method formats error description.

        :param msg: An error message that is to be formatted

        Returns: error_data dictionary
        """
        error_data = {'failure_desc': {'msg': msg}}
        return error_data
class RPCClient(object):
    """Thin oslo.messaging wrapper used by the REST Controller.

    Exposes ``call`` and ``cast`` towards a single topic.  The instance
    itself is passed as the RPC context object (see ``to_dict``), because
    the real context travels inside the request data.
    """

    API_VERSION = '1.0'

    def __init__(self, topic):
        self.topic = topic
        rpc_target = oslo_messaging.Target(
            topic=self.topic,
            version=self.API_VERSION)
        self.client = n_rpc.get_client(rpc_target)

    def call(self, method_name):
        """Synchronously invoke *method_name* on the configurator.

        Returns whatever the configurator replies with (notification data).
        """
        prepared = self.client.prepare(version=self.API_VERSION,
                                       topic=self.topic)
        return prepared.call(self, method_name)

    def cast(self, method_name, request_data):
        """Asynchronously invoke *method_name* with *request_data*.

        Returns immediately without waiting for a reply.
        """
        prepared = self.client.prepare(version=self.API_VERSION,
                                       topic=self.topic)
        return prepared.cast(self,
                             method_name,
                             request_data=request_data)

    def to_dict(self):
        """Serialize this stand-in context as an empty dictionary.

        oslo.messaging requires the context argument to provide to_dict();
        the meaningful context is carried inside request_data instead.
        """
        return {}
class CloudService(object):
    """Holds one uservice's settings plus a ready-to-use RPCClient for it."""

    def __init__(self, **kwargs):
        fetch = kwargs.get
        self.service_name = fetch('service_name')
        self.topic = fetch('topic')
        self.reporting_interval = fetch('reporting_interval')
        # Pre-build the client so callers can cast without extra setup.
        self.rpcclient = RPCClient(topic=self.topic)
class RMQConsumer(object):
    """RMQConsumer for over the cloud services.

    This class access rabbitmq's 'configurator-notifications' queue
    to pull all the notifications came from over the cloud services.
    """

    def __init__(self, rabbitmq_host, queue):
        self.rabbitmq_host = rabbitmq_host
        self.queue = queue
        # Open the connection eagerly; failures are logged, and
        # pull_notifications re-creates the connection when it finds it closed.
        self.create_connection()

    def create_connection(self):
        """Open a blocking pika connection to the configured RabbitMQ host."""
        try:
            self.connection = pika.BlockingConnection(
                pika.ConnectionParameters
                (host=self.rabbitmq_host))
        except Exception as e:
            msg = ("Failed to create rmq connection %s" % (e))
            LOG.error(msg)

    def _fetch_data_from_wrapper_strct(self, oslo_notifications):
        """Unwrap oslo.messaging envelopes into one flat notification list.

        Each queue message is an 'oslo.message' JSON envelope whose args
        carry 'notification_data' (itself a list); all lists are
        concatenated and returned.
        """
        notifications = []
        for oslo_notification_data in oslo_notifications:
            notification_data = jsonutils.loads(
                oslo_notification_data["oslo.message"]
            )["args"]["notification_data"]
            notifications.extend(notification_data)
        return notifications

    def pull_notifications(self):
        """Drain all currently pending messages from the queue.

        Messages are fetched with basic_get and acknowledged in one batch
        (multiple=True) only after all have been read, so a failure before
        the ack leaves them queued for redelivery.
        """
        notifications = []
        msgs_acknowledged = False

        try:
            self.channel = self.connection.channel()
            self.queue_declared = self.channel.queue_declare(queue=self.queue,
                                                             durable=True)
            self.channel.queue_bind(self.queue, 'openstack')
            pending_msg_count = self.queue_declared.method.message_count
            log = ('[notifications queue:%s, pending notifications:%s]'
                   % (self.queue, pending_msg_count))
            LOG.info(log)
            for i in range(pending_msg_count):
                method, properties, body = self.channel.basic_get(self.queue)
                notifications.append(jsonutils.loads(body))

            # Acknowledge all messages delivery
            if pending_msg_count > 0:
                self.channel.basic_ack(delivery_tag=method.delivery_tag,
                                       multiple=True)
                msgs_acknowledged = True

            self.channel.close()
            return self._fetch_data_from_wrapper_strct(notifications)

        except pika.exceptions.ConnectionClosed:
            msg = ("Caught ConnectionClosed exception.Creating new connection")
            LOG.error(msg)
            self.create_connection()
            return self._fetch_data_from_wrapper_strct(notifications)

        except pika.exceptions.ChannelClosed:
            msg = ("Caught ChannelClosed exception.")
            LOG.error(msg)
            if msgs_acknowledged is False:
                # Nothing was acked yet, so the broker will redeliver;
                # retry the whole pull on a fresh channel.
                return self.pull_notifications()
            else:
                return self._fetch_data_from_wrapper_strct(notifications)

View File

@ -1,64 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from gbpservice.contrib.nfp.configurator.advanced_controller import (
controller)
"""This class forwards HTTP request to controller class.
This class create an object of Controller class with appropriate
parameter according to the path of HTTP request. According to the
parameter passed to Controller class it sends an RPC call/cast to
configurator.
"""
class ControllerResolver(object):
    """Maps each supported NFP API name to a Controller instance.

    Requests under /v1/nfp/<name> are dispatched to the attribute of the
    same name; each Controller forwards the request over RPC to the
    configurator.
    """

    create_network_function_device_config = controller.Controller(
        "create_network_function_device_config")
    delete_network_function_device_config = controller.Controller(
        "delete_network_function_device_config")
    update_network_function_device_config = controller.Controller(
        "update_network_function_device_config")
    create_network_function_config = controller.Controller(
        "create_network_function_config")
    delete_network_function_config = controller.Controller(
        "delete_network_function_config")
    update_network_function_config = controller.Controller(
        "update_network_function_config")
    get_notifications = controller.Controller("get_notifications")
    network_function_event = controller.Controller("network_function_event")
    get_requests = controller.Controller("get_requests")
""" This class forwards HTTP requests starting with /v1/nfp.
All HTTP requests with path starting from /v1
land here. This class forward request with path starting from /v1/nfp
to ControllerResolver.
"""
class V1Controller(object):
    """Root of the /v1 URL namespace; /v1/nfp/* goes to ControllerResolver."""

    nfp = ControllerResolver()

    @pecan.expose()
    def get(self):
        # GET /v1 returns the API version manifest.
        return {'versions': [{'status': 'CURRENT',
                              'updated': '2014-12-11T00:00:00Z',
                              'id': 'v1'}]}

View File

@ -1,280 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import rpc as n_rpc
from oslo_config import cfg
import oslo_messaging as messaging
from gbpservice.contrib.nfp.configurator.lib import constants as const
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.core import module as nfp_api
n_rpc.init(cfg.CONF)
LOG = nfp_logging.getLogger(__name__)
class AgentBaseRPCManager(object):
    """Implements base class for all service agents.

    Common methods for service agents are implemented in this class.
    Configurator module invokes these methods through the service
    agent's child class instance.
    """

    def __init__(self, sc, conf):
        # sc: core service controller used to create/post events.
        # conf: oslo configuration object.
        self.sc = sc
        self.conf = conf

    def validate_request(self, sa_req_list, notification_data):
        """Preliminary validation of function input.

        :param sa_req_list: List of data blobs prepared by de-multiplexer
        for service agents processing.
        :param notification_data: Notification blobs prepared by the service
        agents after processing request blobs. Each request blob will have
        a corresponding notification blob.

        Returns: True if validation passes. False if validation fails.
        """
        return (isinstance(sa_req_list, list) and
                isinstance(notification_data, dict))

    def get_diff_of_dict(self, old_dict, new_dict):
        """Computes the difference between two dictionaries.

        :param old_dict: previous state
        :param new_dict: desired state

        Returns: two dictionaries holding, for every key whose value
        changed (except 'description'), the old and the new value.
        """
        old_val = {}
        new_val = {}
        for key, value in new_dict.items():
            # 'description' changes are deliberately ignored.
            if key == 'description' or old_dict.get(key) == value:
                continue
            new_val[key] = value
            old_val[key] = old_dict.get(key)
        return old_val, new_val

    def process_request(self, sa_req_list, notification_data):
        """Forwards the RPC message from configurator to service agents.

        Checks if the request message contains multiple data blobs. If
        multiple data blobs are found, a batch event is generated,
        otherwise the single blob is dispatched directly.

        :param sa_req_list: List of data blobs prepared by de-multiplexer
        for service agents processing.
        :param notification_data: Notification blobs prepared by the service
        agents after processing request blobs.

        Returns: None
        """
        # In case of malformed input, send failure notification
        if not self.validate_request(sa_req_list, notification_data):
            # REVISIT(JAGADISH): Need to send failure notification
            return

        if len(sa_req_list) > 1:
            # Multiple request data blobs need batch processing.
            LOG.info("Creating event PROCESS BATCH")
            event_data = {'sa_req_list': sa_req_list,
                          'notification_data': notification_data}
            batch_event = self.sc.new_event(id=const.EVENT_PROCESS_BATCH,
                                            data=event_data, key=None)
            self.sc.post_event(batch_event)
            return

        request = sa_req_list[0]
        agent_info = request['agent_info']
        # Renaming the neutron context in resource data of *aaS to context.
        # agent_info carries the API context plus service type/vendor and
        # resource information, constructed inside the demuxer library.
        if (not request['is_generic_config'] and
                agent_info['resource'] not in const.NFP_SERVICE_LIST):
            resource_data = request['resource_data']
            # Overload the neutron context with agent_info, then expose it
            # to the *aaS agent under the name "context".
            resource_data['neutron_context'].update(
                {'agent_info': agent_info})
            resource_data['context'] = resource_data.pop('neutron_context')
            getattr(self, request['method'])(**resource_data)
        else:
            agent_info.update({'notification_data': notification_data})
            getattr(self, request['method'])(
                agent_info, request['resource_data'])
class AgentBaseNotification(object):
    """Enqueues notification events into the notification queue.

    Responses from the REST calls made to the VM are fed to the
    under-cloud components through this notification handle.
    """

    API_VERSION = '1.0'

    def __init__(self, sc):
        self.sc = sc
        self.topic = const.NOTIFICATION_QUEUE
        target = messaging.Target(topic=self.topic,
                                  version=self.API_VERSION)
        self.client = n_rpc.get_client(target)
        self.cctxt = self.client.prepare(version=self.API_VERSION,
                                         topic=self.topic)

    def _notification(self, data):
        """Enqueues a notification event into const.NOTIFICATION_QUEUE.

        Enqueued events are retrieved when the get_notifications() API
        lands on the configurator.

        :param data: Event data blob

        Returns: None
        """
        # NOTE: ``self`` is deliberately passed as the RPC context;
        # oslo.messaging serializes it through to_dict() below.
        self.cctxt.cast(self, 'send_notification', notification_data=[data])

    def to_dict(self):
        # Empty context payload used when ``self`` is cast as the context.
        return {}
class AgentBaseEventHandler(nfp_api.NfpEventHandler):
    """Super class for all agents to handle batch events."""

    def __init__(self, sc, drivers, rpcmgr):
        # sc: core service controller; drivers: loaded service drivers;
        # rpcmgr: RPC manager owning this handler.
        self.sc = sc
        self.drivers = drivers
        self.rpcmgr = rpcmgr
        self.notify = AgentBaseNotification(self.sc)

    def process_batch(self, ev):
        """Processes a request with multiple data blobs.

        Configurator processes the request with multiple data blobs and
        sends a list of service information to be processed. This function
        goes through the list of service information and invokes specific
        service driver methods. After processing each request data blob, a
        notification data blob is prepared and accumulated; on failure the
        accumulated notifications are flushed and an exception is raised.

        :param ev: Event instance that contains information of event type
        and corresponding event data to be processed.
        """
        # Get service agent information list and notification data list
        # from the event data
        sa_req_list = ev.data.get('sa_req_list')
        notification_data = ev.data.get('notification_data')

        for request in sa_req_list:
            # Pre-initialize names referenced in the except/finally paths
            # so an early failure (e.g. malformed request dict) cannot
            # raise UnboundLocalError while the error is being reported.
            method = None
            result = None
            success = False
            resource = context = service_type = None
            try:
                # Extract the parameters needed for driver invocation.
                method = request['method']
                is_generic_config = request['is_generic_config']
                resource_data = request['resource_data']
                agent_info = request['agent_info']
                resource = agent_info['resource']
                # agent_info contains the API context.
                context = agent_info['context']
                service_vendor = agent_info['service_vendor']
                service_type = agent_info['resource_type']
                service_feature = agent_info['service_feature']
                if not is_generic_config:
                    # Rename the staged neutron context for *aaS drivers.
                    # Bug fix: this previously always mutated
                    # sa_req_list[0] instead of the current request, which
                    # corrupted batches with more than one element.
                    resource_data['context'] = resource_data.pop(
                        'neutron_context')
                # Get the service driver and invoke its method
                driver = self._get_driver(service_type, service_vendor,
                                          service_feature)
                # Service driver should return "success" on successful API
                # processing. All other return values and exceptions are
                # treated as failures.
                if is_generic_config:
                    result = getattr(driver, method)(context, resource_data)
                else:
                    result = getattr(driver, method)(**resource_data)
                success = (result == 'SUCCESS')
            except Exception as err:
                result = ("Failed to process %s request. %s" %
                          (method, str(err).capitalize()))
                success = False
            finally:
                # Prepare the notification and populate notification data.
                if result in const.SUCCESS:
                    data = {'status_code': const.SUCCESS}
                else:
                    data = {'status_code': const.FAILURE,
                            'error_msg': result}
                msg = {'info': {'service_type': service_type,
                                'context': context},
                       'notification': [{'resource': resource,
                                         'data': data}]
                       }
                # First processed blob seeds the notification dict; later
                # ones are appended so a batch still emits one dictionary.
                if not notification_data:
                    notification_data.update(msg)
                else:
                    data = {'resource': resource,
                            'data': data}
                    notification_data['notification'].append(data)
                if not success:
                    # Flush what has been accumulated so far and abort the
                    # remainder of the batch.
                    self.notify._notification(notification_data)
                    raise Exception(msg)
        self.notify._notification(notification_data)
def init_agent_complete(cm, sc, conf):
    """No-op hook required by the configurator's agent loading protocol."""
    pass
def init_agent(cm, sc, conf):
    """No-op hook required by the configurator's agent loading protocol."""
    pass

View File

@ -1,510 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
import os
import oslo_messaging as messaging
from oslo_serialization import jsonutils
import requests
import six
from gbpservice.contrib.nfp.configurator.agents import agent_base
from gbpservice.contrib.nfp.configurator.lib import constants as common_const
from gbpservice.contrib.nfp.configurator.lib import fw_constants as const
from gbpservice.contrib.nfp.configurator.lib import utils as load_driver
from gbpservice.nfp.core import event as nfp_event
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.core import module as nfp_api
LOG = nfp_logging.getLogger(__name__)
class FwaasRpcSender(agent_base.AgentBaseEventHandler):
    """Implements the FwaaS response path to the Neutron plugin.

    The FwaasEventHandler class invokes these methods to send responses
    from the driver back to the FwaaS Neutron plugin.
    """

    def __init__(self, sc, host, drivers, rpcmgr):
        super(FwaasRpcSender, self).__init__(sc, drivers, rpcmgr)
        self.host = host

    def set_firewall_status(self, agent_info, firewall_id, status,
                            firewall=None):
        """Enqueues a firewall status update towards the neutron plugin.

        :param agent_info: agent info dict carrying the API context
        :param firewall_id: id of firewall resource
        :param status: ACTIVE/ ERROR
        :param firewall: optional firewall resource payload (unused here)
        """
        payload = {'firewall_id': firewall_id,
                   'host': self.host,
                   'status': status,
                   'notification_type': 'set_firewall_status'}
        msg = {'info': {'service_type': const.SERVICE_TYPE,
                        'context': agent_info['context']},
               'notification': [{'resource': agent_info['resource'],
                                 'data': payload}]
               }
        LOG.info("Sending Notification 'Set Firewall Status' to "
                 "Orchestrator for firewall: %(fw_id)s with status:"
                 "%(status)s",
                 {'fw_id': firewall_id,
                  'status': status})
        self.notify._notification(msg)

    def firewall_deleted(self, agent_info, firewall_id, firewall=None):
        """Enqueues a firewall-deleted notification for the neutron plugin.

        :param agent_info: agent info dict carrying the API context
        :param firewall_id: id of firewall resource
        :param firewall: optional firewall resource payload (unused here)
        """
        payload = {'firewall_id': firewall_id,
                   'host': self.host,
                   'notification_type': 'firewall_deleted'}
        msg = {'info': {'service_type': const.SERVICE_TYPE,
                        'context': agent_info['context']},
               'notification': [{'resource': agent_info['resource'],
                                 'data': payload}]
               }
        LOG.info("Sending Notification 'Firewall Deleted' to "
                 "Orchestrator for firewall: %(fw_id)s ",
                 {'fw_id': firewall_id})
        self.notify._notification(msg)
class FWaasRpcManager(agent_base.AgentBaseRPCManager):
    """Receives FwaaS requests from Configurator and enqueues worker events.

    Methods of this class are invoked by the configurator. Events are
    created according to the requests received and enqueued to worker
    queues.
    """

    RPC_API_VERSION = '1.0'
    target = messaging.Target(version=RPC_API_VERSION)

    def __init__(self, sc, conf):
        """Instantiates child and parent class objects.

        :param sc: Service Controller object that is used to communicate
        with process model core file.
        :param conf: Configuration object that is used for configuration
        parameter access.
        """
        super(FWaasRpcManager, self).__init__(sc, conf)

    def _create_event(self, context, firewall, host, method):
        """Creates and enqueues an event on the worker queues.

        :param context: Neutron context
        :param firewall: Firewall resource object from neutron fwaas plugin
        :param host: Name of the host machine
        :param method: CREATE_FIREWALL/UPDATE_FIREWALL/DELETE_FIREWALL
        """
        # Large firewall payloads (e.g. 250-rule sets, multiplied per
        # consumer in the chain) cannot be sent over the worker pipe even
        # zipped, so the blob is parked in a file and the event carries
        # only its path; the event handler reads and deletes it.
        # NOTE(review): the dump path is predictable under /tmp
        # (/tmp/<firewall-id>) -- consider tempfile if hardening is needed.
        # REVISIT(mak): How to send large data ?
        # New API required to send over unix socket ?
        filename = "/tmp/" + firewall['id']
        with open(filename, 'w') as dump_file:
            dump_file.write(jsonutils.dumps(firewall))
        context['service_info'] = {}
        event_data = {'context': context,
                      'firewall': {'file_path': filename},
                      'host': host}
        event = self.sc.new_event(id=method, data=event_data, key=None)
        self.sc.post_event(event)

    def create_firewall(self, context, firewall, host):
        """Receives request to create firewall from configurator."""
        LOG.info("Received request 'Create Firewall'.")
        self._create_event(context, firewall,
                           host, const.FIREWALL_CREATE_EVENT)

    def update_firewall(self, context, firewall, host):
        """Receives request to update firewall from configurator."""
        LOG.info("Received request 'Update Firewall'.")
        self._create_event(context, firewall,
                           host, const.FIREWALL_UPDATE_EVENT)

    def delete_firewall(self, context, firewall, host):
        """Receives request to delete firewall from configurator."""
        LOG.info("Received request 'Delete Firewall'.")
        self._create_event(context, firewall,
                           host, const.FIREWALL_DELETE_EVENT)
class FWaasEventHandler(nfp_api.NfpEventHandler):
    """Handler class which invokes firewall driver methods.

    Worker processes dequeue the worker queues and invoke the
    appropriate handler class methods for Fwaas methods.
    """

    def __init__(self, sc, drivers, rpcmgr, conf):
        """Instantiates class object.

        :param sc: Service Controller object that is used to communicate
        with process model core file.
        :param drivers: dictionary of driver name to object mapping
        :param rpcmgr: FwaasRpcManager class object
        :param conf: oslo configuration object
        """
        self.sc = sc
        self.conf = conf
        self.drivers = drivers
        self.host = self.conf.host
        self.rpcmgr = rpcmgr
        self.plugin_rpc = FwaasRpcSender(sc, self.host,
                                         self.drivers, self.rpcmgr)

    def _get_driver(self, service_vendor, service_feature):
        """Retrieves the driver object for the given vendor and feature."""
        driver_id = const.SERVICE_TYPE + service_vendor + service_feature
        return self.drivers[driver_id]

    def _is_firewall_rule_exists(self, fw):
        """Returns True if the firewall carries at least one rule.

        :param fw: Firewall resource object
        """
        return bool(fw['firewall_rule_list'])

    def handle_event(self, ev):
        """Demultiplexes the firewall request to appropriate
        driver methods.

        :param ev: event object sent from process model event handler
        """
        try:
            msg = ("Handling event %s" % (ev.id))
            LOG.info(msg)
            # The context here in ev.data is the neutron context that was
            # renamed to context in the agent_base. This erstwhile
            # neutron context contains the agent info which in turn contains
            # the API context alongside other relevant information like
            # service vendor and type. Agent info is constructed inside
            # the demuxer library.
            if ev.data['firewall'].get('file_path', None):
                # Large payloads were staged in a file by
                # FWaasRpcManager._create_event; read them back and clean up.
                filename = ev.data['firewall']['file_path']
                string = str()
                with open(filename, 'r') as f:
                    string = f.read()
                ev.data['firewall'] = jsonutils.loads(string)
                try:
                    os.remove(filename)
                except Exception as e:
                    # Failure to clean up the staging file is non-fatal.
                    msg = ("Exception while removing the file %r, "
                           "with error: %r" % (filename, e))
                    LOG.error(msg)
            agent_info = ev.data['context']['agent_info']
            service_vendor = agent_info['service_vendor']
            service_feature = agent_info.get('service_feature', '')
            driver = self._get_driver(service_vendor, service_feature)
            LOG.info("Invoking driver with service vendor:"
                     "%(service_vendor)s ",
                     {'service_vendor': service_vendor})
            self.method = getattr(driver, "%s" % (ev.id.lower()))
            self.invoke_driver_for_plugin_api(ev)
            msg = ("Handled event %s successfully" % (ev.id))
            LOG.info(msg)
        except Exception as err:
            msg = ("Failed handling event: %s. Reason %s"
                   % (ev.id, str(err).capitalize()))
            LOG.error(msg)

    def _remove_duplicate_fw_rules(self, rules):
        """Removes duplicate rules from the rules list.

        Rules are considered duplicates when they match on every field in
        filter_keys; positions are renumbered contiguously afterwards.
        """
        # 'description' filter field needs to be added if required
        filter_keys = ['action', 'destination_ip_address', 'destination_port',
                       'enabled', 'ip_version', 'protocol',
                       'source_ip_address', 'source_port', 'shared']
        filter_rules = []
        for rule in rules:
            filter_rules.append({k: rule[k] for k in filter_keys})
        # A set of tuplized dicts collapses duplicates on the filter keys.
        unique_rules = [dict(tupleized) for tupleized in set(
            tuple(rule.items()) for rule in filter_rules)]
        result = []
        for d1 in unique_rules:
            # Recover the first full rule matching each unique key subset.
            for d2 in rules:
                if d1.items() <= d2.items():
                    result.append(d2)
                    break
        result.sort(key=operator.itemgetter('position'))
        for index, x in enumerate(result):
            x['position'] = index + 1
        return result

    def invoke_driver_for_plugin_api(self, ev):
        """Invokes the appropriate driver methods.

        :param ev: event object sent from process model event handler
        """
        context = ev.data['context']
        agent_info = context.get('agent_info')
        firewall = ev.data.get('firewall')
        host = ev.data.get('host')
        firewall['firewall_rule_list'] = self._remove_duplicate_fw_rules(
            firewall['firewall_rule_list'])
        if ev.id == const.FIREWALL_CREATE_EVENT:
            if not self._is_firewall_rule_exists(firewall):
                msg = ("Firewall rule list is empty, setting Firewall "
                       "status to ACTIVE %s" % (firewall))
                LOG.info(msg)
                return self.plugin_rpc.set_firewall_status(
                    agent_info, firewall['id'],
                    common_const.STATUS_ACTIVE, firewall)
            # Added to handle in service vm agents. VM agent will add
            # default DROP rule.
            # if not self._is_firewall_rule_exists(firewall):
            #     self.plugin_rpc.set_firewall_status(
            #         context, firewall['id'], const.STATUS_ACTIVE)
            try:
                status = self.method(context, firewall, host)
            except Exception as err:
                self.plugin_rpc.set_firewall_status(
                    agent_info, firewall['id'], common_const.STATUS_ERROR)
                msg = ("Failed to configure Firewall and status is "
                       "changed to ERROR. %s." % str(err).capitalize())
                LOG.error(msg)
            else:
                self.plugin_rpc.set_firewall_status(
                    agent_info, firewall['id'], status, firewall)
                msg = ("Configured Firewall and status set to %s" % status)
                LOG.info(msg)
        elif ev.id == const.FIREWALL_DELETE_EVENT:
            if not self._is_firewall_rule_exists(firewall):
                msg = ("Firewall rule list is empty, sending firewall deleted "
                       "status to plugin %s" % (firewall))
                LOG.info(msg)
                return self.plugin_rpc.firewall_deleted(
                    agent_info, firewall['id'], firewall)
            try:
                status = self.method(context, firewall, host)
            except requests.ConnectionError:
                # REVISIT(VIKASH): It can't be correct everytime
                msg = ("There is a connection error for firewall %r of "
                       "tenant %r. Assuming either there is serious "
                       "issue with VM or data path is completely "
                       "broken. For now marking that as delete."
                       % (firewall['id'], firewall['tenant_id']))
                LOG.warning(msg)
                self.plugin_rpc.firewall_deleted(
                    agent_info, firewall['id'], firewall)
            except Exception as err:
                # REVISIT(VIKASH): Is it correct to raise ? As the subsequent
                # attempt to clean will only re-raise the last one.And it
                # can go on and on and may not be ever recovered.
                self.plugin_rpc.set_firewall_status(
                    agent_info, firewall['id'], common_const.STATUS_ERROR)
                msg = ("Failed to delete Firewall and status is "
                       "changed to ERROR. %s." % str(err).capitalize())
                LOG.error(msg)
                # raise(err)
            else:
                if status == common_const.STATUS_ERROR:
                    self.plugin_rpc.set_firewall_status(
                        agent_info, firewall['id'], status)
                else:
                    msg = ("Firewall %r deleted of tenant: %r" % (
                        firewall['id'], firewall['tenant_id']))
                    LOG.info(msg)
                    self.plugin_rpc.firewall_deleted(
                        agent_info, firewall['id'], firewall)
        elif ev.id == const.FIREWALL_UPDATE_EVENT:
            if not self._is_firewall_rule_exists(firewall):
                # Bug fix: firewall['id'] was previously omitted here,
                # which shifted STATUS_ACTIVE into the firewall_id argument
                # and the firewall dict into status. Now matches the
                # CREATE branch above.
                return self.plugin_rpc.set_firewall_status(
                    agent_info, firewall['id'],
                    common_const.STATUS_ACTIVE, firewall)
            try:
                status = self.method(context, firewall, host)
            except Exception as err:
                self.plugin_rpc.set_firewall_status(
                    agent_info, firewall['id'], common_const.STATUS_ERROR)
                msg = ("Failed to update Firewall and status is "
                       "changed to ERROR. %s." % str(err).capitalize())
                LOG.error(msg)
            else:
                self.plugin_rpc.set_firewall_status(
                    agent_info, firewall['id'], status, firewall)
                msg = ("Updated Firewall and status set to %s" % status)
                LOG.info(msg)
        else:
            msg = ("Wrong call to Fwaas event handler.")
            raise Exception(msg)
def events_init(sc, drivers, rpcmgr, conf):
    """Registers events with core service controller.

    All the events will come to handle_event method of the class instance
    registered in the 'handler' field.

    :param drivers: Driver instances registered with the service agent
    :param rpcmgr: Instance to receive all the RPC messages from
    configurator module.

    Returns: None
    """
    # A dedicated handler instance is created per event id, matching the
    # original registration behavior.
    evs = [nfp_event.Event(id=event_id,
                           handler=FWaasEventHandler(sc, drivers,
                                                     rpcmgr, conf))
           for event_id in (const.FIREWALL_CREATE_EVENT,
                            const.FIREWALL_UPDATE_EVENT,
                            const.FIREWALL_DELETE_EVENT)]
    sc.register_events(evs)
def load_drivers(conf):
    """Imports all the driver files corresponding to this agent.

    Returns: Dictionary of driver objects keyed by service type and
    vendor name
    """
    loader = load_driver.ConfiguratorUtils(conf)
    drivers = loader.load_drivers(const.SERVICE_TYPE)
    # Replace each driver class with an instantiated driver object.
    for driver_key in list(drivers):
        drivers[driver_key] = drivers[driver_key](conf=conf)
    LOG.info("Firewall loaded drivers:%(drivers)s",
             {'drivers': drivers})
    return drivers
def register_service_agent(cm, sc, conf, rpcmgr):
    """Registers the Fwaas service agent with the configurator module.

    :param cm: Instance of configurator module
    :param sc: Instance of core service controller
    :param conf: Instance of oslo configuration
    :param rpcmgr: Instance containing RPC methods which are invoked by
    configurator module on corresponding RPC message arrival
    """
    cm.register_service_agent(const.SERVICE_TYPE, rpcmgr)
def init_agent(cm, sc, conf):
    """Initializes Fwaas agent.

    Loads drivers, wires up event handling and registers the agent with
    the configurator; any failure is logged and re-raised.

    :param cm: Instance of configuration module
    :param sc: Instance of core service controller
    :param conf: Instance of oslo configuration
    """
    try:
        drivers = load_drivers(conf)
    except Exception as err:
        LOG.error("Fwaas failed to load drivers. %s"
                  % (str(err).capitalize()))
        raise Exception(err)
    LOG.debug("Fwaas loaded drivers successfully.")

    rpcmgr = FWaasRpcManager(sc, conf)

    try:
        events_init(sc, drivers, rpcmgr, conf)
    except Exception as err:
        LOG.error("Fwaas Events initialization unsuccessful. %s"
                  % (str(err).capitalize()))
        raise Exception(err)
    LOG.debug("Fwaas Events initialization successful.")

    try:
        register_service_agent(cm, sc, conf, rpcmgr)
    except Exception as err:
        LOG.error("Fwaas service agent registration unsuccessful. %s"
                  % (str(err).capitalize()))
        raise Exception(err)
    LOG.debug("Fwaas service agent registration successful.")

    LOG.info("FIREWALL as a Service Module Initialized.")
def init_agent_complete(cm, sc, conf):
    """Post-initialization hook; would start periodic tasks if any."""
    LOG.info(" Firewall agent init complete")

View File

@ -1,546 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import six
from gbpservice.contrib.nfp.configurator.agents import agent_base
from gbpservice.contrib.nfp.configurator.lib import (
generic_config_constants as gen_cfg_const)
from gbpservice.contrib.nfp.configurator.lib import constants as common_const
from gbpservice.contrib.nfp.configurator.lib import data_parser
from gbpservice.contrib.nfp.configurator.lib import utils
from gbpservice.nfp.core import event as nfp_event
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.core import module as nfp_api
LOG = nfp_logging.getLogger(__name__)
class GenericConfigRpcManager(agent_base.AgentBaseRPCManager):
    """Processes RPC messages that the configurator forwards to this agent.

    The configurator's RPC layer receives REST requests and invokes the
    APIs of this class, whose instance is registered with the configurator
    via register_service_agent. Each API wraps its payload in an event and
    hands it to the core service controller for worker processing.
    """

    def __init__(self, sc, conf):
        """Instantiates child and parent class objects.

        :param sc: Service Controller object that is used for interfacing
        with core service controller.
        :param conf: Configuration object that is used for configuration
        parameter access.
        """
        self.parse = data_parser.DataParser()
        super(GenericConfigRpcManager, self).__init__(sc, conf)

    def _send_event(self, context, resource_data, event_id, event_key=None):
        """Posts an event to the framework.

        :param context: agent info dictionary prepared in the demuxer
        library, containing the API context alongside other information.
        :param resource_data: RPC request payload passed as event data.
        :param event_id: unique identifier for the event.
        :param event_key: optional event key for serialization.
        """
        payload = {'context': context,
                   'resource_data': resource_data}
        event = self.sc.new_event(id=event_id, data=payload, key=event_key)
        self.sc.post_event(event)

    def configure_interfaces(self, context, resource_data):
        """Enqueues a 'configure interfaces' request for a worker."""
        self._send_event(context,
                         resource_data,
                         gen_cfg_const.EVENT_CONFIGURE_INTERFACES)

    def clear_interfaces(self, context, resource_data):
        """Enqueues a 'clear interfaces' request for a worker."""
        self._send_event(context,
                         resource_data,
                         gen_cfg_const.EVENT_CLEAR_INTERFACES)

    def configure_routes(self, context, resource_data):
        """Enqueues a 'configure routes' request for a worker."""
        self._send_event(context,
                         resource_data,
                         gen_cfg_const.EVENT_CONFIGURE_ROUTES)

    def clear_routes(self, context, resource_data):
        """Enqueues a 'clear routes' request for a worker."""
        self._send_event(context,
                         resource_data,
                         gen_cfg_const.EVENT_CLEAR_ROUTES)

    def configure_healthmonitor(self, context, resource_data):
        """Enqueues a 'configure healthmonitor' request for a worker.

        The event is keyed on the device vm id so it can later be
        cancelled by clear_healthmonitor.
        """
        LOG.info("Received configure health monitor api for nfds:"
                 "%(nfds)s",
                 {'nfds': resource_data['nfds']})
        resource_data['fail_count'] = 0
        self._send_event(context,
                         resource_data,
                         gen_cfg_const.EVENT_CONFIGURE_HEALTHMONITOR,
                         resource_data['nfds'][0]['vmid'])

    def clear_healthmonitor(self, context, resource_data):
        """Stops the periodic health monitor poll for the given device."""
        LOG.info("Received clear health monitor api for nfds:"
                 "%(nfds)s",
                 {'nfds': resource_data['nfds']})
        self.sc.stop_poll_event(
            resource_data['nfds'][0]['vmid'],
            gen_cfg_const.EVENT_CONFIGURE_HEALTHMONITOR)
class GenericConfigEventHandler(agent_base.AgentBaseEventHandler,
nfp_api.NfpEventHandler):
"""Implements event handlers and their helper methods.
Object of this class is registered with the event class of core service
controller. Based on the event key, handle_event method of this class is
invoked by core service controller.
"""
def __init__(self, sc, drivers, rpcmgr):
super(GenericConfigEventHandler, self).__init__(
sc, drivers, rpcmgr)
self.sc = sc
def _get_driver(self, service_type, service_vendor, service_feature):
"""Retrieves service driver object based on service type input.
Currently, service drivers are identified with service type. Support
for single driver per service type is provided. When multi-vendor
support is going to be provided, the driver should be selected based
on both service type and vendor name.
:param service_type: Service type - firewall/vpn/loadbalancer
Returns: Service driver instance
"""
return self.drivers[service_type + service_vendor + service_feature]
    def handle_event(self, ev):
        """Processes the generated events in worker context.
        Processes the following events.
        - Configure Interfaces
        - Clear Interfaces
        - Configure routes
        - Clear routes
        - Configure health monitor
        - Clear health monitor
        Enqueues responses into notification queue.
        Returns: None
        """
        # Best-effort extraction of the network function (NF) and NF
        # instance (NFI) ids, used only for the log message below. Batch
        # events carry them inside the first request blob; single events
        # carry them directly in the event context.
        try:
            event_data = ev.data
            if ev.id == 'PROCESS_BATCH':
                NFI = event_data['sa_req_list'][0][
                    'agent_info']['context']['nfi_id']
                NF = event_data['sa_req_list'][0][
                    'agent_info']['context']['nf_id']
            else:
                NFI = event_data['context']['context']['nfi_id']
                NF = event_data['context']['context']['nf_id']
        except Exception:
            # The ids are optional logging context; never fail the event
            # over them.
            NFI = None
            NF = None
        msg = ("Handling event '%s', with NF:%s and NFI:%s"
               % (ev.id, NF, NFI))
        LOG.info(msg)
        # Process batch of request data blobs
        try:
            # Process batch of request data blobs
            if ev.id == common_const.EVENT_PROCESS_BATCH:
                self.process_batch(ev)
                return
            # Process HM poll events
            elif ev.id == gen_cfg_const.EVENT_CONFIGURE_HEALTHMONITOR:
                resource_data = ev.data.get('resource_data')
                # INITIAL health checks are retried a bounded number of
                # times; FOREVER polls run until explicitly stopped via
                # clear_healthmonitor.
                periodicity = resource_data['nfds'][0]['periodicity']
                EV_CONF_HM_MAXRETRY = (
                    gen_cfg_const.EVENT_CONFIGURE_HEALTHMONITOR_MAXRETRY)
                if periodicity == gen_cfg_const.INITIAL:
                    self.sc.poll_event(
                        ev,
                        max_times=EV_CONF_HM_MAXRETRY)
                elif periodicity == gen_cfg_const.FOREVER:
                    self.sc.poll_event(ev)
            else:
                # All remaining event ids map directly to driver methods.
                self._process_event(ev)
        except Exception as err:
            msg = ("Failed to process event %s, reason %s "
                   % (ev.data, err))
            LOG.error(msg)
            return
def send_periodic_hm_notification(self, ev, nfd, result, notification_id):
ev_copy = copy.deepcopy(ev)
ev_copy.data["context"]["notification_data"] = {}
ev_copy.data["context"]["context"]["nfp_context"]["id"] = (
notification_id)
ev_copy.data['context']['context']['nfd_id'] = nfd.get('vmid')
notification_data = self._prepare_notification_data(ev_copy, result)
self.notify._notification(notification_data)
def handle_periodic_hm(self, ev, result):
resource_data = ev.data['resource_data']
nfd = ev.data["resource_data"]['nfds'][0]
periodic_polling_reason = nfd["periodic_polling_reason"]
if result == common_const.FAILED:
"""If health monitoring fails continuously for MAX_FAIL_COUNT times
send fail notification to orchestrator
"""
resource_data['fail_count'] = resource_data.get('fail_count') + 1
if (resource_data.get('fail_count') >=
gen_cfg_const.MAX_FAIL_COUNT):
# REVISIT(Shishir): Remove statefull logic from here,
# need to come up with statleless logic.
if periodic_polling_reason == (
gen_cfg_const.DEVICE_TO_BECOME_DOWN):
notification_id = gen_cfg_const.DEVICE_NOT_REACHABLE
self.send_periodic_hm_notification(ev, nfd, result,
notification_id)
nfd["periodic_polling_reason"] = (
gen_cfg_const.DEVICE_TO_BECOME_UP)
elif result == common_const.SUCCESS:
"""set fail_count to 0 if it had failed earlier even once
"""
resource_data['fail_count'] = 0
if periodic_polling_reason == gen_cfg_const.DEVICE_TO_BECOME_UP:
notification_id = gen_cfg_const.DEVICE_REACHABLE
self.send_periodic_hm_notification(ev, nfd, result,
notification_id)
nfd["periodic_polling_reason"] = (
gen_cfg_const.DEVICE_TO_BECOME_DOWN)
    def _process_event(self, ev):
        """Dispatch a single generic-config event to its service driver.

        Looks up the driver keyed by service type/vendor/feature, invokes
        the driver method named after the lowercased event id, then emits
        a notification with the result. CONFIGURE_HEALTHMONITOR events
        get special handling depending on their 'periodicity'.

        :param ev: event carrying 'resource_data' and the agent info dict
        Returns: {'poll': False} to stop polling when an INITIAL health
            monitor succeeds; otherwise None.
        """
        LOG.debug(" Handling event %s ", (ev.data))
        # Process single request data blob
        resource_data = ev.data['resource_data']
        # The context inside ev.data is the agent info dictionary prepared
        # in demuxer library which contains the API context alongside
        # other information like service vendor, type etc..
        agent_info = ev.data['context']
        context = agent_info['context']
        service_type = agent_info['resource_type']
        service_vendor = agent_info['service_vendor']
        service_feature = agent_info.get('service_feature', '')
        try:
            msg = ("Worker process with ID: %s starting "
                   "to handle task: %s for service type: %s. "
                   % (os.getpid(), ev.id, str(service_type)))
            LOG.debug(msg)
            driver = self._get_driver(service_type, service_vendor,
                                      service_feature)
            # Invoke service driver methods based on event type received:
            # the event id doubles as the driver method name.
            result = getattr(driver, "%s" % ev.id.lower())(context,
                                                           resource_data)
        except Exception as err:
            msg = ("Failed to process ev.id=%s, ev=%s reason=%s" %
                   (ev.id, ev.data, err))
            LOG.error(msg)
            # A driver failure is reported as a FAILED result rather
            # than propagated, so a notification still goes out below.
            result = common_const.FAILED
        if ev.id == gen_cfg_const.EVENT_CONFIGURE_HEALTHMONITOR:
            if (resource_data['nfds'][0][
                    'periodicity'] == gen_cfg_const.INITIAL and
                    result == common_const.SUCCESS):
                # One-shot (INITIAL) health check passed: notify once and
                # tell the poller to stop.
                notification_data = self._prepare_notification_data(ev,
                                                                    result)
                self.notify._notification(notification_data)
                msg = ("VM Health check successful")
                LOG.info(msg)
                return {'poll': False}
            elif resource_data['nfds'][0]['periodicity'] == (
                    gen_cfg_const.FOREVER):
                # FOREVER polling: delegate to the periodic handler which
                # tracks device reachability transitions.
                ev.data["context"]["resource"] = gen_cfg_const.PERIODIC_HM
                self.handle_periodic_hm(ev, result)
        else:
            """For other events, irrespective of result send notification"""
            notification_data = self._prepare_notification_data(ev, result)
            self.notify._notification(notification_data)
def prepare_notification_result(self, result):
if result in common_const.SUCCESS:
data = {'status_code': common_const.SUCCESS}
else:
data = {'status_code': common_const.FAILURE,
'error_msg': result}
return data
def _prepare_notification_data(self, ev, result):
"""Prepare notification data as expected by config agent
:param ev: event object
:param result: result of the handled event
Returns: notification_data
"""
agent_info = ev.data['context']
context = agent_info['context']
# Retrieve notification and remove it from context. Context is used
# as transport from batch processing function to this last event
# processing function. To keep the context unchanged, delete the
# notification_data before invoking driver API.
notification_data = agent_info['notification_data']
service_type = agent_info['resource_type']
resource = agent_info['resource']
data = self.prepare_notification_result(result)
msg = {'info': {'service_type': service_type,
'context': context},
'notification': [{'resource': resource,
'data': data}]
}
if not notification_data:
notification_data.update(msg)
else:
data = {'resource': resource,
'data': data}
notification_data['notification'].append(data)
return notification_data
def event_cancelled(self, ev, reason):
"""Invoked by process framework when poll ev object reaches
polling threshold ev.max_times.
Finally it Enqueues response into notification queue.
:param ev: Event object
Returns: None
"""
msg = ('Cancelled poll event. Event Data: %s ' % (ev.data))
LOG.error(msg)
result = common_const.FAILED
notification_data = self._prepare_notification_data(ev, result)
self.notify._notification(notification_data)
@nfp_api.poll_event_desc(
event=gen_cfg_const.EVENT_CONFIGURE_HEALTHMONITOR,
spacing=gen_cfg_const.EVENT_CONFIGURE_HEALTHMONITOR_SPACING)
def handle_configure_healthmonitor(self, ev):
"""Decorator method called for poll event CONFIGURE_HEALTHMONITOR
Finally it Enqueues response into notification queue.
:param ev: Event object
Returns: None
"""
return self._process_event(ev)
def events_init(sc, drivers, rpcmgr):
    """Registers events with core service controller.

    All the events are dispatched to the handle_event method of the
    handler instance registered in the 'handler' field.

    :param sc: core service controller instance
    :param drivers: Driver instances registered with the service agent
    :param rpcmgr: Instance to receive all the RPC messages from
        configurator module.
    Returns: None
    """
    event_ids = (
        gen_cfg_const.EVENT_CONFIGURE_INTERFACES,
        gen_cfg_const.EVENT_CLEAR_INTERFACES,
        gen_cfg_const.EVENT_CONFIGURE_ROUTES,
        gen_cfg_const.EVENT_CLEAR_ROUTES,
        gen_cfg_const.EVENT_CONFIGURE_HEALTHMONITOR,
        gen_cfg_const.EVENT_CLEAR_HEALTHMONITOR,
        common_const.EVENT_PROCESS_BATCH,
    )
    # One handler instance per event, matching the original wiring.
    sc.register_events(
        [nfp_event.Event(id=event_id,
                         handler=GenericConfigEventHandler(sc, drivers,
                                                           rpcmgr))
         for event_id in event_ids])
def load_drivers(conf):
    """Imports all the driver files.

    :param conf: oslo configuration object passed to each driver
    Returns: Dictionary of driver objects keyed by service type and
        vendor name
    """
    cutils = utils.ConfiguratorUtils(conf)
    drivers = cutils.load_drivers()
    # Replace each loaded driver class with an instantiated driver object.
    for service_type, driver_name in six.iteritems(drivers):
        driver_obj = driver_name(conf=conf)
        drivers[service_type] = driver_obj
    # Fixed duplicated word in the original log message
    # ("loaded drivers drivers").
    LOG.info("Generic config agent loaded drivers:"
             "%(drivers)s",
             {'drivers': drivers})
    return drivers
def register_service_agent(cm, sc, conf, rpcmgr):
    """Registers generic configuration service agent with configurator.

    :param cm: Instance of configurator module
    :param sc: Instance of core service controller (unused here; kept for
        interface parity with the other agents)
    :param conf: Instance of oslo configuration (unused here)
    :param rpcmgr: Instance containing RPC methods which are invoked by
        configurator module on corresponding RPC message arrival
    """
    cm.register_service_agent(gen_cfg_const.SERVICE_TYPE, rpcmgr)
def init_agent(cm, sc, conf):
    """Initializes generic configuration agent.

    Loads service drivers, registers events and finally registers the
    agent with the configurator module, re-raising on any failure.

    :param cm: Instance of configuration module
    :param sc: Instance of core service controller
    :param conf: Instance of oslo configuration
    """
    try:
        drivers = load_drivers(conf)
    except Exception as err:
        LOG.error("Generic configuration agent failed to load service "
                  "drivers.Error:%s" % (str(err).capitalize()))
        raise err
    else:
        LOG.debug("Generic configuration agent loaded "
                  "service drivers successfully.")

    rpcmgr = GenericConfigRpcManager(sc, conf)

    try:
        events_init(sc, drivers, rpcmgr)
    except Exception as err:
        LOG.error("Generic configuration agent failed to initialize "
                  "events. %s" % (str(err).capitalize()))
        raise err
    else:
        LOG.debug("Generic configuration agent initialized "
                  "events successfully.")

    try:
        register_service_agent(cm, sc, conf, rpcmgr)
    except Exception as err:
        LOG.error("Failed to register generic configuration agent with "
                  "configurator module. %s" % (str(err).capitalize()))
        raise err
    else:
        LOG.debug("Generic configuration agent registered with "
                  "configuration module successfully.")
def init_agent_complete(cm, sc, conf):
    """Hook invoked once agent initialization has finished."""
    LOG.info("Initialization of generic configuration agent completed.")

File diff suppressed because it is too large Load Diff

View File

@ -1,255 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import oslo_messaging as messaging
import six
from gbpservice.contrib.nfp.configurator.agents import agent_base
from gbpservice.contrib.nfp.configurator.lib import (
nfp_service_constants as const)
from gbpservice.contrib.nfp.configurator.lib import utils as load_driver
from gbpservice.nfp.core import event as nfp_event
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
class ConfigScriptRpcManager(agent_base.AgentBaseRPCManager):
    """RPC entry point for config-script requests from the configurator.

    Methods of this class are invoked by the configurator; each request
    is turned into an event and enqueued for the worker processes.
    """

    RPC_API_VERSION = '1.0'
    target = messaging.Target(version=RPC_API_VERSION)

    def __init__(self, sc, conf):
        """Store service controller and configuration via the base class.

        :param sc: Service Controller object that is used to communicate
            with the process model core file.
        :param conf: Configuration object that is used for configuration
            parameter access.
        """
        super(ConfigScriptRpcManager, self).__init__(sc, conf)

    def run_nfp_service(self, context, resource_data):
        """Handle a request to execute a config script.

        :param context: RPC context
        :param resource_data: configuration script and request information
        """
        LOG.debug("ConfigScriptRpcManager received Create Heat request.")
        event = self.sc.new_event(
            id=const.CREATE_NFP_SERVICE_EVENT,
            data={'context': context,
                  'resource_data': resource_data},
            key=None)
        self.sc.post_event(event)
class ConfigScriptEventHandler(agent_base.AgentBaseEventHandler):
    """Invokes nfp_service driver methods for dequeued worker events.

    Worker processes dequeue the worker queues and invoke the
    appropriate driver method for each ConfigScript request.
    """

    def __init__(self, sc, drivers, rpcmgr):
        """Initializes parent and child class objects.

        :param sc: Service Controller object that is used to communicate
            with process model.
        :param drivers: Dictionary of driver name to object mapping
        :param rpcmgr: ConfigScriptRpcManager class object
        """
        super(ConfigScriptEventHandler, self).__init__(sc, drivers, rpcmgr)
        self.sc = sc
        self.drivers = drivers
        self.rpcmgr = rpcmgr

    def _get_driver(self):
        """Retrieves the nfp_service driver object."""
        driver_id = const.SERVICE_TYPE
        return self.drivers[driver_id]

    def handle_event(self, ev):
        """Demultiplexes the nfp_service request to driver methods.

        :param ev: Event object sent from process model event handler
        """
        # Extract the request pieces BEFORE the try block: the finally
        # clause below references them, and in the original code an early
        # failure (e.g. a missing 'context' key) raised UnboundLocalError
        # from the finally block instead of surfacing the real exception.
        agent_info = ev.data['context']
        notification_context = agent_info['context']
        resource = agent_info['resource']
        resource_data = ev.data['resource_data']
        try:
            msg = ("Worker process with ID: %s starting to "
                   "handle task: %s of type ConfigScript. "
                   % (os.getpid(), ev.id))
            LOG.debug(msg)

            driver = self._get_driver()
            # Driver method name is derived from the resource name.
            self.method = getattr(driver, "run_%s" % resource)
            result = self.method(notification_context, resource_data)
        except Exception as err:
            result = const.ERROR_RESULT
            msg = ("Failed to handle event: %s. %s"
                   % (ev.id, str(err).capitalize()))
            LOG.error(msg)
        finally:
            # Strip transport-only keys before building the notification.
            del agent_info['notification_data']
            del agent_info['service_vendor']
            service_type = agent_info.pop('resource_type')

            if result in const.UNHANDLED_RESULT:
                data = {'status_code': const.UNHANDLED_RESULT}
            else:
                data = {'status_code': const.FAILURE,
                        'error_msg': result}

            msg = {'info': {'service_type': service_type,
                            'context': notification_context},
                   'notification': [{'resource': resource,
                                     'data': data}]
                   }

            self.notify._notification(msg)
def events_init(sc, drivers, rpcmgr):
    """Registers events with core service controller.

    All events are dispatched to the handle_event method of the handler
    registered in the 'handler' field.

    :param sc: core service controller instance
    :param drivers: Driver instances registered with the service agent
    :param rpcmgr: Instance to receive all the RPC messages from
        configurator module.
    Returns: None
    """
    handler = ConfigScriptEventHandler(sc, drivers, rpcmgr)
    sc.register_events(
        [nfp_event.Event(id=const.CREATE_NFP_SERVICE_EVENT,
                         handler=handler)])
def load_drivers(conf):
    """Imports all the driver files corresponding to this agent.

    :param conf: oslo configuration object passed to each driver
    Returns: Dictionary of driver objects keyed by service type and
        vendor name
    """
    loader = load_driver.ConfiguratorUtils(conf)
    drivers = loader.load_drivers(const.SERVICE_TYPE)
    # Swap each driver class for an instantiated driver object.
    for svc_type, driver_cls in six.iteritems(drivers):
        drivers[svc_type] = driver_cls(conf=conf)
    return drivers
def register_service_agent(cm, sc, conf, rpcmgr):
    """Registers ConfigScript service agent with configurator module.

    :param cm: Instance of configurator module
    :param sc: Instance of core service controller (unused here; kept
        for interface parity with the other agents)
    :param conf: Instance of oslo configuration (unused here)
    :param rpcmgr: Instance containing RPC methods which are invoked by
        configurator module on corresponding RPC message arrival
    """
    cm.register_service_agent(const.SERVICE_TYPE, rpcmgr)
def init_agent(cm, sc, conf):
    """Initializes Config Script agent.

    Loads drivers, registers events, then registers the agent with the
    configurator module; any failure is logged and re-raised.

    :param cm: Instance of configuration module
    :param sc: Instance of core service controller
    :param conf: Instance of oslo configuration
    """
    try:
        drivers = load_drivers(conf)
    except Exception as err:
        msg = ("Config Script failed to load drivers. %s"
               % (str(err).capitalize()))
        LOG.error(msg)
        # Re-raise the original exception instead of wrapping it in a
        # bare Exception(), which discarded the type and traceback. This
        # also matches the bare 'raise err' style of the sibling agents.
        raise
    else:
        msg = ("Config Script loaded drivers successfully.")
        LOG.debug(msg)

    rpcmgr = ConfigScriptRpcManager(sc, conf)
    try:
        events_init(sc, drivers, rpcmgr)
    except Exception as err:
        msg = ("Config Script Events initialization unsuccessful. %s"
               % (str(err).capitalize()))
        LOG.error(msg)
        raise
    else:
        msg = ("Config Script Events initialization successful.")
        LOG.debug(msg)

    try:
        register_service_agent(cm, sc, conf, rpcmgr)
    except Exception as err:
        msg = ("Config Script service agent registration unsuccessful. %s"
               % (str(err).capitalize()))
        LOG.error(msg)
        raise
    else:
        msg = ("Config Script service agent registration successful.")
        LOG.debug(msg)

    msg = ("ConfigScript as a Service Module Initialized.")
    LOG.info(msg)
def init_agent_complete(cm, sc, conf):
    """Hook invoked after agent initialization; no periodic tasks here."""
    LOG.info(" Config Script agent init complete")

View File

@ -1,461 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import oslo_messaging as messaging
import six
from gbpservice.contrib.nfp.configurator.agents import agent_base
from gbpservice.contrib.nfp.configurator.drivers.base import base_driver
from gbpservice.contrib.nfp.configurator.lib import data_filter
from gbpservice.contrib.nfp.configurator.lib import utils
from gbpservice.contrib.nfp.configurator.lib import vpn_constants as const
from gbpservice.nfp.core import event as nfp_event
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.core import module as nfp_api
LOG = nfp_logging.getLogger(__name__)
class VpnaasRpcSender(data_filter.Filter):
    """Implements the VPNaas response path to the Neutron plugin.

    Methods of this class are invoked by the VPNaasEventHandler class
    for sending responses from the driver to the VPNaas Neutron plugin.
    """
    RPC_API_VERSION = '1.0'
    target = messaging.Target(version=RPC_API_VERSION)
    def __init__(self, sc):
        # Keep the service controller handle and a notification helper
        # used to enqueue messages back to the plugin.
        self._sc = sc
        self._notify = agent_base.AgentBaseNotification(sc)
        # The Filter base class is initialized with no context of its own.
        super(VpnaasRpcSender, self).__init__(None, None)
    def get_vpn_services(self, context, ids=None, filters=None):
        """Gets the list of vpnservices for a tenant.

        :param context: dictionary which holds details of the vpn service
            type; for IPSEC connections: list of vpnservices, list of
            ipsec connections, ike policy and ipsec policy.
        :param ids: ids based on which the filter library extracts the
            data.
        :param filters: filters based on which the filter library extracts
            the data.
        Returns: Dictionary of the vpn service type which matches the
            filters.
        """
        LOG.info("Sending RPC for GET VPN SERVICES with %(filters)s ",
                 {'filters': filters})
        return self.call(
            context,
            self.make_msg('get_vpn_services', ids=ids, filters=filters))
    def get_vpn_servicecontext(self, context, filters=None):
        """Gets the list of vpnservice contexts on this host.

        :param context: dictionary which holds details of the vpn service
            type (vpnservices, ipsec connections, ike/ipsec policies).
        :param filters: filters based on which the filter library extracts
            the data from the context dictionary.
        Returns: dictionary of vpnservice
        """
        LOG.info("Sending RPC for GET VPN SERVICECONTEXT with "
                 "Filters:%(filters)s ",
                 {'filters': filters})
        return self.call(
            context,
            self.make_msg(
                'get_vpn_servicecontext', filters=filters))
    def get_ipsec_conns(self, context, filters):
        """Gets the list of ipsec connections matching the given filters."""
        LOG.info("Sending RPC for GET IPSEC CONNS with Filters:"
                 "%(filters)s ",
                 {'filters': filters})
        return self.call(
            context,
            self.make_msg(
                'get_ipsec_conns',
                filters=filters))
    def update_status(self, context, status):
        """Update local status.

        Sends a notification that updates the status attribute of
        VPNServices.
        """
        msg = {'info': {'service_type': const.SERVICE_TYPE,
                        'context': context['agent_info']['context']},
               'notification': [{
                   'resource': context['agent_info']['resource'],
                   'data': {'status': status,
                            'notification_type': (
                                'update_status')}}]
               }
        LOG.info("Sending Notification 'Update Status' with "
                 "status:%(status)s ",
                 {'status': status})
        self._notify._notification(msg)
    def ipsec_site_conn_deleted(self, context, resource_id):
        """Notify the VPNaaS plugin about deletion of an ipsec-site-conn."""
        msg = {'info': {'service_type': const.SERVICE_TYPE,
                        'context': context['agent_info']['context']},
               'notification': [{
                   'resource': context['agent_info']['resource'],
                   'data': {'resource_id': resource_id,
                            'notification_type': (
                                'ipsec_site_conn_deleted')}}]
               }
        LOG.info("Sending Notification 'Ipsec Site Conn Deleted' "
                 "for resource:%(resource_id)s ",
                 {'resource_id': resource_id})
        self._notify._notification(msg)
class VPNaasRpcManager(agent_base.AgentBaseRPCManager):
    """Receives requests from the Configurator and turns them into events.

    Methods of this class are invoked by the configurator. Events are
    created according to the requests received and enqueued to worker
    queues.
    """
    RPC_API_VERSION = '1.0'
    target = messaging.Target(version=RPC_API_VERSION)
    def __init__(self, conf, sc):
        """Instantiates child and parent class objects.

        Passes the instances of core service controller and oslo
        configuration to the parent instance in order to provide the
        event enqueue facility for the batch processing event.
        :param conf: Configuration object that is used for configuration
            parameter access.
        :param sc: Service Controller object that is used for interfacing
            with core service controller.
        """
        super(VPNaasRpcManager, self).__init__(sc, conf)
    def vpnservice_updated(self, context, **resource_data):
        """Registers the VPNaas plugin events to update vpn configurations.

        :param context: dictionary, confined to the specific service type.
        :param resource_data: dictionary, confined to the specific
            operation type.
        Returns: None
        """
        LOG.info("Received request 'VPN Service Updated'."
                 "for API '%(api)s'",
                 {'api': resource_data.get('reason', '')})
        arg_dict = {'context': context,
                    'resource_data': resource_data}
        # Serializing the event because simultaneous configure
        # requests overrides the same crypto-map in the service VM
        # which results in corrupting the crypto-map
        resource_type = resource_data.get('rsrc_type')
        if resource_type and resource_type.lower() == 'ipsec_site_connection':
            # Events sharing the same vpnservice id are serialized via
            # 'binding_key', so only one configure runs at a time.
            ev = self.sc.new_event(id='VPNSERVICE_UPDATED',
                                   key=resource_data['resource']['id'],
                                   data=arg_dict,
                                   serialize=True,
                                   binding_key=resource_data[
                                       'resource']['vpnservice_id'])
            msg = "serializing event: %s" % ('VPNSERVICE_UPDATED')
            LOG.debug(msg)
        else:
            ev = self.sc.new_event(id='VPNSERVICE_UPDATED', data=arg_dict)
        self.sc.post_event(ev)
class VPNaasEventHandler(nfp_api.NfpEventHandler):
    """Handler class to invoke the vpn driver methods.

    Every event that gets invoked from a worker process lands here,
    which then makes the call to the corresponding driver method.
    """
    def __init__(self, sc, drivers):
        """Instantiates class object.

        :param sc: Service Controller object that is used to communicate
            with process model core file.
        :param drivers: dictionary of driver name to object mapping
        """
        self._sc = sc
        self._drivers = drivers
        self._plugin_rpc = VpnaasRpcSender(self._sc)
    def _get_driver(self, service_vendor, service_feature):
        # Driver registry keys are SERVICE_TYPE + vendor + feature.
        driver_id = const.SERVICE_TYPE + service_vendor + service_feature
        return self._drivers[driver_id]
    def handle_event(self, ev):
        """Demultiplexes the vpn request to appropriate driver methods.

        :param ev: event object sent from the process model.
        Returns: None
        """
        if ev.id == 'VPN_SYNC':
            # VPN_SYNC is a poll event; hand it to the poller, which
            # invokes the sync() method below on each spacing interval.
            self._sc.poll_event(ev)
        if ev.id == 'VPNSERVICE_UPDATED':
            try:
                msg = ("Worker process with ID: %s starting "
                       "to handle task: %s of topic: %s. "
                       % (os.getpid(),
                          ev.id, const.VPN_GENERIC_CONFIG_RPC_TOPIC))
                LOG.debug(msg)
                agent_info = ev.data['context']['agent_info']
                service_vendor = agent_info['service_vendor']
                service_feature = agent_info['service_feature']
                driver = self._get_driver(service_vendor, service_feature)
                LOG.info("Invoking driver with service vendor:"
                         "%(service_vendor)s ",
                         {'service_vendor': service_vendor})
                # NOTE(review): stores the driver as a CLASS attribute,
                # shared across all handler instances; sync() below relies
                # on it via self.service_driver -- confirm this sharing is
                # intended before reuse.
                setattr(VPNaasEventHandler, "service_driver", driver)
                self._vpnservice_updated(ev, driver)
            except Exception as err:
                msg = ("Failed to perform the operation: %s. %s"
                       % (ev.id, str(err).capitalize()))
                LOG.error(msg)
            finally:
                self._sc.event_complete(ev)
    def _vpnservice_updated(self, ev, driver):
        """Makes the call to the respective operation method of vpn driver.

        :param ev: event object sent from the process model.
        :param driver: vpn driver class object.
        Returns: None.
        """
        context = ev.data.get('context')
        resource_data = ev.data.get('resource_data')
        msg = "Vpn service updated from server side"
        LOG.info(msg)
        try:
            driver.vpnservice_updated(context, resource_data)
            # For a freshly created ipsec connection, mark it INIT and
            # kick off a VPN_SYNC poll to track when the tunnel comes up.
            if 'ipsec_site_conns' in context['service_info']:
                for item in context['service_info']['ipsec_site_conns']:
                    if item['id'] == resource_data['resource']['id'] and (
                            resource_data['reason'] == 'create'):
                        item['status'] = 'INIT'
                        arg_dict = {'context': context,
                                    'resource_data': resource_data}
                        ev1 = self._sc.new_event(id='VPN_SYNC',
                                                 key='VPN_SYNC', data=arg_dict)
                        self._sc.post_event(ev1)
                        break
        except Exception as err:
            msg = ("Failed to update VPN service. %s"
                   % str(err).capitalize())
            LOG.error(msg)
        reason = resource_data.get('reason')
        rsrc = resource_data.get('rsrc_type')
        # Deletion of an ipsec site connection is acknowledged back to the
        # plugin regardless of the driver call's outcome.
        if (reason == 'delete' and rsrc == 'ipsec_site_connection'):
            conn = resource_data['resource']
            resource_id = conn['id']
            self._plugin_rpc.ipsec_site_conn_deleted(context,
                                                     resource_id=resource_id)
    def _get_service_vendor(self, vpn_svc):
        """Extracts the vendor from the description.

        :param vpn_svc: vpn service operation type dictionary,
            which it gets from the filter library
        Returns: vendor name string
        """
        # Description is a semicolon-separated token list; the sixth token
        # is expected to be of the form 'key=<vendor>'.
        svc_desc = vpn_svc['description']
        tokens = svc_desc.split(';')
        vendor = tokens[5].split('=')[1]
        return vendor
    def _sync_ipsec_conns(self, context, svc_context):
        """Gets the status of the vpn service.

        :param context: Dictionary of the vpn service type.
        :param svc_context: vpn service operation type dictionary,
            which it gets from the filter library
        Returns: tunnel state reported by the driver, or None if the
            status check raised.
        """
        try:
            # service_driver is the class attribute set in handle_event.
            return self.service_driver.check_status(context, svc_context)
        except Exception as err:
            msg = ("Failed to sync ipsec connection information. %s."
                   % str(err).capitalize())
            LOG.error(msg)
    @nfp_api.poll_event_desc(event='VPN_SYNC', spacing=10)
    def sync(self, ev):
        """Periodically updates the status of the vpn service, i.e.
        whether the tunnel is UP or DOWN.

        :param ev: poll event carrying the vpn service context.
        Returns: {'poll': False} to stop polling once a final state
            (ACTIVE or ERROR) is reached, else None to keep polling.
        """
        context = ev.data.get('context')
        s2s_contexts = self._plugin_rpc.get_vpn_servicecontext(context)
        state = self._sync_ipsec_conns(context, s2s_contexts[0])
        if state in {const.STATE_ACTIVE,
                     const.STATE_ERROR}:
            return {'poll': False}
def events_init(sc, drivers):
    """Registers events with core service controller.

    All events are dispatched to the handle_event method of the handler
    registered in the 'handler' field.

    :param sc: Object of Service Controller from the process model used
        to register the different events
    :param drivers: Driver instance registered with the service agent
    Returns: None
    """
    event_list = []
    # One handler instance per event id, matching the original wiring.
    for event_id in ('VPNSERVICE_UPDATED', 'VPN_SYNC'):
        event_list.append(
            nfp_event.Event(id=event_id,
                            handler=VPNaasEventHandler(sc, drivers)))
    sc.register_events(event_list)
def load_drivers(sc, conf):
    """Loads the VPN drivers dynamically.

    :param sc: Object of the Service Controller class from core service
        controller (unused here; kept for interface parity).
    :param conf: oslo configuration object passed to each driver.
    Returns: dictionary of instances of the respective driver classes.
    """
    loader = utils.ConfiguratorUtils(conf)
    drivers = loader.load_drivers(const.SERVICE_TYPE)
    # Swap each driver class for an instantiated driver object.
    for svc_type, driver_cls in six.iteritems(drivers):
        drivers[svc_type] = driver_cls(conf=conf)
    return drivers
def register_service_agent(cm, sc, conf):
    """Registers the VPNaas agent with the Configurator module.

    :param cm: Configurator module's object to communicate back and forth
    :param sc: Object of the Service Controller class from core
        service controller.
    :param conf: Object of oslo configurator passed from the core service
        controller
    Returns: None
    """
    cm.register_service_agent(const.SERVICE_TYPE,
                              VPNaasRpcManager(conf, sc))
def init_agent(cm, sc, conf):
    """Loads the drivers and registers the VPNaas agent.

    :param cm: Configurator module's object to communicate back and forth
    :param sc: Object of the Service Controller class from core
        service controller.
    :param conf: Object of oslo configurator passed from the core service
        controller
    Returns: None
    """
    try:
        drivers = load_drivers(sc, conf)
    except Exception as err:
        LOG.error("VPNaas failed to load drivers. %s"
                  % (str(err).capitalize()))
        raise err
    else:
        LOG.debug("VPNaas loaded drivers successfully.")

    try:
        events_init(sc, drivers)
    except Exception as err:
        LOG.error("VPNaas Events initialization unsuccessful. %s"
                  % (str(err).capitalize()))
        raise err
    else:
        LOG.debug("VPNaas Events initialization successful.")

    try:
        register_service_agent(cm, sc, conf)
        # Give the drivers a handle on the RPC sender so they can report
        # results back to the plugin.
        bdobj = base_driver.BaseDriver(conf)
        bdobj.register_agent_object_with_driver('agent', VpnaasRpcSender(sc))
    except Exception as err:
        LOG.error("VPNaas service agent registration unsuccessful. %s"
                  % (str(err).capitalize()))
        raise err
    else:
        LOG.debug("VPNaas service agent registration successful.")

    LOG.info("VPN as a Service Module Initialized.")
def init_agent_complete(cm, sc, conf):
    """Hook invoked once agent initialization has finished."""
    LOG.info(" vpn agent init complete")

View File

@ -1,167 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
from oslo_serialization import jsonutils
import requests
from gbpservice.contrib.nfp.configurator.lib import constants as const
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
def set_class_attr(**kwargs):
    """Class decorator that sets lowercased attributes on the class.

    Each keyword is attached to the decorated class with both the
    attribute name and its string value lowercased, e.g.
    ``@set_class_attr(VENDOR='VyOS')`` yields ``cls.vendor == 'vyos'``.

    :param kwargs: attribute-name -> string-value pairs
    Returns: the decorating function
    """
    def f(class_obj):
        # No need to materialize the items view into a list here: it is
        # only iterated, never mutated.
        for key, value in kwargs.items():
            setattr(class_obj, key.lower(), value.lower())
        return class_obj
    return f
class BaseDriver(object):
    """Implements common functions for drivers.

    Every service vendor must inherit this class. If any service vendor
    wants to add extra methods for their service, apart from those given
    below, they should add the method definition here and implement the
    method in their driver.
    """
    def __init__(self, conf):
        pass
    def configure_healthmonitor(self, context, resource_data):
        """Checks if the Service VM is reachable.

        It does netcat to the CONFIGURATION_SERVER_PORT of the Service VM.
        Configuration agent runs inside Service VM. Once agent is up and
        reachable, Service VM is assumed to be active.
        :param context - context
        :param resource_data - data coming from orchestrator
        Returns: SUCCESS/FAILED
        """
        # NOTE(review): self.parse and self.port are expected to be
        # provided by the concrete driver subclass -- confirm before
        # reuse.
        resource_data = self.parse.parse_data(const.HEALTHMONITOR,
                                              resource_data)
        ip = resource_data.get('mgmt_ip')
        port = str(self.port)
        command = 'nc ' + ip + ' ' + port + ' -z'
        return self._check_vm_health(command)
    # The following hooks simply report SUCCESS; vendor drivers override
    # them with real configuration logic where needed.
    def configure_interfaces(self, context, kwargs):
        return const.SUCCESS
    def clear_interfaces(self, context, kwargs):
        return const.SUCCESS
    def configure_routes(self, context, kwargs):
        return const.SUCCESS
    def clear_routes(self, context, kwargs):
        return const.SUCCESS
    def clear_healthmonitor(self, context, kwargs):
        return const.SUCCESS
    def register_agent_object_with_driver(self, name, agent_obj):
        # Sets the attribute on the BaseDriver class itself, so the agent
        # object is shared across every driver subclass instance.
        setattr(BaseDriver, name, agent_obj)
    def _check_vm_health(self, command):
        """Ping based basic HM support provided by BaseDriver.

        Service provider can override the method implementation
        if they want to support other types.
        :param command - command to execute
        Returns: SUCCESS/FAILED
        """
        msg = ("Executing command %s for VM health check" % (command))
        LOG.debug(msg)
        try:
            # SECURITY NOTE(review): shell=True with an interpolated
            # management IP; safe only while mgmt_ip comes from trusted
            # orchestrator data -- confirm before widening the input
            # sources.
            subprocess.check_output(command, stderr=subprocess.STDOUT,
                                    shell=True)
        except Exception as e:
            msg = ("VM health check failed. Command '%s' execution failed."
                   " Reason=%s" % (command, e))
            LOG.debug(msg)
            return const.FAILED
        return const.SUCCESS
    def _configure_log_forwarding(self, url, mgmt_ip, port, headers=None):
        """Configures log forwarding IP address in Service VMs.

        :param url: url format that is used to invoke the Service VM API
        :param mgmt_ip: management IP of the Service VM
        :param port: port that is listened to by the Service VM agent
        :param headers: optional HTTP headers for the POST request
        Returns: SUCCESS/UNHANDLED/Error msg
        """
        url = url % (mgmt_ip, port, 'configure-rsyslog-as-client')
        # NOTE(review): self.conf and self.timeout are expected to be
        # provided by the concrete driver subclass -- confirm before
        # reuse.
        log_forward_ip_address = self.conf.configurator.log_forward_ip_address
        if not log_forward_ip_address:
            # Log forwarding is optional; an unset address is reported as
            # UNHANDLED, not as an error.
            msg = ("Log forwarding IP address not configured "
                   "for service at %s." % mgmt_ip)
            LOG.info(msg)
            return const.UNHANDLED
        data = dict(
            server_ip=log_forward_ip_address,
            server_port=self.conf.configurator.log_forward_port,
            log_level=self.conf.configurator.log_level)
        data = jsonutils.dumps(data)
        msg = ("Initiating POST request to configure log forwarding "
               "for service at: %r" % mgmt_ip)
        LOG.info(msg)
        try:
            resp = requests.post(url, data=data,
                                 timeout=self.timeout, headers=headers)
        except requests.exceptions.ConnectionError as err:
            msg = ("Failed to establish connection to service at: "
                   "%r for configuring log forwarding. ERROR: %r" %
                   (mgmt_ip, str(err).capitalize()))
            LOG.error(msg)
            return msg
        except requests.exceptions.RequestException as err:
            msg = ("Unexpected ERROR happened while configuring "
                   "log forwarding for service at: %r. "
                   "ERROR: %r" %
                   (mgmt_ip, str(err).capitalize()))
            LOG.error(msg)
            return msg
        try:
            result = resp.json()
        except ValueError as err:
            msg = ("Unable to parse response of configure log forward API, "
                   "invalid JSON. URL: %r. %r" % (url, str(err).capitalize()))
            LOG.error(msg)
            return msg
        if not result['status']:
            msg = ("Error configuring log forwarding for service "
                   "at %s. URL: %r. Reason: %s." %
                   (mgmt_ip, url, result['reason']))
            LOG.error(msg)
            return msg
        msg = ("Successfully configured log forwarding for "
               "service at %s." % mgmt_ip)
        LOG.info(msg)
        return const.SUCCESS

View File

@ -1,18 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Vendor identifier for the VyOS-based firewall service VM driver.
VYOS = 'vyos'
# TCP port used to reach the configuration server; presumably the agent
# inside the service VM listens here -- TODO confirm against the driver.
CONFIGURATION_SERVER_PORT = '8888'
# Timeout (seconds) applied to REST calls made to the service VM.
REST_TIMEOUT = 180
# URL template: filled with (management IP, port, API path).
request_url = "http://%s:%s/%s"
# Marker string; presumably returned/matched when a requested interface
# does not exist on the service VM -- TODO confirm against the driver.
INTERFACE_NOT_FOUND = "INTERFACE NOT FOUND"

View File

@ -1,695 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_serialization import jsonutils
import requests
from gbpservice.contrib.nfp.configurator.drivers.base import base_driver
from gbpservice.contrib.nfp.configurator.drivers.firewall.vyos import (
vyos_fw_constants as const)
from gbpservice.contrib.nfp.configurator.lib import (
generic_config_constants as gen_cfg_const)
from gbpservice.contrib.nfp.configurator.lib import constants as common_const
from gbpservice.contrib.nfp.configurator.lib import data_parser
from gbpservice.contrib.nfp.configurator.lib import fw_constants as fw_const
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
class RestApi(object):
    """Thin HTTP client for talking to Service VMs.

    Wraps the ``requests`` library behind a single :meth:`fire` entry
    point that dispatches POST/PUT/DELETE calls and normalizes every
    failure into a plain error-message string.
    """

    def __init__(self, timeout):
        # Socket timeout (seconds) applied to every outgoing request.
        self.timeout = timeout

    def request_type_to_api_map(self, url, data, request_type, headers):
        # Resolve the lower-cased verb name ('post'/'put'/'delete') to the
        # matching module-level function of ``requests`` and invoke it.
        api_call = getattr(requests, request_type)
        return api_call(url, data=data, timeout=self.timeout,
                        headers=headers)

    def fire(self, url, data, request_type, headers):
        """Issue a REST call to the Service VM.

        :param url: URL to connect.
        :param data: data to be sent.
        :param request_type: POST/PUT/DELETE
        Returns: SUCCESS/Error message
        """
        try:
            LOG.debug("SENDING CURL request to URL: %s, request_type:%s, "
                      "vm with data %s"
                      % (url, request_type, data))
            resp = self.request_type_to_api_map(url, data,
                                                request_type.lower(), headers)
        except requests.exceptions.ConnectionError as err:
            return ("Failed to establish connection to the service at URL: %r. "
                    "ERROR: %r" % (url, str(err).capitalize()))
        except Exception as err:
            return ("Failed to issue %r call "
                    "to service. URL: %r, Data: %r. Error: %r" %
                    (request_type.upper(), url, data, str(err).capitalize()))
        try:
            result = resp.json()
        except ValueError as err:
            return ("Unable to parse response, invalid JSON. URL: "
                    "%r. %r" % (url, str(err).capitalize()))
        # Surface the decoded body itself when either the HTTP status or the
        # embedded 'status' flag signals failure; callers inspect the dict.
        http_failed = resp.status_code not in common_const.SUCCESS_CODES
        if http_failed or result.get('status') is False:
            return result
        return common_const.STATUS_SUCCESS
class FwGenericConfigDriver(base_driver.BaseDriver):
    """ Implements device configuration requests.
    Firewall generic configuration driver for handling device
    configuration requests from Orchestrator.

    Every public method issues REST calls to the VyOS service VM via
    self.rest_api and returns either a SUCCESS constant or an
    error-message string (callers treat any non-constant as failure).
    self.rest_api and self.port are provided by the concrete subclass
    (see FwaasDriver.__init__).
    """
    def __init__(self):
        # Parses orchestrator resource payloads into flat dicts.
        self.parse = data_parser.DataParser()
    def _parse_vm_context(self, context):
        # Build HTTP headers carrying the VyOS VM credentials found at
        # context['service_vm_context']['vyos']; any missing key is
        # re-raised as a generic Exception after logging.
        try:
            username = str(context['service_vm_context'][
                'vyos']['username'])
            password = str(context['service_vm_context'][
                'vyos']['password'])
            headers = {'Content-Type': 'application/json',
                       'username': username,
                       'password': password}
            return headers
        except Exception as e:
            msg = ("Failed to get header from context. ERROR: %s" % e)
            LOG.error(msg)
            raise Exception(msg)
    def configure_healthmonitor(self, context, resource_data):
        # Delegate the actual health check to the base driver; on the very
        # first (INITIAL) successful check also push user credentials to
        # the VM via configure_user().
        vm_status = super(FwGenericConfigDriver, self).configure_healthmonitor(
            context, resource_data)
        if resource_data['nfds'][0]['periodicity'] == gen_cfg_const.INITIAL:
            if vm_status == common_const.SUCCESS:
                try:
                    resp = self.configure_user(context, resource_data)
                    if resp != common_const.STATUS_SUCCESS:
                        return common_const.FAILURE
                except Exception as e:
                    msg = ("Failed to configure user. ERROR: %s" % e)
                    LOG.error(msg)
                    return common_const.FAILURE
        return vm_status
    def configure_user(self, context, resource_data):
        # POST to the VM's 'change_auth' endpoint to (re)set credentials.
        # Returns STATUS_SUCCESS or an error-message string.
        headers = self._parse_vm_context(context)
        resource_data = self.parse.parse_data(common_const.HEALTHMONITOR,
                                              resource_data)
        mgmt_ip = resource_data['mgmt_ip']
        url = const.request_url % (mgmt_ip,
                                   self.port,
                                   'change_auth')
        data = {}
        LOG.info("Initiating POST request to configure Authentication "
                 "service at mgmt ip:%(mgmt_ip)s",
                 {'mgmt_ip': mgmt_ip})
        err_msg = ("Change Auth POST request to the VyOS firewall "
                   "service at %s failed. " % url)
        try:
            resp = self.rest_api.fire(url, data, common_const.POST, headers)
        except Exception as err:
            err_msg += ("Reason: %r" % str(err).capitalize())
            LOG.error(err_msg)
            return err_msg
        # Identity check is safe: RestApi.fire() returns this exact
        # constant object on success.
        if resp is common_const.STATUS_SUCCESS:
            msg = ("Configured user authentication successfully"
                   " for vyos service at %r." % mgmt_ip)
            LOG.info(msg)
            return resp
        # resp is either the decoded JSON error dict or a message string.
        # NOTE(review): assumes an error dict carries 'status'/'reason'
        # keys -- a KeyError here would propagate; confirm response schema.
        err_msg += (("Failed to change Authentication para Status code "
                     "Status code: %r, Reason: %r" %
                     (resp['status'], resp['reason']))
                    if type(resp) is dict
                    else ("Reason: " + resp))
        LOG.error(err_msg)
        return err_msg
    def _configure_static_ips(self, context, resource_data):
        """ Configure static IPs for provider and stitching interfaces
        of service VM.
        Issues REST call to service VM for configuration of static IPs.
        :param resource_data: a dictionary of firewall rules and objects
        send by neutron plugin
        Returns: SUCCESS/Failure message with reason.
        """
        headers = self._parse_vm_context(context)
        static_ips_info = dict(
            provider_ip=resource_data.get('provider_ip'),
            provider_cidr=resource_data.get('provider_cidr'),
            provider_mac=resource_data.get('provider_mac'),
            stitching_ip=resource_data.get('stitching_ip'),
            stitching_cidr=resource_data.get('stitching_cidr'),
            stitching_mac=resource_data.get('stitching_mac'))
        mgmt_ip = resource_data['mgmt_ip']
        url = const.request_url % (mgmt_ip,
                                   self.port,
                                   'add_static_ip')
        data = jsonutils.dumps(static_ips_info)
        LOG.info("Initiating POST request to add static IPs for primary "
                 "service at mgmt ip:%(mgmt_ip)s",
                 {'mgmt_ip': mgmt_ip})
        err_msg = ("Static IP POST request to the VyOS firewall "
                   "service at %s failed. " % url)
        try:
            resp = self.rest_api.fire(url, data, common_const.POST, headers)
        except Exception as err:
            err_msg += ("Reason: %r" % str(err).capitalize())
            LOG.error(err_msg)
            return err_msg
        if resp is common_const.STATUS_SUCCESS:
            msg = ("Static IPs successfully added for service at %r." % url)
            LOG.info(msg)
            return resp
        err_msg += (("Status code: %r, Reason: %r" %
                     (resp['status'], resp['reason']))
                    if type(resp) is dict
                    else ("Reason: " + resp))
        LOG.error(err_msg)
        return err_msg
    def configure_interfaces(self, context, resource_data):
        """ Configure interfaces for the service VM.
        Calls static IP configuration function and implements
        persistent rule addition in the service VM.
        Issues REST call to service VM for configuration of interfaces.
        :param context: neutron context
        :param resource_data: a dictionary of firewall rules and objects
        send by neutron plugin
        Returns: SUCCESS/Failure message with reason.
        """
        headers = self._parse_vm_context(context)
        resource_data = self.parse.parse_data(common_const.INTERFACES,
                                              resource_data)
        mgmt_ip = resource_data['mgmt_ip']
        try:
            # _configure_log_forwarding is not defined in this class --
            # presumably inherited from base_driver.BaseDriver; confirm.
            result_log_forward = self._configure_log_forwarding(
                const.request_url, mgmt_ip, self.port, headers)
        except Exception as err:
            msg = ("Failed to configure log forwarding for service at %s. "
                   "Error: %s" % (mgmt_ip, err))
            LOG.error(msg)
        else:
            if result_log_forward == common_const.UNHANDLED:
                pass
            elif result_log_forward != common_const.STATUS_SUCCESS:
                # Failure in log forward configuration won't break chain
                # creation. However, error will be logged for detecting
                # failure.
                msg = ("Failed to configure log forwarding for service at %s."
                       " Error: %s" % (mgmt_ip, result_log_forward))
                LOG.error(msg)
        try:
            result_static_ips = self._configure_static_ips(context,
                                                           resource_data)
        except Exception as err:
            msg = ("Failed to add static IPs. Error: %s" % err)
            LOG.error(msg)
            return msg
        else:
            if result_static_ips != common_const.STATUS_SUCCESS:
                return result_static_ips
        rule_info = dict(
            provider_mac=resource_data['provider_mac'],
            stitching_mac=resource_data['stitching_mac'])
        url = const.request_url % (mgmt_ip,
                                   self.port, 'add_rule')
        data = jsonutils.dumps(rule_info)
        LOG.info("Initiating POST request to add persistent rule to "
                 "primary service at mgmt ip: %(mgmt_ip)s",
                 {'mgmt_ip': mgmt_ip})
        err_msg = ("Add persistent rule POST request to the VyOS firewall "
                   "service at %s failed. " % url)
        try:
            resp = self.rest_api.fire(url, data, common_const.POST, headers)
        except Exception as err:
            err_msg += ("Reason: %r" % str(err).capitalize())
            LOG.error(err_msg)
            return err_msg
        if resp is common_const.STATUS_SUCCESS:
            msg = ("Persistent rule successfully added for "
                   "service at %r." % url)
            LOG.info(msg)
            # wait for 10secs for the ip address to get configured. Sometimes
            # observed that 'set_routes' fail with 'ip not configured' error.
            time.sleep(10)
            return resp
        err_msg += (("Status code: %r" % resp['status'])
                    if type(resp) is dict
                    else ("Reason: " + resp))
        LOG.error(err_msg)
        return err_msg
    def _clear_static_ips(self, context, resource_data):
        """ Clear static IPs for provider and stitching
        interfaces of the service VM.
        Issues REST call to service VM for deletion of static IPs.
        :param resource_data: a dictionary of firewall rules and objects
        send by neutron plugin
        Returns: SUCCESS/Failure message with reason.
        """
        headers = self._parse_vm_context(context)
        static_ips_info = dict(
            provider_ip=resource_data.get('provider_ip'),
            provider_cidr=resource_data.get('provider_cidr'),
            provider_mac=resource_data.get('provider_mac'),
            stitching_ip=resource_data.get('stitching_ip'),
            stitching_cidr=resource_data.get('stitching_cidr'),
            stitching_mac=resource_data.get('stitching_mac'))
        mgmt_ip = resource_data['mgmt_ip']
        url = const.request_url % (mgmt_ip,
                                   self.port,
                                   'del_static_ip')
        data = jsonutils.dumps(static_ips_info)
        LOG.info("Initiating POST request to remove static IPs for "
                 "primary service at mgmt ip: %(mgmt_ip)s",
                 {'mgmt_ip': mgmt_ip})
        err_msg = ("Static IP DELETE request to the VyOS firewall "
                   "service at %s failed. " % url)
        try:
            resp = self.rest_api.fire(url, data, common_const.DELETE, headers)
        except Exception as err:
            err_msg += ("Reason: %r" % str(err).capitalize())
            LOG.error(err_msg)
            return err_msg
        if resp is common_const.STATUS_SUCCESS:
            msg = ("Static IPs successfully removed for service at %r." % url)
            LOG.info(msg)
            return resp
        err_msg += (("Status code: %r, Reason: %r" %
                     (resp['status'], resp['reason']))
                    if type(resp) is dict
                    else ("Reason: " + resp))
        LOG.error(err_msg)
        return err_msg
    def clear_interfaces(self, context, resource_data):
        """ Clear interfaces for the service VM.
        Calls static IP clear function and implements
        persistent rule deletion in the service VM.
        Issues REST call to service VM for deletion of interfaces.
        :param context: neutron context
        :param resource_data: a dictionary of firewall rules and objects
        send by neutron plugin
        Returns: SUCCESS/Failure message with reason.
        """
        headers = self._parse_vm_context(context)
        resource_data = self.parse.parse_data(common_const.INTERFACES,
                                              resource_data)
        try:
            result_static_ips = self._clear_static_ips(context, resource_data)
        except Exception as err:
            msg = ("Failed to remove static IPs. Error: %s" % err)
            LOG.error(msg)
            return msg
        else:
            if result_static_ips != common_const.STATUS_SUCCESS:
                return result_static_ips
            else:
                LOG.info("Successfully removed static IPs. "
                         "Result: %(result_static_ips)s",
                         {'result_static_ips': result_static_ips})
        rule_info = dict(
            provider_mac=resource_data['provider_mac'],
            stitching_mac=resource_data['stitching_mac'])
        mgmt_ip = resource_data['mgmt_ip']
        LOG.info("Initiating DELETE persistent rule for primary "
                 "service at mgmt ip: %(mgmt_ip)s",
                 {'mgmt_ip': mgmt_ip})
        url = const.request_url % (mgmt_ip, self.port, 'delete_rule')
        data = jsonutils.dumps(rule_info)
        err_msg = ("Persistent rule DELETE request to the VyOS firewall "
                   "service at %s failed. " % url)
        try:
            resp = self.rest_api.fire(url, data, common_const.DELETE, headers)
        except Exception as err:
            err_msg += ("Reason: %r" % str(err).capitalize())
            LOG.error(err_msg)
            return err_msg
        if resp is common_const.STATUS_SUCCESS:
            msg = ("Persistent rules successfully deleted "
                   "for service at %r." % url)
            LOG.info(msg)
            return resp
        err_msg += (("Status code: %r." % resp['status'])
                    if type(resp) is dict
                    else ("Reason: " + resp))
        LOG.error(err_msg)
        return err_msg
    def configure_routes(self, context, resource_data):
        """ Configure routes for the service VM.
        Issues REST call to service VM for configuration of routes.
        :param context: neutron context
        :param resource_data: a dictionary of firewall rules and objects
        send by neutron plugin
        Returns: SUCCESS/Failure message with reason.
        """
        headers = self._parse_vm_context(context)
        forward_routes = resource_data.get('forward_route')
        resource_data = self.parse.parse_data(common_const.ROUTES,
                                              resource_data)
        mgmt_ip = resource_data.get('mgmt_ip')
        gateway_ip = resource_data.get('stitching_gw_ip')
        # checking whether VPN service is present in the chain
        # if yes, just configure the stitching pbr else
        # configure both stitching and provider pbrs.
        if not forward_routes:
            source_cidrs = [resource_data.get('stitching_cidr')]
        else:
            source_cidrs = [resource_data.get('provider_cidr'),
                            resource_data.get('stitching_cidr')]
        url = const.request_url % (mgmt_ip, self.port,
                                   'add-source-route')
        route_info = []
        for source_cidr in source_cidrs:
            route_info.append({'source_cidr': source_cidr,
                               'gateway_ip': gateway_ip})
        data = jsonutils.dumps(route_info)
        LOG.info("Initiating POST request to configure route of primary "
                 "service at mgmt ip: %(mgmt_ip)s",
                 {'mgmt_ip': mgmt_ip})
        err_msg = ("Configure routes POST request to the VyOS firewall "
                   "service at %s failed. " % url)
        try:
            resp = self.rest_api.fire(url, data, common_const.POST, headers)
        except Exception as err:
            err_msg += ("Reason: %r" % str(err).capitalize())
            LOG.error(err_msg)
            return err_msg
        if resp is common_const.STATUS_SUCCESS:
            msg = ("Configured routes successfully for service at %r." % url)
            LOG.info(msg)
            return resp
        err_msg += (("Status code: %r, Reason: %r" %
                     (resp['status'], resp['reason']))
                    if type(resp) is dict
                    else ("Reason: " + resp))
        LOG.error(err_msg)
        return err_msg
    def clear_routes(self, context, resource_data):
        """ Clear routes for the service VM.
        Issues REST call to service VM for deletion of routes.
        :param context: neutron context
        :param resource_data: a dictionary of firewall rules and objects
        send by neutron plugin
        Returns: SUCCESS/Failure message with reason.
        """
        headers = self._parse_vm_context(context)
        resource_data = self.parse.parse_data(common_const.ROUTES,
                                              resource_data)
        mgmt_ip = resource_data.get('mgmt_ip')
        # Deletion always targets both cidrs (no VPN special-casing here,
        # unlike configure_routes).
        source_cidrs = [resource_data.get('provider_cidr'),
                        resource_data.get('stitching_cidr')]
        url = const.request_url % (mgmt_ip, self.port,
                                   'delete-source-route')
        route_info = []
        for source_cidr in source_cidrs:
            route_info.append({'source_cidr': source_cidr})
        data = jsonutils.dumps(route_info)
        LOG.info("Initiating Delete route to primary "
                 "service at mgmt ip: %(mgmt_ip)s",
                 {'mgmt_ip': mgmt_ip})
        err_msg = ("Routes DELETE request to the VyOS firewall "
                   "service at %s failed. " % url)
        try:
            resp = self.rest_api.fire(url, data, common_const.DELETE, headers)
        except Exception as err:
            err_msg += ("Reason: %r" % str(err).capitalize())
            LOG.error(err_msg)
            return err_msg
        if resp is common_const.STATUS_SUCCESS:
            msg = ("Routes successfully removed for service at %r." % url)
            LOG.info(msg)
            return resp
        err_msg += (("Status code: %r, Reason: %r" %
                     (resp['status'], resp['reason']))
                    if type(resp) is dict
                    else ("Reason: " + resp))
        LOG.error(err_msg)
        return err_msg
@base_driver.set_class_attr(SERVICE_TYPE=fw_const.SERVICE_TYPE,
                            SERVICE_VENDOR=const.VYOS)
class FwaasDriver(FwGenericConfigDriver):
    """ Firewall as a service driver for handling firewall
    service configuration requests.
    We initialize service type in this class because agent loads
    class object only for those driver classes that have service type
    initialized. Also, only this driver class is exposed to the agent.

    Methods return the agent-facing status constants (STATUS_ACTIVE /
    STATUS_ERROR / STATUS_DELETED / STATUS_SUCCESS).
    """
    def __init__(self, conf):
        self.conf = conf
        # One REST client and timeout shared by all service-VM calls.
        self.timeout = const.REST_TIMEOUT
        self.rest_api = RestApi(self.timeout)
        self.host = self.conf.host
        self.port = const.CONFIGURATION_SERVER_PORT
        super(FwaasDriver, self).__init__()

    def create_firewall(self, context, firewall, host):
        """ Implements firewall creation
        Issues REST call to service VM for firewall creation
        :param context: Neutron context
        :param firewall: Firewall resource object from neutron fwaas plugin
        :param host: Name of the host machine
        Returns: SUCCESS/Failure message with reason.
        """
        headers = self._parse_vm_context(context['agent_info']['context'])
        resource_data = self.parse.parse_data(common_const.FIREWALL, context)
        LOG.info("Processing request 'Create Firewall' in FWaaS Driver "
                 "for Firewall ID: %(f_id)s",
                 {'f_id': firewall['id']})
        mgmt_ip = resource_data.get('mgmt_ip')
        url = const.request_url % (mgmt_ip,
                                   self.port,
                                   'configure-firewall-rule')
        msg = ("Initiating POST request for FIREWALL ID: %r Tenant ID:"
               " %r. URL: %s" % (firewall['id'], firewall['tenant_id'], url))
        LOG.debug(msg)
        data = jsonutils.dumps(firewall)
        err_msg = ("Configure firewall POST request to the VyOS "
                   "service at %s failed. " % url)
        try:
            resp = self.rest_api.fire(url, data, common_const.POST, headers)
        except Exception as err:
            err_msg += ("Reason: %r" % str(err).capitalize())
            LOG.error(err_msg)
            return common_const.STATUS_ERROR
        # Identity check is safe: RestApi.fire() returns this exact
        # constant object on success.
        if resp is common_const.STATUS_SUCCESS:
            LOG.info("Configured firewall successfully at URL: %(url)s ",
                     {'url': url})
            return common_const.STATUS_ACTIVE
        # NOTE(review): assumes an error dict always carries 'message';
        # a KeyError here would propagate to the agent -- confirm schema.
        err_msg += (("Reason: %r, Response Content: %r" %
                     (resp.pop('message'), resp))
                    if type(resp) is dict
                    else ("Reason: " + resp))
        LOG.error(err_msg)
        return common_const.STATUS_ERROR

    def update_firewall(self, context, firewall, host):
        """ Implements firewall updation
        Issues REST call to service VM for firewall updation
        :param context: Neutron context
        :param firewall: Firewall resource object from neutron fwaas plugin
        :param host: Name of the host machine
        Returns: SUCCESS/Failure message with reason.
        """
        headers = self._parse_vm_context(context['agent_info']['context'])
        LOG.info("Processing request 'Update Firewall' in FWaaS Driver "
                 "for Firewall ID:%(f_id)s",
                 {'f_id': firewall['id']})
        resource_data = self.parse.parse_data(common_const.FIREWALL, context)
        mgmt_ip = resource_data.get('mgmt_ip')
        url = const.request_url % (mgmt_ip,
                                   self.port,
                                   'update-firewall-rule')
        msg = ("Initiating UPDATE request. URL: %s" % url)
        LOG.debug(msg)
        data = jsonutils.dumps(firewall)
        err_msg = ("Update firewall POST request to the VyOS "
                   "service at %s failed. " % url)
        try:
            resp = self.rest_api.fire(url, data, common_const.PUT, headers)
        except Exception as err:
            err_msg += ("Reason: %r" % str(err).capitalize())
            LOG.error(err_msg)
            return common_const.STATUS_ERROR
        if resp is common_const.STATUS_SUCCESS:
            msg = ("Updated firewall successfully for service at %r." % url)
            LOG.debug(msg)
            return common_const.STATUS_ACTIVE
        err_msg += (("Reason: %r, Response Content: %r" %
                     (resp.pop('message'), resp))
                    if type(resp) is dict
                    else ("Reason: " + resp))
        LOG.error(err_msg)
        return common_const.STATUS_ERROR

    def delete_firewall(self, context, firewall, host):
        """ Implements firewall deletion
        Issues REST call to service VM for firewall deletion
        :param context: Neutron context
        :param firewall: Firewall resource object from neutron fwaas plugin
        :param host: Name of the host machine
        Returns: SUCCESS/Failure message with reason.
        """
        headers = self._parse_vm_context(context['agent_info']['context'])
        LOG.info("Processing request 'Delete Firewall' in FWaaS Driver "
                 "for Firewall ID:%(f_id)s",
                 {'f_id': firewall['id']})
        resource_data = self.parse.parse_data(common_const.FIREWALL, context)
        mgmt_ip = resource_data.get('mgmt_ip')
        url = const.request_url % (mgmt_ip,
                                   self.port,
                                   'delete-firewall-rule')
        msg = ("Initiating DELETE request. URL: %s" % url)
        LOG.info(msg)
        data = jsonutils.dumps(firewall)
        err_msg = ("Delete firewall POST request to the VyOS "
                   "service at %s failed. " % url)
        try:
            resp = self.rest_api.fire(url, data, common_const.DELETE, headers)
        except Exception as err:
            # Deliberate best-effort: the VM may already be gone, so a
            # failed call is still reported as delete success.
            err_msg += ("Reason: %r" % str(err).capitalize())
            LOG.error(err_msg)
            return common_const.STATUS_SUCCESS
        if resp is common_const.STATUS_SUCCESS:
            msg = ("Deleted firewall successfully for service at %r." % url)
            LOG.info(msg)
            return common_const.STATUS_DELETED
        if type(resp) is dict:
            if not resp.get('delete_success') and (
                    resp.get('message') == const.INTERFACE_NOT_FOUND):
                # BUG FIX: resp is a dict here, so the original
                # "resp.content" raised AttributeError instead of
                # returning SUCCESS; format the dict itself.
                err_msg += ("Firewall was not deleted as interface was not "
                            "available in the firewall. It might have got "
                            "detached. So marking this delete as SUCCESS. "
                            "URL: %r, Response Content: %r" %
                            (url, resp))
                LOG.error(err_msg)
                return common_const.STATUS_SUCCESS
            else:
                err_msg += ("Response Content: %r" % resp)
        else:
            err_msg += ("Reason: " + resp)
        LOG.error(err_msg)
        msg = ("Firewall deletion has failed, but still sending"
               "status as firewall deleted success from configurator")
        LOG.info(msg)
        return common_const.STATUS_DELETED

View File

@ -1,728 +0,0 @@
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This module holds the data models for the load balancer service plugin. These
are meant simply as replacement data structures for dictionaries and
SQLAlchemy models. Using dictionaries as data containers for many components
causes readability issues and does not intuitively give the benefits of what
classes and OO give. Using SQLAlchemy models as data containers for many
components can become an issue if you do not want to give certain components
access to the database.
These data models do provide methods for instantiation from SQLAlchemy models
and also converting to dictionaries.
"""
from gbpservice.contrib.nfp.configurator.lib import lbv2_constants as l_const
import six
if six.PY3:
unicode = str
class BaseDataModel(object):
    """Common base for the LBaaS data models: dict round-tripping plus
    navigation back to the owning LoadBalancer."""
    # NOTE(ihrachys): we could reuse the list to provide a default __init__
    # implementation. That would require handling custom default values though.
    fields = []
    def to_dict(self, **kwargs):
        # Recursively convert this model to a plain dict.  Pass
        # attr_name=False as a keyword to exclude that attribute; private
        # ('_'-prefixed) attributes are always skipped.
        ret = {}
        for attr in self.__dict__:
            if attr.startswith('_') or not kwargs.get(attr, True):
                continue
            if isinstance(getattr(self, attr), list):
                ret[attr] = []
                for item in self.__dict__[attr]:
                    if isinstance(item, BaseDataModel):
                        ret[attr].append(item.to_dict())
                    else:
                        # NOTE(review): this replaces the list built above
                        # with the bare item for non-model entries -- looks
                        # unintended (append would be expected), but kept
                        # as-is since it is legacy behaviour.
                        ret[attr] = item
            elif isinstance(getattr(self, attr), BaseDataModel):
                ret[attr] = self.__dict__[attr].to_dict()
            elif isinstance(self.__dict__[attr], unicode):
                # NOTE(review): under PY3 ``unicode`` is aliased to ``str``
                # (see module top), so this branch emits *bytes* keys and
                # values -- confirm callers expect that on PY3.
                ret[attr.encode('utf8')] = self.__dict__[attr].encode('utf8')
            else:
                ret[attr] = self.__dict__[attr]
        return ret
    def to_api_dict(self, **kwargs):
        # Overridden by subclasses to shape the API-facing view.
        return {}
    @classmethod
    def from_dict(cls, model_dict):
        # Keep only keys declared in cls.fields and build an instance.
        fields = {k: v for k, v in list(model_dict.items())
                  if k in cls.fields}
        return cls(**fields)
    @property
    def root_loadbalancer(self):
        """Returns the loadbalancer this instance is attached to."""
        if isinstance(self, LoadBalancer):
            lb = self
        elif isinstance(self, Listener):
            lb = self.loadbalancer
        elif isinstance(self, L7Policy):
            lb = self.listener.loadbalancer
        elif isinstance(self, L7Rule):
            lb = self.policy.listener.loadbalancer
        elif isinstance(self, Pool):
            lb = self.loadbalancer
        elif isinstance(self, SNI):
            lb = self.listener.loadbalancer
        else:
            # Pool Member or Health Monitor
            lb = self.pool.loadbalancer
        return lb
# NOTE(brandon-logan) AllocationPool, HostRoute, Subnet, IPAllocation, Port,
# and ProviderResourceAssociation are defined here because there aren't any
# data_models defined in core neutron or neutron services. Instead of jumping
# through the hoops to create those I've just defined them here. If ever
# data_models or similar are defined in those packages, those should be used
# instead of these.
class AllocationPool(BaseDataModel):
    """Start/end bounds of an IP allocation pool on a subnet."""
    fields = ['start', 'end']

    def __init__(self, start=None, end=None):
        self.start = start
        self.end = end
class HostRoute(BaseDataModel):
    """A static host route: destination CIDR plus next-hop address."""
    fields = ['destination', 'nexthop']

    def __init__(self, destination=None, nexthop=None):
        self.destination = destination
        self.nexthop = nexthop
class Subnet(BaseDataModel):
    """Neutron subnet attributes needed by the LB data models."""
    fields = ['id', 'name', 'tenant_id', 'network_id', 'ip_version', 'cidr',
              'gateway_ip', 'enable_dhcp', 'ipv6_ra_mode', 'ipv6_address_mode',
              'shared', 'dns_nameservers', 'host_routes', 'allocation_pools',
              'subnetpool_id']

    def __init__(self, id=None, name=None, tenant_id=None, network_id=None,
                 ip_version=None, cidr=None, gateway_ip=None, enable_dhcp=None,
                 ipv6_ra_mode=None, ipv6_address_mode=None, shared=None,
                 dns_nameservers=None, host_routes=None, allocation_pools=None,
                 subnetpool_id=None):
        # Plain attribute-per-argument container; values are stored as-is.
        for attr, value in (
                ('id', id), ('name', name), ('tenant_id', tenant_id),
                ('network_id', network_id), ('ip_version', ip_version),
                ('cidr', cidr), ('gateway_ip', gateway_ip),
                ('enable_dhcp', enable_dhcp),
                ('ipv6_ra_mode', ipv6_ra_mode),
                ('ipv6_address_mode', ipv6_address_mode),
                ('shared', shared), ('dns_nameservers', dns_nameservers),
                ('host_routes', host_routes),
                ('allocation_pools', allocation_pools),
                ('subnetpool_id', subnetpool_id)):
            setattr(self, attr, value)

    @classmethod
    def from_dict(cls, model_dict):
        # Promote the nested route/pool dicts to their model classes
        # before the generic field filtering runs.
        raw_routes = model_dict.pop('host_routes', [])
        raw_pools = model_dict.pop('allocation_pools', [])
        model_dict['host_routes'] = [
            HostRoute.from_dict(route) for route in raw_routes]
        model_dict['allocation_pools'] = [
            AllocationPool.from_dict(pool) for pool in raw_pools]
        return super(Subnet, cls).from_dict(model_dict)
class IPAllocation(BaseDataModel):
    """An IP address allocated to a port on a specific subnet."""
    fields = ['port_id', 'ip_address', 'subnet_id', 'network_id']

    def __init__(self, port_id=None, ip_address=None, subnet_id=None,
                 network_id=None):
        self.port_id = port_id
        self.ip_address = ip_address
        self.subnet_id = subnet_id
        self.network_id = network_id

    @classmethod
    def from_dict(cls, model_dict):
        # TODO(blogan): add subnet to __init__. Can't do it yet because it
        # causes issues with converting SA models into data models.
        raw_subnet = model_dict.pop('subnet', None)
        instance = super(IPAllocation, cls).from_dict(model_dict)
        instance.subnet = None
        if raw_subnet:
            instance.subnet = Subnet.from_dict(raw_subnet)
        return instance
class Port(BaseDataModel):
    """Neutron port attributes needed by the LB data models."""
    fields = ['id', 'tenant_id', 'name', 'network_id', 'mac_address',
              'admin_state_up', 'status', 'device_id', 'device_owner',
              'fixed_ips']

    def __init__(self, id=None, tenant_id=None, name=None, network_id=None,
                 mac_address=None, admin_state_up=None, status=None,
                 device_id=None, device_owner=None, fixed_ips=None):
        # fixed_ips defaults to a fresh list when falsy.
        for attr, value in (
                ('id', id), ('tenant_id', tenant_id), ('name', name),
                ('network_id', network_id), ('mac_address', mac_address),
                ('admin_state_up', admin_state_up), ('status', status),
                ('device_id', device_id), ('device_owner', device_owner),
                ('fixed_ips', fixed_ips or [])):
            setattr(self, attr, value)

    @classmethod
    def from_dict(cls, model_dict):
        # Promote nested fixed-ip dicts to IPAllocation models first.
        raw_ips = model_dict.pop('fixed_ips', [])
        model_dict['fixed_ips'] = [
            IPAllocation.from_dict(fixed_ip) for fixed_ip in raw_ips]
        return super(Port, cls).from_dict(model_dict)
class ProviderResourceAssociation(BaseDataModel):
    """Links a service-provider name to the resource it serves."""
    fields = ['provider_name', 'resource_id']

    def __init__(self, provider_name=None, resource_id=None):
        self.provider_name = provider_name
        self.resource_id = resource_id

    @classmethod
    def from_dict(cls, model_dict):
        # 'device_driver' is not a declared field, so detach it before the
        # generic filtering and reattach it on the built instance.
        device_driver = model_dict.pop('device_driver', None)
        instance = super(ProviderResourceAssociation, cls).from_dict(
            model_dict)
        instance.device_driver = device_driver
        return instance
class SessionPersistence(BaseDataModel):
    """Session persistence settings attached to a pool."""
    fields = ['pool_id', 'type', 'cookie_name', 'pool']

    def __init__(self, pool_id=None, type=None, cookie_name=None,
                 pool=None):
        self.pool_id = pool_id
        self.type = type
        self.cookie_name = cookie_name
        self.pool = pool

    def to_api_dict(self):
        # The API view hides both the pool back-reference and its id.
        return super(SessionPersistence, self).to_dict(pool=False,
                                                       pool_id=False)

    @classmethod
    def from_dict(cls, model_dict):
        raw_pool = model_dict.pop('pool', None)
        if raw_pool:
            model_dict['pool'] = Pool.from_dict(raw_pool)
        return super(SessionPersistence, cls).from_dict(model_dict)
class LoadBalancerStatistics(BaseDataModel):
    """Traffic counters gathered for a load balancer."""
    fields = ['loadbalancer_id', 'bytes_in', 'bytes_out', 'active_connections',
              'total_connections', 'loadbalancer']

    def __init__(self, loadbalancer_id=None, bytes_in=None, bytes_out=None,
                 active_connections=None, total_connections=None,
                 loadbalancer=None):
        self.loadbalancer_id = loadbalancer_id
        self.bytes_in = bytes_in
        self.bytes_out = bytes_out
        self.active_connections = active_connections
        self.total_connections = total_connections
        self.loadbalancer = loadbalancer

    def to_api_dict(self):
        # Counters only -- the parent loadbalancer references are omitted.
        return super(LoadBalancerStatistics, self).to_dict(
            loadbalancer_id=False, loadbalancer=False)
class HealthMonitor(BaseDataModel):
    """LBaaS v2 health monitor data model."""
    fields = ['id', 'tenant_id', 'type', 'delay', 'timeout', 'max_retries',
              'http_method', 'url_path', 'expected_codes',
              'provisioning_status', 'admin_state_up', 'pool', 'name']

    def __init__(self, id=None, tenant_id=None, type=None, delay=None,
                 timeout=None, max_retries=None, http_method=None,
                 url_path=None, expected_codes=None, provisioning_status=None,
                 admin_state_up=None, pool=None, name=None):
        for attr, value in (
                ('id', id), ('tenant_id', tenant_id), ('type', type),
                ('delay', delay), ('timeout', timeout),
                ('max_retries', max_retries), ('http_method', http_method),
                ('url_path', url_path), ('expected_codes', expected_codes),
                ('provisioning_status', provisioning_status),
                ('admin_state_up', admin_state_up), ('pool', pool),
                ('name', name)):
            setattr(self, attr, value)

    def attached_to_loadbalancer(self):
        # Attached only when reachable through pool -> loadbalancer.
        return bool(self.pool and self.pool.loadbalancer)

    def to_api_dict(self):
        ret_dict = super(HealthMonitor, self).to_dict(
            provisioning_status=False, pool=False)
        ret_dict['pools'] = [{'id': self.pool.id}] if self.pool else []
        # HTTP-specific knobs are meaningless for TCP/PING monitors.
        if self.type in (l_const.HEALTH_MONITOR_TCP,
                         l_const.HEALTH_MONITOR_PING):
            for key in ('http_method', 'url_path', 'expected_codes'):
                ret_dict.pop(key)
        return ret_dict

    @classmethod
    def from_dict(cls, model_dict):
        raw_pool = model_dict.pop('pool', None)
        if raw_pool:
            model_dict['pool'] = Pool.from_dict(raw_pool)
        return super(HealthMonitor, cls).from_dict(model_dict)
class Pool(BaseDataModel):
    """LBaaS v2 pool data model, including deprecated-name handling."""
    fields = ['id', 'tenant_id', 'name', 'description', 'healthmonitor_id',
              'protocol', 'lb_algorithm', 'admin_state_up', 'operating_status',
              'provisioning_status', 'members', 'healthmonitor',
              'session_persistence', 'loadbalancer_id', 'loadbalancer',
              'listener', 'listeners', 'l7_policies']
    # Map deprecated attribute names to new ones.
    attr_mapping = {'sessionpersistence': 'session_persistence'}
    def __init__(self, id=None, tenant_id=None, name=None, description=None,
                 healthmonitor_id=None, protocol=None, lb_algorithm=None,
                 admin_state_up=None, operating_status=None,
                 provisioning_status=None, members=None, healthmonitor=None,
                 session_persistence=None, loadbalancer_id=None,
                 loadbalancer=None, listener=None, listeners=None,
                 l7_policies=None):
        self.id = id
        self.tenant_id = tenant_id
        self.name = name
        self.description = description
        self.healthmonitor_id = healthmonitor_id
        self.protocol = protocol
        self.lb_algorithm = lb_algorithm
        self.admin_state_up = admin_state_up
        self.operating_status = operating_status
        self.provisioning_status = provisioning_status
        # List-valued attributes default to fresh lists when falsy.
        self.members = members or []
        self.healthmonitor = healthmonitor
        self.session_persistence = session_persistence
        # NOTE(eezhova): Old attribute name is kept for backwards
        # compatibility with out-of-tree drivers.
        self.sessionpersistence = self.session_persistence
        self.loadbalancer_id = loadbalancer_id
        self.loadbalancer = loadbalancer
        self.listener = listener
        self.listeners = listeners or []
        self.l7_policies = l7_policies or []
    def attached_to_loadbalancer(self):
        # A pool is "attached" as soon as it references a loadbalancer.
        return bool(self.loadbalancer)
    def to_api_dict(self):
        # NOTE(review): 'listener_id' is not an attribute of Pool, so
        # passing listener_id=False to to_dict() has no effect -- the
        # nested listener model is still serialized, then the key is
        # overwritten below.  Kept as-is (legacy behaviour).
        ret_dict = super(Pool, self).to_dict(
            provisioning_status=False, operating_status=False,
            healthmonitor=False, session_persistence=False,
            loadbalancer_id=False, loadbalancer=False, listener_id=False)
        ret_dict['loadbalancers'] = []
        if self.loadbalancer:
            ret_dict['loadbalancers'].append({'id': self.loadbalancer.id})
        ret_dict['session_persistence'] = None
        if self.session_persistence:
            ret_dict['session_persistence'] = (
                self.session_persistence.to_api_dict())
        # Related objects are collapsed to id-only references in the API.
        ret_dict['members'] = [{'id': member.id} for member in self.members]
        ret_dict['listeners'] = [{'id': listener.id}
                                 for listener in self.listeners]
        if self.listener:
            ret_dict['listener_id'] = self.listener.id
        else:
            ret_dict['listener_id'] = None
        ret_dict['l7_policies'] = [{'id': l7_policy.id}
                                   for l7_policy in self.l7_policies]
        return ret_dict
    @classmethod
    def from_dict(cls, model_dict):
        # Pop nested structures first so the generic field filtering in the
        # base class sees only already-promoted model instances.
        healthmonitor = model_dict.pop('healthmonitor', None)
        session_persistence = model_dict.pop('session_persistence', None)
        model_dict.pop('sessionpersistence', None)
        loadbalancer = model_dict.pop('loadbalancer', None)
        members = model_dict.pop('members', [])
        model_dict['members'] = [Member.from_dict(member)
                                 for member in members]
        listeners = model_dict.pop('listeners', [])
        model_dict['listeners'] = [Listener.from_dict(listener)
                                   for listener in listeners]
        l7_policies = model_dict.pop('l7_policies', [])
        model_dict['l7_policies'] = [L7Policy.from_dict(policy)
                                     for policy in l7_policies]
        # handle old attribute for out of tree drivers
        listener = model_dict.pop('listener', None)
        if listener:
            model_dict['listener'] = Listener.from_dict(listener)
        if healthmonitor:
            model_dict['healthmonitor'] = HealthMonitor.from_dict(
                healthmonitor)
        if session_persistence:
            model_dict['session_persistence'] = SessionPersistence.from_dict(
                session_persistence)
        if loadbalancer:
            model_dict['loadbalancer'] = LoadBalancer.from_dict(loadbalancer)
        return super(Pool, cls).from_dict(model_dict)
class Member(BaseDataModel):
    """Data model for a pool member (one backend address:port)."""
    fields = ['id', 'tenant_id', 'pool_id', 'address', 'protocol_port',
              'weight', 'admin_state_up', 'subnet_id', 'operating_status',
              'provisioning_status', 'pool', 'name']
    def __init__(self, id=None, tenant_id=None, pool_id=None, address=None,
                 protocol_port=None, weight=None, admin_state_up=None,
                 subnet_id=None, operating_status=None,
                 provisioning_status=None, pool=None, name=None):
        self.id, self.tenant_id, self.name = id, tenant_id, name
        self.pool_id, self.pool = pool_id, pool
        self.address, self.protocol_port = address, protocol_port
        self.weight, self.subnet_id = weight, subnet_id
        self.admin_state_up = admin_state_up
        self.operating_status = operating_status
        self.provisioning_status = provisioning_status
    def attached_to_loadbalancer(self):
        """A member is attached iff its pool is bound to a loadbalancer."""
        return bool(self.pool and self.pool.loadbalancer)
    def to_api_dict(self):
        """Render the member as an API dict, hiding status and pool ref."""
        return super(Member, self).to_dict(
            provisioning_status=False, operating_status=False, pool=False)
    @classmethod
    def from_dict(cls, model_dict):
        """Build a Member from a dict, inflating the nested pool."""
        pool_dict = model_dict.pop('pool', None)
        if pool_dict:
            model_dict['pool'] = Pool.from_dict(pool_dict)
        return super(Member, cls).from_dict(model_dict)
class SNI(BaseDataModel):
    """TLS SNI certificate reference attached to a listener."""
    fields = ['listener_id', 'tls_container_id', 'position', 'listener']
    def __init__(self, listener_id=None, tls_container_id=None,
                 position=None, listener=None):
        self.listener_id, self.listener = listener_id, listener
        self.tls_container_id = tls_container_id
        self.position = position
    def attached_to_loadbalancer(self):
        """Attached iff the owning listener is bound to a loadbalancer."""
        return bool(self.listener and self.listener.loadbalancer)
    def to_api_dict(self):
        """Render as an API dict without the listener back-reference."""
        return super(SNI, self).to_dict(listener=False)
class TLSContainer(BaseDataModel):
    """In-memory TLS certificate bundle (cert, key, chain)."""
    fields = ['id', 'certificate', 'private_key', 'passphrase',
              'intermediates', 'primary_cn']
    def __init__(self, id=None, certificate=None, private_key=None,
                 passphrase=None, intermediates=None, primary_cn=None):
        self.id = id
        self.certificate, self.private_key = certificate, private_key
        self.passphrase = passphrase
        self.intermediates = intermediates
        self.primary_cn = primary_cn
class L7Rule(BaseDataModel):
    """Data model for an L7 content-switching rule owned by an L7 policy."""
    fields = ['id', 'tenant_id', 'l7policy_id', 'type', 'compare_type',
              'invert', 'key', 'value', 'provisioning_status',
              'admin_state_up', 'policy']
    def __init__(self, id=None, tenant_id=None,
                 l7policy_id=None, type=None, compare_type=None, invert=None,
                 key=None, value=None, provisioning_status=None,
                 admin_state_up=None, policy=None):
        self.id = id
        self.tenant_id = tenant_id
        self.l7policy_id = l7policy_id
        self.type = type
        self.compare_type = compare_type
        self.invert = invert
        self.key = key
        self.value = value
        self.provisioning_status = provisioning_status
        self.admin_state_up = admin_state_up
        self.policy = policy
    def attached_to_loadbalancer(self):
        """Return True when the policy -> listener -> loadbalancer chain
        is fully populated.

        Guarded with ``and`` (matching Member/SNI) so a detached rule
        returns False instead of raising AttributeError when ``policy``
        or its listener is None.
        """
        return bool(self.policy and self.policy.listener and
                    self.policy.listener.loadbalancer)
    def to_api_dict(self):
        """Render as an API dict; the owning policy becomes an id ref."""
        ret_dict = super(L7Rule, self).to_dict(
            provisioning_status=False,
            policy=False, l7policy_id=False)
        ret_dict['policies'] = []
        if self.policy:
            ret_dict['policies'].append({'id': self.policy.id})
        return ret_dict
    @classmethod
    def from_dict(cls, model_dict):
        """Build an L7Rule from a dict, inflating the nested policy."""
        policy = model_dict.pop('policy', None)
        if policy:
            model_dict['policy'] = L7Policy.from_dict(policy)
        return super(L7Rule, cls).from_dict(model_dict)
class L7Policy(BaseDataModel):
    """Data model for an L7 policy (redirect/reject action on a listener)."""
    fields = ['id', 'tenant_id', 'name', 'description', 'listener_id',
              'action', 'redirect_pool_id', 'redirect_url', 'position',
              'admin_state_up', 'provisioning_status', 'listener', 'rules',
              'redirect_pool']
    def __init__(self, id=None, tenant_id=None, name=None, description=None,
                 listener_id=None, action=None, redirect_pool_id=None,
                 redirect_url=None, position=None,
                 admin_state_up=None, provisioning_status=None,
                 listener=None, rules=None, redirect_pool=None):
        self.id = id
        self.tenant_id = tenant_id
        self.name = name
        self.description = description
        self.listener_id = listener_id
        self.action = action
        self.redirect_pool_id = redirect_pool_id
        self.redirect_pool = redirect_pool
        self.redirect_url = redirect_url
        self.position = position
        self.admin_state_up = admin_state_up
        self.provisioning_status = provisioning_status
        self.listener = listener
        self.rules = rules or []
    def attached_to_loadbalancer(self):
        """Return True when the owning listener is bound to a loadbalancer.

        Guarded with ``and`` (matching Member/SNI) so a detached policy
        returns False instead of raising AttributeError when ``listener``
        is None.
        """
        return bool(self.listener and self.listener.loadbalancer)
    def to_api_dict(self):
        """Render as an API dict; listener and rules become id refs."""
        ret_dict = super(L7Policy, self).to_dict(
            listener=False, listener_id=False,
            provisioning_status=False, redirect_pool=False)
        ret_dict['listeners'] = []
        if self.listener:
            ret_dict['listeners'].append({'id': self.listener.id})
        ret_dict['rules'] = [{'id': rule.id} for rule in self.rules]
        return ret_dict
    @classmethod
    def from_dict(cls, model_dict):
        """Build an L7Policy from a dict, inflating nested related objects."""
        listener = model_dict.pop('listener', None)
        redirect_pool = model_dict.pop('redirect_pool', None)
        rules = model_dict.pop('rules', [])
        if listener:
            model_dict['listener'] = Listener.from_dict(listener)
        if redirect_pool:
            model_dict['redirect_pool'] = Pool.from_dict(redirect_pool)
        model_dict['rules'] = [L7Rule.from_dict(rule)
                               for rule in rules]
        return super(L7Policy, cls).from_dict(model_dict)
class Listener(BaseDataModel):
    """Data model for an LBaaS v2 listener (a protocol:port frontend)."""
    fields = ['id', 'tenant_id', 'name', 'description', 'default_pool_id',
              'loadbalancer_id', 'protocol', 'default_tls_container_id',
              'sni_containers', 'protocol_port', 'connection_limit',
              'admin_state_up', 'provisioning_status', 'operating_status',
              'default_pool', 'loadbalancer', 'l7_policies']
    def __init__(self, id=None, tenant_id=None, name=None, description=None,
                 default_pool_id=None, loadbalancer_id=None, protocol=None,
                 default_tls_container_id=None, sni_containers=None,
                 protocol_port=None, connection_limit=None,
                 admin_state_up=None, provisioning_status=None,
                 operating_status=None, default_pool=None, loadbalancer=None,
                 l7_policies=None):
        self.id = id
        self.tenant_id = tenant_id
        self.name = name
        self.description = description
        self.default_pool_id = default_pool_id
        self.loadbalancer_id = loadbalancer_id
        self.protocol = protocol
        self.default_tls_container_id = default_tls_container_id
        # Collection attributes default to fresh lists (never shared).
        self.sni_containers = sni_containers or []
        self.protocol_port = protocol_port
        self.connection_limit = connection_limit
        self.admin_state_up = admin_state_up
        self.operating_status = operating_status
        self.provisioning_status = provisioning_status
        self.default_pool = default_pool
        self.loadbalancer = loadbalancer
        self.l7_policies = l7_policies or []
    def attached_to_loadbalancer(self):
        """Return True when this listener is bound to a loadbalancer."""
        return bool(self.loadbalancer)
    def to_api_dict(self):
        """Render as an API dict: related objects become id refs."""
        ret_dict = super(Listener, self).to_dict(
            loadbalancer=False, loadbalancer_id=False, default_pool=False,
            operating_status=False, provisioning_status=False,
            sni_containers=False)
        # NOTE(blogan): Returning a list to future proof for M:N objects
        # that are not yet implemented.
        ret_dict['loadbalancers'] = []
        if self.loadbalancer:
            ret_dict['loadbalancers'].append({'id': self.loadbalancer.id})
        ret_dict['sni_container_refs'] = [container.tls_container_id
                                          for container in self.sni_containers]
        ret_dict['default_tls_container_ref'] = self.default_tls_container_id
        ret_dict['l7_policies'] = [{'id': l7_policy.id}
                                   for l7_policy in self.l7_policies]
        return ret_dict
    @classmethod
    def from_dict(cls, model_dict):
        """Build a Listener from a dict, inflating nested related objects.

        Mutates ``model_dict`` in place before delegating to the base
        implementation.
        """
        default_pool = model_dict.pop('default_pool', None)
        loadbalancer = model_dict.pop('loadbalancer', None)
        sni_containers = model_dict.pop('sni_containers', [])
        model_dict['sni_containers'] = [SNI.from_dict(sni)
                                        for sni in sni_containers]
        l7_policies = model_dict.pop('l7_policies', [])
        if default_pool:
            model_dict['default_pool'] = Pool.from_dict(default_pool)
        if loadbalancer:
            model_dict['loadbalancer'] = LoadBalancer.from_dict(loadbalancer)
        model_dict['l7_policies'] = [L7Policy.from_dict(policy)
                                     for policy in l7_policies]
        return super(Listener, cls).from_dict(model_dict)
class LoadBalancer(BaseDataModel):
    """Data model for an LBaaS v2 loadbalancer (the root VIP object)."""
    fields = ['id', 'tenant_id', 'name', 'description', 'vip_subnet_id',
              'vip_port_id', 'vip_address', 'provisioning_status',
              'operating_status', 'admin_state_up', 'vip_port', 'stats',
              'provider', 'listeners', 'pools', 'flavor_id']
    def __init__(self, id=None, tenant_id=None, name=None, description=None,
                 vip_subnet_id=None, vip_port_id=None, vip_address=None,
                 provisioning_status=None, operating_status=None,
                 admin_state_up=None, vip_port=None, stats=None,
                 provider=None, listeners=None, pools=None, flavor_id=None):
        self.id = id
        self.tenant_id = tenant_id
        self.name = name
        self.description = description
        self.vip_subnet_id = vip_subnet_id
        self.vip_port_id = vip_port_id
        self.vip_address = vip_address
        self.operating_status = operating_status
        self.provisioning_status = provisioning_status
        self.admin_state_up = admin_state_up
        self.vip_port = vip_port
        self.stats = stats
        self.provider = provider
        # Collection attributes default to fresh lists (never shared).
        self.listeners = listeners or []
        self.flavor_id = flavor_id
        self.pools = pools or []
    def attached_to_loadbalancer(self):
        """A loadbalancer is trivially attached to itself."""
        return True
    def to_api_dict(self):
        """Render as an API dict: children become id refs, provider its name.

        ``flavor_id`` is omitted entirely when unset.
        """
        ret_dict = super(LoadBalancer, self).to_dict(
            vip_port=False, stats=False, listeners=False)
        ret_dict['listeners'] = [{'id': listener.id}
                                 for listener in self.listeners]
        ret_dict['pools'] = [{'id': pool.id} for pool in self.pools]
        if self.provider:
            ret_dict['provider'] = self.provider.provider_name
        if not self.flavor_id:
            del ret_dict['flavor_id']
        return ret_dict
    @classmethod
    def from_dict(cls, model_dict):
        """Build a LoadBalancer from a dict, inflating nested objects.

        ``stats`` is dropped: it is derived data, not part of the model
        constructor's inputs here.
        """
        listeners = model_dict.pop('listeners', [])
        pools = model_dict.pop('pools', [])
        vip_port = model_dict.pop('vip_port', None)
        provider = model_dict.pop('provider', None)
        model_dict.pop('stats', None)
        model_dict['listeners'] = [Listener.from_dict(listener)
                                   for listener in listeners]
        model_dict['pools'] = [Pool.from_dict(pool)
                               for pool in pools]
        if vip_port:
            model_dict['vip_port'] = Port.from_dict(vip_port)
        if provider:
            model_dict['provider'] = ProviderResourceAssociation.from_dict(
                provider)
        return super(LoadBalancer, cls).from_dict(model_dict)
# Registry mapping lowercase resource-type names to their data model
# classes, used to look up the right from_dict/to_api_dict implementation
# for a given API resource name.
NAME_TO_DATA_MODEL_MAP = {
    "loadbalancer": LoadBalancer,
    "healthmonitor": HealthMonitor,
    "listener": Listener,
    "sni": SNI,
    "pool": Pool,
    "member": Member,
    "loadbalancerstatistics": LoadBalancerStatistics,
    "sessionpersistence": SessionPersistence,
    "ipallocation": IPAllocation,
    "port": Port,
    "providerresourceassociation": ProviderResourceAssociation
}

View File

@ -1,130 +0,0 @@
# Copyright 2011 VMware, Inc., 2014 A10 Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Octavia Haproxy driver
"""
from oslo_config import cfg
from gbpservice._i18n import _
# Options controlling how the controller talks to and configures the
# haproxy amphora VM (file layout, retry policy, REST endpoints, TLS).
haproxy_amphora_opts = [
    cfg.StrOpt('base_path',
               default='/var/lib/octavia',
               help=_('Base directory for amphora files.')),
    cfg.StrOpt('base_cert_dir',
               default='/var/lib/octavia/certs',
               help=_('Base directory for cert storage.')),
    cfg.StrOpt('haproxy_template', help=_('Custom haproxy template.')),
    cfg.IntOpt('connection_max_retries',
               default=10,
               help=_('Retry threshold for connecting to amphorae.')),
    cfg.IntOpt('connection_retry_interval',
               default=5,
               help=_('Retry timeout between connection attempts in '
                      'seconds.')),
    cfg.StrOpt('haproxy_stick_size', default='10k',
               help=_('Size of the HAProxy stick table. Accepts k, m, g '
                      'suffixes. Example: 10k')),
    # REST server
    cfg.IPOpt('bind_host', default='0.0.0.0',
              help=_("The host IP to bind to")),
    cfg.PortOpt('bind_port', default=9443,
                help=_("The port to bind to")),
    cfg.StrOpt('haproxy_cmd', default='/usr/sbin/haproxy',
               help=_("The full path to haproxy")),
    cfg.IntOpt('respawn_count', default=2,
               help=_("The respawn count for haproxy's upstart script")),
    cfg.IntOpt('respawn_interval', default=2,
               help=_("The respawn interval for haproxy's upstart script")),
    cfg.FloatOpt('rest_request_conn_timeout', default=10,
                 help=_("The time in seconds to wait for a REST API "
                        "to connect.")),
    cfg.FloatOpt('rest_request_read_timeout', default=60,
                 help=_("The time in seconds to wait for a REST API "
                        "response.")),
    # REST client
    cfg.StrOpt('client_cert', default='/etc/octavia/certs/client.pem',
               help=_("The client certificate to talk to the agent")),
    cfg.StrOpt('server_ca', default='/etc/octavia/certs/server_ca.pem',
               help=_("The ca which signed the server certificates")),
    cfg.BoolOpt('use_upstart', default=True,
                help=_("If False, use sysvinit.")),
]
# Pluggable certificate manager/generator backends.
# Help strings are wrapped in _() for i18n, consistent with every other
# option list in this module.
certificate_opts = [
    cfg.StrOpt('cert_manager',
               default='local_cert_manager',
               help=_('Name of the cert manager to use')),
    cfg.StrOpt('cert_generator',
               default='local_cert_generator',
               help=_('Name of the cert generator to use')),
]
# Options only used by the amphora agent
amphora_agent_opts = [
    # TLS material for authenticating the controller<->agent channel.
    cfg.StrOpt('agent_server_ca', default='/etc/octavia/certs/client_ca.pem',
               help=_("The ca which signed the client certificates")),
    cfg.StrOpt('agent_server_cert', default='/etc/octavia/certs/server.pem',
               help=_("The server certificate for the agent.py server "
                      "to use")),
    cfg.StrOpt('agent_server_network_dir',
               default='/etc/network/interfaces.d/',
               help=_("The directory where new network interfaces "
                      "are located")),
    # When set, takes precedence over agent_server_network_dir (see help).
    cfg.StrOpt('agent_server_network_file',
               help=_("The file where the network interfaces are located. "
                      "Specifying this will override any value set for "
                      "agent_server_network_dir.")),
    # Do not specify in octavia.conf, loaded at runtime
    cfg.StrOpt('amphora_id', help=_("The amphora ID.")),
]
# VRRP (keepalived) tuning for active/standby amphora topologies.
keepalived_vrrp_opts = [
    cfg.IntOpt('vrrp_advert_int',
               default=1,
               help=_('Amphora role and priority advertisement interval '
                      'in seconds.')),
    cfg.IntOpt('vrrp_check_interval',
               default=5,
               help=_('VRRP health check script run interval in seconds.')),
    cfg.IntOpt('vrrp_fail_count',
               default=2,
               help=_('Number of successive failures before transition to a '
                      'fail state.')),
    # Fixed copy-paste help text: this option counts successes, not
    # failures, before transitioning to a success state.
    cfg.IntOpt('vrrp_success_count',
               default=2,
               help=_('Number of consecutive successes before transition '
                      'to a success state.')),
    cfg.IntOpt('vrrp_garp_refresh_interval',
               default=5,
               help=_('Time in seconds between gratuitous ARP announcements '
                      'from the MASTER.')),
    cfg.IntOpt('vrrp_garp_refresh_count',
               default=2,
               help=_('Number of gratuitous ARP announcements to make on '
                      'each refresh interval.'))
]
# Register the configuration options
# Each option list lands in its own named config-file section (group).
cfg.CONF.register_opts(amphora_agent_opts, group='amphora_agent')
cfg.CONF.register_opts(certificate_opts, group='certificates')
cfg.CONF.register_opts(haproxy_amphora_opts, group='haproxy_amphora')
cfg.CONF.register_opts(keepalived_vrrp_opts, group='keepalived_vrrp')
# Module-level alias so importers can use <module>.CONF directly.
CONF = cfg.CONF

View File

@ -1,699 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import copy
from octavia.common import constants
from octavia.common import data_models as o_data_models
from octavia.network import data_models as network_data_models
from gbpservice.contrib.nfp.configurator.drivers.base import base_driver
from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.\
v2.common import neutron_lbaas_data_models as n_data_models
from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.\
v2.haproxy import haproxy_driver_constants
from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.\
v2.haproxy.local_cert_manager import LocalCertManager
from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.\
v2.haproxy.rest_api_driver import HaproxyAmphoraLoadBalancerDriver
from gbpservice.contrib.nfp.configurator.lib import constants as common_const
from gbpservice.contrib.nfp.configurator.lib import data_parser
from gbpservice.contrib.nfp.configurator.lib import lbv2_constants
from gbpservice.nfp.common import exceptions
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
# Copy from loadbalancer/v1/haproxy/haproxy_lb_driver.py
""" Loadbalancer generic configuration driver for handling device
configuration requests.
"""
class LbGenericConfigDriver(object):
    """
    Driver class for implementing loadbalancer configuration
    requests from Orchestrator.
    """
    # NOTE(review): this class is a mixin — self.parse, self.port and
    # self._configure_log_forwarding are supplied by the concrete driver
    # subclass (e.g. HaproxyLoadBalancerDriver / BaseDriver); confirm.
    def __init__(self):
        pass
    def configure_interfaces(self, context, resource_data):
        """ Configure interfaces for the service VM.
        Calls static IP configuration function and implements
        persistent rule addition in the service VM.
        Issues REST call to service VM for configuration of interfaces.
        :param context: neutron context
        :param resource_data: a dictionary of loadbalancer objects
        send by neutron plugin
        Returns: SUCCESS/Failure message with reason.
        """
        resource_data = self.parse.parse_data(
            common_const.INTERFACES, resource_data)
        mgmt_ip = resource_data['mgmt_ip']
        try:
            result_log_forward = self._configure_log_forwarding(
                lbv2_constants.REQUEST_URL, mgmt_ip,
                self.port)
        except Exception as err:
            # On any error, return the failure text (not STATUS_SUCCESS)
            # so the caller can surface the reason.
            msg = ("Failed to configure log forwarding for service at %s. "
                   "Error: %s" % (mgmt_ip, err))
            LOG.error(msg)
            return msg
        else:
            # UNHANDLED means the base driver chose not to configure log
            # forwarding; that is treated as success below.
            if result_log_forward == common_const.UNHANDLED:
                pass
            elif result_log_forward != lbv2_constants.STATUS_SUCCESS:
                msg = ("Failed to configure log forwarding for service at %s. "
                       % mgmt_ip)
                LOG.error(msg)
                return result_log_forward
            else:
                msg = ("Configured log forwarding for service at %s. "
                       "Result: %s" % (mgmt_ip, result_log_forward))
                LOG.info(msg)
        return lbv2_constants.STATUS_SUCCESS
# As we use the rest client and amphora image from Octavia,
# we need to have a helper class to simulate Octavia DB operation
# in order to get Octavia data models from Neutron-lbaas data models
class OctaviaDataModelBuilder(object):
    """Translate neutron-lbaas model dicts into Octavia data models.

    Octavia's amphora REST driver expects Octavia DB models; since this
    code runs without Octavia's DB, each get_*_octavia_model method
    rebuilds the equivalent Octavia object graph from a neutron-lbaas
    dict, wiring up the cross-references by hand.
    """
    def __init__(self, driver=None):
        # driver: HaproxyLoadBalancerDriver, used to look up amphorae.
        self.driver = driver
    # All Octavia data models have these attributes
    def _get_common_args(self, obj):
        return {
            'id': obj.id,
            'project_id': obj.tenant_id,
            'name': obj.name,
            'description': obj.description,
            'enabled': obj.admin_state_up,
            'operating_status': obj.operating_status,
        }
    # Update Octavia model from dict
    def _update(self, octavia_data_model, update_dict):
        for key, value in list(update_dict.items()):
            setattr(octavia_data_model, key, value)
        return octavia_data_model
    # Translate loadbalancer neutron model dict to octavia model
    def get_loadbalancer_octavia_model(self, loadbalancer_dict):
        """Build an Octavia LoadBalancer (with vip, amphorae, listeners,
        pools) from a neutron-lbaas loadbalancer dict.

        :raises exceptions.IncompleteData: when no amphora is registered
            for the loadbalancer.
        """
        loadbalancer = n_data_models.LoadBalancer.from_dict(
            copy.deepcopy(loadbalancer_dict))
        ret = o_data_models.LoadBalancer()
        args = self._get_common_args(loadbalancer)
        vip = o_data_models.Vip(
            load_balancer_id=loadbalancer.id,
            ip_address=loadbalancer.vip_address,
            subnet_id=loadbalancer.vip_subnet_id,
            port_id=loadbalancer.vip_port.id,
            load_balancer=ret
        )
        amphorae = self.driver.get_amphora(loadbalancer.id)
        if not amphorae:
            raise exceptions.IncompleteData(
                "Amphora information is missing")
        # REVISIT(jiahao): cluster_group, topology, affinity_group_id are not
        # included yet
        args.update({
            'vip': vip,
            'amphorae': amphorae,
            'provisioning_status': loadbalancer.provisioning_status,
        })
        if loadbalancer_dict.get('listeners'):
            listeners = []
            pools = []
            for listener_dict in loadbalancer_dict.get('listeners'):
                listener = self.get_listener_octavia_model(listener_dict)
                listener.load_balancer = ret
                listeners.append(listener)
                # Deduplicate by pool id: a pool shared by several
                # listeners must appear only once. The legacy code also
                # extend()-ed the full list first, which made this check
                # a no-op and produced duplicate entries.
                for pool in listener.pools:
                    if pool.id not in [p.id for p in pools]:
                        pools.append(pool)
            args.update({
                'listeners': listeners,
                'pools': pools,
            })
        ret = self._update(ret, args)
        return ret
    # Translate listener neutron model dict to octavia model
    def get_listener_octavia_model(self, listener_dict):
        """Build an Octavia Listener (with SNI, loadbalancer, pools) from
        a neutron-lbaas listener dict.
        """
        # Must use a copy because from_dict will modify the original dict
        listener = n_data_models.Listener.from_dict(
            copy.deepcopy(listener_dict))
        ret = o_data_models.Listener()
        args = self._get_common_args(listener)
        sni_containers = []
        if listener_dict.get('sni_containers'):
            for sni_dict in listener_dict.get('sni_containers'):
                sni = o_data_models.SNI()
                if sni_dict.get('listener'):
                    sni.listener = self.get_listener_octavia_model(
                        sni_dict.get('listener'))
                sni.listener_id = sni_dict.get('listener_id')
                sni.position = sni_dict.get('position')
                sni.tls_container_id = sni_dict.get('tls_container_id')
                sni_containers.append(sni)
        if listener_dict.get('loadbalancer'):
            loadbalancer = self.get_loadbalancer_octavia_model(
                listener_dict.get('loadbalancer'))
            if listener.id not in [_listener.id for _listener
                                   in loadbalancer.listeners]:
                loadbalancer.listeners.append(ret)
            args.update({
                'load_balancer': loadbalancer,
            })
        if listener_dict.get('default_pool'):
            pool = self.get_pool_octavia_model(
                listener_dict.get('default_pool'))
            if listener.id not in [_listener.id for _listener
                                   in pool.listeners]:
                pool.listeners.append(ret)
            # REVISIT(jiahao): In Mitaka, we need to handle multiple pools
            pools = [pool]
            args.update({
                'default_pool': pool,
                'pools': pools,
            })
        args.update({
            'load_balancer_id': listener.loadbalancer_id,
            'protocol': listener.protocol,
            'protocol_port': listener.protocol_port,
            'connection_limit': listener.connection_limit,
            'default_pool_id': listener.default_pool_id,
            'tls_certificate_id': listener.default_tls_container_id,
            'sni_containers': sni_containers,
            'provisioning_status': listener.provisioning_status,
        })
        ret = self._update(ret, args)
        return ret
    # Translate pool neutron model dict to octavia model
    def get_pool_octavia_model(self, pool_dict):
        """Build an Octavia Pool (with listeners, members, health monitor)
        from a neutron-lbaas pool dict.
        """
        pool = n_data_models.Pool.from_dict(
            copy.deepcopy(pool_dict)
        )
        ret = o_data_models.Pool()
        args = self._get_common_args(pool)
        # REVISIT(jiahao): In Mitaka, instead of pool.listener,
        # there are pool.listeners. We need to handle that
        if pool_dict.get('listener'):
            listener = self.get_listener_octavia_model(
                pool_dict.get('listener'))
            if pool.id not in [_pool.id for _pool in listener.pools]:
                listener.pools.append(ret)
            if (not listener.default_pool) \
                    or (listener.default_pool_id == pool.id):
                listener.default_pool = ret
            listeners = [listener]
            args.update({
                'listeners': listeners,
            })
            if listener.load_balancer:
                if pool.id not in [_pool.id for _pool
                                   in listener.load_balancer.pools]:
                    listener.load_balancer.pools.append(ret)
                args.update({
                    'load_balancer': listener.load_balancer,
                    'load_balancer_id': listener.load_balancer_id,
                })
        if pool_dict.get('members'):
            members = []
            for member_dict in pool_dict.get('members'):
                member = self.get_member_octavia_model(member_dict)
                if not member.pool:
                    member.pool = ret
                members.append(member)
            args.update({
                'members': members
            })
        if pool_dict.get('healthmonitor'):
            healthmonitor = self.get_healthmonitor_octavia_model(
                pool_dict.get('healthmonitor'))
            if not healthmonitor.pool:
                healthmonitor.pool = ret
            args.update({
                'health_monitor': healthmonitor
            })
        # REVISIT(jiahao): L7Policy are not added
        args.update({
            'protocol': pool.protocol,
            'lb_algorithm': pool.lb_algorithm,
            'session_persistence': pool.session_persistence,
        })
        ret = self._update(ret, args)
        return ret
    # Translate member neutron model dict to octavia model
    def get_member_octavia_model(self, member_dict):
        """Build an Octavia Member from a neutron-lbaas member dict."""
        member = n_data_models.Member.from_dict(
            copy.deepcopy(member_dict)
        )
        ret = o_data_models.Member()
        args = {
            'id': member.id,
            'project_id': member.tenant_id,
            'pool_id': member.pool_id,
            'ip_address': member.address,
            'protocol_port': member.protocol_port,
            'weight': member.weight,
            'enabled': member.admin_state_up,
            'subnet_id': member.subnet_id,
            'operating_status': member.operating_status,
        }
        if member_dict.get('pool'):
            pool = self.get_pool_octavia_model(member_dict.get('pool'))
            args.update({
                'pool': pool
            })
        ret = self._update(ret, args)
        return ret
    # Translate HealthMonitor neutron model dict to octavia model
    def get_healthmonitor_octavia_model(self, hm_dict):
        """Build an Octavia HealthMonitor from a neutron-lbaas dict.

        Neutron's single max_retries feeds both rise and fall thresholds.
        """
        hm = n_data_models.HealthMonitor.from_dict(
            copy.deepcopy(hm_dict)
        )
        ret = o_data_models.HealthMonitor()
        args = {
            'id': hm.id,
            'project_id': hm.tenant_id,
            'type': hm.type,
            'delay': hm.delay,
            'timeout': hm.timeout,
            'rise_threshold': hm.max_retries,
            'fall_threshold': hm.max_retries,
            'http_method': hm.http_method,
            'url_path': hm.url_path,
            'expected_codes': hm.expected_codes,
            'enabled': hm.admin_state_up
        }
        if hm_dict.get('pool'):
            pool = self.get_pool_octavia_model(hm_dict.get('pool'))
            args.update({
                'pool': pool,
                'pool_id': pool.id
            })
        ret = self._update(ret, args)
        return ret
@base_driver.set_class_attr(
    SERVICE_TYPE=lbv2_constants.SERVICE_TYPE,
    SERVICE_VENDOR=haproxy_driver_constants.SERVICE_VENDOR)
class HaproxyLoadBalancerDriver(LbGenericConfigDriver,
                                base_driver.BaseDriver):
    """Top-level haproxy LBaaS v2 driver: wires one manager per LBaaS
    object type and tracks the amphora registered per loadbalancer.
    """
    # amphorae = {"loadbalancer_id": [o_data_models.Amphora(
    #     lb_network_ip, id, status)]}
    # NOTE(review): class-level mutable dict shared across all driver
    # instances — appears to be an intentional process-wide registry;
    # confirm before instantiating more than one driver.
    amphorae = {}
    def __init__(self, plugin_rpc=None, conf=None):
        # Each of the major LBaaS objects in the neutron database
        # need a corresponding manager/handler class.
        #
        # Put common things that are shared across the entire driver, like
        # config or a rest client handle, here.
        #
        # This function is executed when neutron-server starts.
        super(HaproxyLoadBalancerDriver, self).__init__()
        self.conf = conf
        self.port = haproxy_driver_constants.CONFIGURATION_SERVER_PORT
        self.parse = data_parser.DataParser()
        self.amphora_driver = HaproxyAmphoraLoadBalancerDriver()
        self.cert_manager = LocalCertManager()
        # One manager per LBaaS v2 object type.
        self.load_balancer = HaproxyLoadBalancerManager(self)
        self.listener = HaproxyListenerManager(self)
        self.pool = HaproxyPoolManager(self)
        self.member = HaproxyMemberManager(self)
        self.health_monitor = HaproxyHealthMonitorManager(self)
        self.o_models_builder = OctaviaDataModelBuilder(self)
    @classmethod
    def get_name(cls):
        """Return the registered driver name."""
        return haproxy_driver_constants.DRIVER_NAME
    # Get Amphora object given the loadbalancer_id
    def get_amphora(self, loadbalancer_id):
        """Return the list of Amphora objects for a loadbalancer, or None."""
        return self.amphorae.get(loadbalancer_id)
    def add_amphora(self, context, loadbalancer_id, description,
                    status=constants.ACTIVE):
        """Register an amphora for a loadbalancer (idempotent).

        :param description: stringified dict holding service-chain
            metadata, including network_function_id.
        :raises exceptions.IncompleteData: when the management IP or the
            network_function_id is missing.
        """
        sc_metadata = ast.literal_eval(description)
        rdata = self.parse.parse_data(common_const.LOADBALANCERV2, context)
        if not (rdata['mgmt_ip'] and sc_metadata.get('network_function_id')):
            raise exceptions.IncompleteData(
                "Amphora information is missing")
        if not self.get_amphora(loadbalancer_id):
            # REVISIT(jiahao): use network_function_id as amphora id
            amp = o_data_models.Amphora(
                lb_network_ip=rdata['mgmt_ip'],
                id=sc_metadata['network_function_id'],
                status=status)
            self.amphorae[loadbalancer_id] = [amp]
class HaproxyCommonManager(object):
    """Shared base for the per-object haproxy managers.

    Provides default create/update/delete logging plus TLS certificate
    store/cleanup helpers used by the listener and loadbalancer managers.
    """
    def __init__(self, driver):
        # driver: the owning HaproxyLoadBalancerDriver.
        self.driver = driver
        self.parse = data_parser.DataParser()
    def _deploy(self, context, obj):
        # Default no-op; subclasses override to push config to the amphora.
        pass
    def create(self, context, obj):
        msg = ("LB %s, created %s" % (self.__class__.__name__, obj['id']))
        LOG.info(msg)
    def update(self, context, old_obj, obj):
        msg = ("LB %s, updated %s" % (self.__class__.__name__, obj['id']))
        LOG.info(msg)
    def delete(self, context, obj):
        msg = ("LB %s, deleted %s" % (self.__class__.__name__, obj['id']))
        LOG.info(msg)
    def store_certs(self, listener_obj, listener_dict):
        """Store the listener's default and SNI TLS containers in the cert
        manager, rewriting the container ids on the listener object.

        :returns: list of stored cert ids (for later clean_certs).
        """
        cert_mngr = self.driver.cert_manager
        cert_ids = []
        if listener_obj.tls_certificate_id:
            cert = listener_dict["default_tls_container"]
            tls_certificate_id = cert_mngr.store_cert(
                project_id=listener_dict["tenant_id"],
                certificate=cert["certificate"],
                private_key=cert["private_key"],
                intermediates=cert["intermediates"]
            )
            # Point the listener at the locally stored cert.
            listener_obj.tls_certificate_id = tls_certificate_id
            cert_ids.append(tls_certificate_id)
        if listener_obj.sni_containers:
            for sni_cont in listener_obj.sni_containers:
                # Match each SNI object to its raw dict by container id.
                for cont in listener_dict["sni_containers"]:
                    if sni_cont.tls_container_id == cont["tls_container_id"]:
                        cert = cont["tls_container"]
                        tls_certificate_id = cert_mngr.store_cert(
                            project_id=listener_dict["tenant_id"],
                            certificate=cert["certificate"],
                            private_key=cert["private_key"],
                            intermediates=cert["intermediates"]
                        )
                        sni_cont.tls_container_id = tls_certificate_id
                        cert_ids.append(tls_certificate_id)
                        break
        return cert_ids
    def clean_certs(self, project_id, cert_ids):
        """Delete previously stored certs from the cert manager."""
        cert_mngr = self.driver.cert_manager
        for cert_id in cert_ids:
            cert_mngr.delete_cert(project_id, cert_id)
class HaproxyLoadBalancerManager(HaproxyCommonManager):
    """Manager handling loadbalancer-level operations on the amphora."""
    def _get_amphorae_network_config(self,
                                     context,
                                     loadbalancer_dict,
                                     loadbalancer_o_obj):
        """Build per-amphora network config (VIP subnet + VRRP port).

        :raises exceptions.IncompleteData: when the VIP subnet cannot be
            found in the context's service_info.
        """
        loadbalancer_n_obj = n_data_models.LoadBalancer.from_dict(
            copy.deepcopy(loadbalancer_dict))
        amphorae_network_config = {}
        for amp in loadbalancer_o_obj.amphorae:
            if amp.status != constants.DELETED:
                # Get vip_subnet
                vip_subnet = None
                for subnet_dict in context['service_info']['subnets']:
                    if subnet_dict['id'] == loadbalancer_n_obj.vip_subnet_id:
                        vip_subnet = n_data_models.Subnet.from_dict(
                            copy.deepcopy(subnet_dict))
                        break
                if vip_subnet is None:
                    raise exceptions.IncompleteData(
                        "VIP subnet information is not found")
                sc_metadata = self.parse.parse_data(
                    common_const.LOADBALANCERV2, context)
                # VRRP port carries only the provider MAC here.
                vrrp_port = n_data_models.Port(
                    mac_address=sc_metadata['provider_mac'])
                if vrrp_port is None:
                    raise exceptions.IncompleteData(
                        "VRRP port information is not found")
                amphorae_network_config[amp.id] = \
                    network_data_models.AmphoraNetworkConfig(
                        amphora=amp,
                        vip_subnet=vip_subnet,
                        vrrp_port=vrrp_port)
        return amphorae_network_config
    def create(self, context, loadbalancer):
        """Register the amphora and notify it of the VIP plug."""
        self.driver.add_amphora(context, loadbalancer['id'],
                                loadbalancer['description'])
        loadbalancer_o_obj = self.driver.o_models_builder.\
            get_loadbalancer_octavia_model(loadbalancer)
        amphorae_network_config = self._get_amphorae_network_config(
            context, loadbalancer, loadbalancer_o_obj)
        for amp in loadbalancer_o_obj.amphorae:
            self.driver.amphora_driver.post_vip_plug(
                amp, loadbalancer_o_obj, amphorae_network_config)
        msg = ("LB %s, created %s"
               % (self.__class__.__name__, loadbalancer['id']))
        LOG.info(msg)
        msg = ("Notified amphora of vip plug. "
               "Loadbalancer id: %s, vip: %s"
               % (loadbalancer['id'], loadbalancer_o_obj.vip.ip_address))
        LOG.info(msg)
    def update(self, context, old_loadbalancer, loadbalancer):
        """Re-push every listener config, storing/cleaning TLS certs."""
        self.driver.add_amphora(context, loadbalancer['id'],
                                loadbalancer['description'])
        loadbalancer_o_obj = self.driver.o_models_builder.\
            get_loadbalancer_octavia_model(loadbalancer)
        for listener in loadbalancer_o_obj.listeners:
            cert_ids = []
            for listener_dict in loadbalancer['listeners']:
                if listener.id == listener_dict['id']:
                    cert_ids = self.store_certs(listener, listener_dict)
                    break
            self.driver.amphora_driver.update(listener, loadbalancer_o_obj.vip)
            self.clean_certs(loadbalancer['tenant_id'], cert_ids)
        msg = ("LB %s, updated %s"
               % (self.__class__.__name__, loadbalancer['id']))
        LOG.info(msg)
    def delete(self, context, loadbalancer):
        msg = ("LB %s, deleted %s"
               % (self.__class__.__name__, loadbalancer['id']))
        LOG.info(msg)
        # delete loadbalancer doesn't need any operation on service vm
    @property
    def allocates_vip(self):
        """This driver never allocates the VIP itself."""
        msg = ('allocates_vip queried')
        LOG.info(msg)
        return False
    def create_and_allocate_vip(self, context, loadbalancer):
        """Delegate to create(); VIP allocation is done elsewhere."""
        msg = ("LB %s, create_and_allocate_vip %s"
               % (self.__class__.__name__, loadbalancer['id']))
        LOG.info(msg)
        self.create(context, loadbalancer)
    def refresh(self, context, loadbalancer):
        # This is intended to trigger the backend to check and repair
        # the state of this load balancer and all of its dependent objects
        msg = ("LB pool refresh %s" % (loadbalancer['id']))
        LOG.info(msg)
    def stats(self, context, loadbalancer):
        """Return zeroed statistics (stats not collected by this driver)."""
        msg = ("LB stats %s" % (loadbalancer['id']))
        LOG.info(msg)
        return {
            "bytes_in": 0,
            "bytes_out": 0,
            "active_connections": 0,
            "total_connections": 0
        }
class HaproxyListenerManager(HaproxyCommonManager):
    """Deploys listener configuration to the haproxy amphora."""

    def _deploy(self, context, listener):
        """Render and push the current configuration of *listener*."""
        self.driver.add_amphora(context, listener['loadbalancer_id'],
                                listener['description'])
        octavia_listener = self.driver.o_models_builder.\
            get_listener_octavia_model(listener)
        cert_ids = self.store_certs(octavia_listener, listener)
        self.driver.amphora_driver.update(octavia_listener,
                                          octavia_listener.load_balancer.vip)
        # Certs are only needed while rendering; drop the stored copies.
        self.clean_certs(listener['tenant_id'], cert_ids)

    def create(self, context, listener):
        self._deploy(context, listener)
        LOG.info("LB %s, created %s" % (self.__class__.__name__,
                                        listener['id']))

    def update(self, context, old_listener, listener):
        self._deploy(context, listener)
        LOG.info("LB %s, updated %s" % (self.__class__.__name__,
                                        listener['id']))

    def delete(self, context, listener):
        """Remove the listener configuration from the amphora."""
        self.driver.add_amphora(context, listener['loadbalancer_id'],
                                listener['description'])
        octavia_listener = self.driver.o_models_builder.\
            get_listener_octavia_model(listener)
        self.driver.amphora_driver.delete(octavia_listener,
                                          octavia_listener.load_balancer.vip)
        LOG.info("LB %s, deleted %s" % (self.__class__.__name__,
                                        listener['id']))
class HaproxyPoolManager(HaproxyCommonManager):
    """Deploys pool configuration to the haproxy amphora."""

    def _remove_pool(self, pool):
        """Detach *pool* from its listener if it is the default pool."""
        # REVISIT(jiahao): In Mitaka, we need to handle multiple pools
        listener = pool['listener']
        if listener['default_pool']['id'] == pool['id']:
            listener['default_pool'] = None

    def _deploy(self, context, pool):
        """Re-render the listener config that contains *pool* and push it."""
        self.driver.add_amphora(context, pool['loadbalancer_id'],
                                pool['description'])
        octavia_pool = self.driver.o_models_builder.\
            get_pool_octavia_model(pool)
        # For Mitaka, that would be multiple listeners within pool
        octavia_listener = octavia_pool.listeners[0]
        octavia_lb = octavia_pool.load_balancer
        cert_ids = self.store_certs(octavia_listener,
                                    pool['listeners'][0])
        self.driver.amphora_driver.update(octavia_listener, octavia_lb.vip)
        self.clean_certs(pool['tenant_id'], cert_ids)

    def create(self, context, pool):
        self._deploy(context, pool)
        LOG.info("LB %s, created %s" % (self.__class__.__name__, pool['id']))

    def update(self, context, old_pool, pool):
        self._deploy(context, pool)
        LOG.info("LB %s, updated %s" % (self.__class__.__name__, pool['id']))

    def delete(self, context, pool):
        # Drop the pool from the local model first, then push the
        # resulting configuration so the amphora stops using it.
        self._remove_pool(pool)
        self._deploy(context, pool)
        LOG.info("LB %s, deleted %s" % (self.__class__.__name__, pool['id']))
class HaproxyMemberManager(HaproxyCommonManager):
    """Deploys pool-member configuration to the haproxy amphora."""

    def _deploy(self, context, member):
        """Re-render the listener config that contains *member* and push it."""
        self.driver.add_amphora(context, member['pool']['loadbalancer_id'],
                                member['description'])
        octavia_member = self.driver.o_models_builder.\
            get_member_octavia_model(member)
        octavia_listener = octavia_member.pool.listeners[0]
        octavia_lb = octavia_member.pool.load_balancer
        cert_ids = self.store_certs(octavia_listener,
                                    member['pool']['listeners'][0])
        self.driver.amphora_driver.update(octavia_listener, octavia_lb.vip)
        self.clean_certs(member['tenant_id'], cert_ids)

    def _remove_member(self, member):
        """Drop *member* from its listener's default pool (first match)."""
        # REVISIT(jiahao): In Mitaka, we need to handle multiple pools
        members = member['pool']['listener']['default_pool']['members']
        for index, entry in enumerate(members):
            if entry['id'] == member['id']:
                del members[index]
                break

    def create(self, context, member):
        self._deploy(context, member)
        LOG.info("LB %s, created %s" % (self.__class__.__name__,
                                        member['id']))

    def update(self, context, old_member, member):
        self._deploy(context, member)
        LOG.info("LB %s, updated %s" % (self.__class__.__name__,
                                        member['id']))

    def delete(self, context, member):
        # Remove the member locally, then push the resulting config.
        self._remove_member(member)
        self._deploy(context, member)
        LOG.info("LB %s, deleted %s" % (self.__class__.__name__,
                                        member['id']))
class HaproxyHealthMonitorManager(HaproxyCommonManager):
    """Deploys health-monitor configuration to the haproxy amphora."""

    def _deploy(self, context, hm):
        """Re-render the listener config that contains *hm* and push it."""
        self.driver.add_amphora(context, hm['pool']['loadbalancer_id'],
                                hm['description'])
        octavia_hm = self.driver.o_models_builder.\
            get_healthmonitor_octavia_model(hm)
        octavia_listener = octavia_hm.pool.listeners[0]
        octavia_lb = octavia_hm.pool.load_balancer
        cert_ids = self.store_certs(octavia_listener,
                                    hm['pool']['listeners'][0])
        self.driver.amphora_driver.update(octavia_listener, octavia_lb.vip)
        self.clean_certs(hm['tenant_id'], cert_ids)

    def _remove_healthmonitor(self, hm):
        """Detach *hm* from its listener's default pool if it matches."""
        default_pool = hm['pool']['listener']['default_pool']
        if default_pool['healthmonitor']['id'] == hm['id']:
            default_pool['healthmonitor'] = None

    def create(self, context, hm):
        self._deploy(context, hm)
        LOG.info("LB %s, created %s" % (self.__class__.__name__, hm['id']))

    def update(self, context, old_hm, hm):
        self._deploy(context, hm)
        LOG.info("LB %s, updated %s" % (self.__class__.__name__, hm['id']))

    def delete(self, context, hm):
        # Remove the health monitor locally, then push the resulting config.
        self._remove_healthmonitor(hm)
        self._deploy(context, hm)
        LOG.info("LB %s, deleted %s" % (self.__class__.__name__, hm['id']))

View File

@ -1,15 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Vendor tag used to select this driver for loadbalancerv2 requests.
SERVICE_VENDOR = 'haproxy'
# Driver identifier reported to the configurator.
DRIVER_NAME = 'loadbalancerv2'
# Port of the configuration agent on the service VM.
# NOTE(review): kept as a string here; confirm consumers expect str, not int.
CONFIGURATION_SERVER_PORT = '9443'

View File

@ -1,171 +0,0 @@
# Copyright (c) 2014 Rackspace US, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import uuid
from octavia.certificates.common import local as local_common
from octavia.certificates.manager import cert_mgr
from octavia.common import exceptions
from oslo_config import cfg
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_group('certificates', 'octavia.certificates.common.local')
CONF.set_default('storage_path', '/tmp/', group='certificates')
class LocalCertManager(cert_mgr.CertManager):
    """Cert Manager Interface that stores data locally.

    Certificates are kept as plain files under
    ``CONF.certificates.storage_path``; a cert is identified by a UUID
    ``cert_ref`` and stored as ``<ref>.crt`` / ``<ref>.key`` plus optional
    ``<ref>.int`` (intermediates) and ``<ref>.pass`` (key passphrase).
    """

    @staticmethod
    def _filenames(cert_ref):
        """Return the (cert, key, intermediates, passphrase) file paths."""
        filename_base = os.path.join(CONF.certificates.storage_path, cert_ref)
        return (filename_base + '.crt',
                filename_base + '.key',
                filename_base + '.int',
                filename_base + '.pass')

    @staticmethod
    def store_cert(project_id, certificate, private_key, intermediates=None,
                   private_key_passphrase=None, **kwargs):
        """Stores (i.e., registers) a cert with the cert manager.

        This method stores the specified cert to the filesystem and returns
        a UUID that can be used to retrieve it.

        :param project_id: Project ID for the owner of the certificate
        :param certificate: PEM encoded TLS certificate
        :param private_key: private key for the supplied certificate
        :param intermediates: ordered and concatenated intermediate certs
        :param private_key_passphrase: optional passphrase for the supplied key

        :returns: the UUID of the stored cert
        :raises CertificateStorageException: if certificate storage fails
        """
        cert_ref = str(uuid.uuid4())
        (filename_certificate, filename_private_key,
         filename_intermediates, filename_pkp) = \
            LocalCertManager._filenames(cert_ref)
        LOG.info(
            "Storing certificate data on the local filesystem."
        )
        try:
            with open(filename_certificate, 'w') as cert_file:
                cert_file.write(certificate)
            with open(filename_private_key, 'w') as key_file:
                key_file.write(private_key)
            # Optional artifacts are only written when supplied.
            if intermediates:
                with open(filename_intermediates, 'w') as int_file:
                    int_file.write(intermediates)
            if private_key_passphrase:
                with open(filename_pkp, 'w') as pass_file:
                    pass_file.write(private_key_passphrase)
        except IOError as ioe:
            LOG.error("Failed to store certificate.")
            # BUG FIX: IOError has no '.message' attribute on Python 3;
            # str(ioe) is correct on both Python 2 and 3.
            raise exceptions.CertificateStorageException(message=str(ioe))
        return cert_ref

    @staticmethod
    def get_cert(project_id, cert_ref, **kwargs):
        """Retrieves the specified cert.

        :param project_id: Project ID for the owner of the certificate
        :param cert_ref: the UUID of the cert to retrieve

        :return: octavia.certificates.common.Cert representation of the
                 certificate data
        :raises CertificateStorageException: if certificate retrieval fails
        """
        LOG.info(
            "Loading certificate {0} from the local filesystem.".format(
                cert_ref))
        (filename_certificate, filename_private_key,
         filename_intermediates, filename_pkp) = \
            LocalCertManager._filenames(cert_ref)
        cert_data = dict()
        try:
            with open(filename_certificate, 'r') as cert_file:
                cert_data['certificate'] = cert_file.read()
        except IOError:
            LOG.error(
                "Failed to read certificate for {0}.".format(cert_ref))
            raise exceptions.CertificateStorageException(
                msg="Certificate could not be read."
            )
        try:
            with open(filename_private_key, 'r') as key_file:
                cert_data['private_key'] = key_file.read()
        except IOError:
            LOG.error(
                "Failed to read private key for {0}.".format(cert_ref))
            raise exceptions.CertificateStorageException(
                msg="Private Key could not be read."
            )
        # Intermediates and passphrase are optional: a missing file is fine.
        try:
            with open(filename_intermediates, 'r') as int_file:
                cert_data['intermediates'] = int_file.read()
        except IOError:
            pass
        try:
            with open(filename_pkp, 'r') as pass_file:
                cert_data['private_key_passphrase'] = pass_file.read()
        except IOError:
            pass
        return local_common.LocalCert(**cert_data)

    @staticmethod
    def delete_cert(project_id, cert_ref, **kwargs):
        """Deletes the specified cert.

        :param project_id: Project ID for the owner of the certificate
        :param cert_ref: the UUID of the cert to delete

        :raises CertificateStorageException: if certificate deletion fails
        """
        LOG.info(
            "Deleting certificate {0} from the local filesystem.".format(
                cert_ref))
        (filename_certificate, filename_private_key,
         filename_intermediates, filename_pkp) = \
            LocalCertManager._filenames(cert_ref)
        try:
            os.remove(filename_certificate)
            os.remove(filename_private_key)
            # Optional artifacts may not exist; remove only when present.
            if os.path.exists(filename_intermediates):
                os.remove(filename_intermediates)
            if os.path.exists(filename_pkp):
                os.remove(filename_pkp)
        except IOError as ioe:
            LOG.error(
                "Failed to delete certificate {0}.".format(cert_ref))
            # BUG FIX: see store_cert — '.message' is a Python 2 only attr.
            raise exceptions.CertificateStorageException(message=str(ioe))

View File

@ -1,103 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import time
import warnings
# Override unnecessary Octavia config import
from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy \
import config
from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.\
config import cfg
sys.modules['octavia.common.config'] = config
sys.modules['octavia.common.config.cfg'] = cfg
from octavia.amphorae.driver_exceptions import exceptions as driver_except
from octavia.amphorae.drivers.haproxy import rest_api_driver
from octavia.common.jinja.haproxy import jinja_cfg
from oslo_config import cfg
import requests
from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.\
local_cert_manager import LocalCertManager
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
API_VERSION = rest_api_driver.API_VERSION
OCTAVIA_API_CLIENT = rest_api_driver.OCTAVIA_API_CLIENT
CONF = cfg.CONF
CONF.import_group('haproxy_amphora', 'octavia.common.config')
class HaproxyAmphoraLoadBalancerDriver(
        rest_api_driver.HaproxyAmphoraLoadBalancerDriver):
    """Octavia haproxy REST driver wired to NFP-local components.

    Only construction is overridden: the no-SSL AmphoraAPIClient and the
    filesystem-backed LocalCertManager replace the upstream defaults.
    """

    def __init__(self):
        # NOTE: super() is deliberately given this class's *base* class,
        # which makes Python skip the base's own __init__ and run the next
        # one in the MRO instead; the attributes the skipped __init__ would
        # have set (client, cert_manager, jinja) are created right here.
        super(rest_api_driver.HaproxyAmphoraLoadBalancerDriver,
              self).__init__()
        self.client = AmphoraAPIClient()
        self.cert_manager = LocalCertManager()
        # Jinja templater renders the haproxy config pushed to amphorae,
        # using paths/templates from the [haproxy_amphora] config group.
        self.jinja = jinja_cfg.JinjaTemplater(
            base_amp_path=CONF.haproxy_amphora.base_path,
            base_crt_dir=CONF.haproxy_amphora.base_cert_dir,
            haproxy_template=CONF.haproxy_amphora.haproxy_template)
class AmphoraAPIClient(rest_api_driver.AmphoraAPIClient):
    """Removed SSL verification from original api client"""

    def __init__(self):
        super(AmphoraAPIClient, self).__init__()
        # Plain session: replaces the upstream TLS-configured session,
        # so no client certificates or server verification are used.
        self.session = requests.Session()

    def _base_url(self, ip):
        # http (not https) by design — SSL is disabled for this client.
        return "http://{ip}:{port}/{version}/".format(
            ip=ip,
            port=CONF.haproxy_amphora.bind_port,
            version=API_VERSION)

    def request(self, method, amp, path='/', **kwargs):
        """Issue an HTTP request to the amphora, retrying on connect errors.

        :param method: HTTP verb; resolved to the matching session method
        :param amp: amphora object; its lb_network_ip is the target host
        :param path: URL path appended to the amphora base URL
        :param kwargs: extra arguments forwarded to requests
        :returns: the requests.Response of the first successful attempt
        :raises driver_except.TimeOutException: when
            connection_max_retries attempts all fail to connect
        """
        LOG.debug("request url %s", path)
        _request = getattr(self.session, method.lower())
        _url = self._base_url(amp.lb_network_ip) + path
        LOG.debug("request url " + _url)
        # Separate connect/read timeouts, both from config.
        timeout_tuple = (CONF.haproxy_amphora.rest_request_conn_timeout,
                         CONF.haproxy_amphora.rest_request_read_timeout)
        reqargs = {
            'url': _url,
            'timeout': timeout_tuple, }
        reqargs.update(kwargs)
        headers = reqargs.setdefault('headers', {})
        headers['User-Agent'] = OCTAVIA_API_CLIENT
        # Keep retrying
        for a in range(CONF.haproxy_amphora.connection_max_retries):
            try:
                # Silence the urllib3 warning emitted because no real
                # SSLContext is in play for this plain-HTTP session.
                with warnings.catch_warnings():
                    warnings.filterwarnings(
                        "ignore",
                        message="A true SSLContext object is not available"
                    )
                    r = _request(**reqargs)
                LOG.debug("Connected to amphora. Response: {resp}".format(
                    resp=r))
                return r
            except (requests.ConnectionError, requests.Timeout):
                LOG.warning("Could not connect to instance. Retrying.")
                time.sleep(CONF.haproxy_amphora.connection_retry_interval)
        LOG.error("Connection retries (currently set to %s) "
                  "exhausted.  The amphora is unavailable.",
                  CONF.haproxy_amphora.connection_max_retries)
        raise driver_except.TimeOutException()

View File

@ -1,42 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.contrib.nfp.configurator.drivers.base import base_driver
from gbpservice.contrib.nfp.configurator.lib import (
nfp_service_constants as const)
LOG = nfp_logging.getLogger(__name__)
class HeatDriver(base_driver.BaseDriver):
    """Heat as a driver for handling config script heat requests.

    The service type is initialized at class level because the agent loads
    driver classes only when they declare one; this is also the only driver
    class exposed to the agent.
    """

    service_type = const.SERVICE_TYPE
    resource_type = const.HEAT_RESOURCE

    def __init__(self, conf):
        # No driver-level state is needed; conf is accepted for the
        # common driver constructor signature.
        pass

    def run_heat(self, context, kwargs):
        """Log and decline a heat template execution request."""
        LOG.info("Heat template execution request received but unhandled")
        return const.UNHANDLED_RESULT

View File

@ -1,18 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Vendor tag used to select the vyos driver.
SERVICE_VENDOR = 'vyos'
# Port of the configuration agent on the vyos service VM.
CONFIGURATION_SERVER_PORT = 8888
# URL template: (host, port, path).
# NOTE(review): lower-case name for a module constant — existing callers
# reference it as-is, so it is kept unchanged here.
request_url = "http://%s:%s/%s"
# REST call timeout in seconds.
REST_TIMEOUT = 180

View File

@ -1,55 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Service types the configurator can demultiplex to agents/drivers.
supported_service_types = ['firewall', 'vpn', 'loadbalancerv2']
# Resources handled as generic "nfp service" config scripts.
NFP_SERVICE_LIST = ['heat', 'ansible']
invalid_service_type = 'invalid'
NFP_SERVICE = 'nfp_service'

# Operation result states.
SUCCESS = 'SUCCESS'
FAILED = 'FAILED'
FAILURE = 'FAILURE'

GENERIC_CONFIG = 'generic_config'
ORCHESTRATOR = 'orchestrator'

# Internal event identifiers.
EVENT_STASH = 'STASH_EVENT'
EVENT_PROCESS_BATCH = 'PROCESS_BATCH'
NFD_NOTIFICATION = 'network_function_device_notification'

RABBITMQ_HOST = '127.0.0.1'  # send notifications to 'RABBITMQ_HOST'
NOTIFICATION_QUEUE = 'configurator-notifications'

# Service type / vendor tags.
FIREWALL = 'firewall'
VPN = 'vpn'
VYOS = 'vyos'
LOADBALANCERV2 = 'loadbalancerv2'
HAPROXY_LBAASV2 = 'haproxy'

# Operation names.
CREATE = 'create'
UPDATE = 'update'
DELETE = 'delete'
POST = 'post'
PUT = 'put'
# BUG FIX: UNHANDLED was defined twice (identical value); kept one.
UNHANDLED = "UNHANDLED"

# Generic-config resource names.
HEALTHMONITOR = 'healthmonitor'
INTERFACES = 'interfaces'
ROUTES = 'routes'

# HTTP result classification.
SUCCESS_CODES = [200, 201, 202, 203, 204]
ERROR_CODES = [400, 404, 500]

# Resource status values reported in notifications.
STATUS_ACTIVE = "ACTIVE"
STATUS_DELETED = "DELETED"
STATUS_UPDATED = "UPDATED"
STATUS_ERROR = "ERROR"
STATUS_SUCCESS = "SUCCESS"
DOWN = "Down"

# Package path searched for configurator agent modules.
AGENTS_PKG = ['gbpservice.contrib.nfp.configurator.agents']
CONFIGURATOR_RPC_TOPIC = 'configurator'

View File

@ -1,239 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
class Filter(object):
    """Filter class which provides data asked in a specific format.

    This class mocks all rpc calls going from *aaS agent/driver to the
    respective *aaS plugin: instead of a real RPC round trip, requested
    records are filtered out of the resource snapshot carried in context.
    """

    def __init__(self, topic, default_version):
        # Accepted only for signature compatibility with a real RPC
        # client; the mock needs neither topic nor version.
        pass

    def call(self, context, msg):
        """Returns data in specific format after applying filter on context

        :param context: resource snapshot, {'service_info': {...}}
        :param msg: e.g {'args': {'key': value,..},'method': 'function_name'}}

        Returns: data after applying filter on it
        :raises AttributeError: if no _<method> handler exists
        """
        filters = {}
        # Derive the filter dict from the first usable argument: a dict
        # value is taken verbatim as the full filter set, otherwise the
        # first truthy value becomes a single-key filter.
        for fk, fv in list(msg['args'].items()):
            if dict == type(fv):
                filters = fv
                break
            if fv:
                filters = {fk: fv}
                break
        method = getattr(self, '_%s' % (msg['method']))
        return method(context, filters)

    def make_msg(self, method, **kwargs):
        """Helper function needed to invoke Filter.call()

        :param method: method name
        :param kwargs: filters to be used

        Returns: dict
        """
        return {'method': method,
                'args': kwargs}

    def apply_filter(self, data, filters):
        """Apply filters on data, in place.

        :param filters: e.g {k:[v],k:[v]}
        :param data: list of dicts, e.g [{k:v,...}, {k:v,...}]

        Returns: data after applying filter on it
        """
        for fk, fv in list(filters.items()):
            # Iterate over a shallow copy so removing from 'data' is safe.
            for d in data[:]:
                if d.get(fk) is None:
                    data.remove(d)
                # BUG FIX: this was a second independent 'if'; when d[fk]
                # existed with value None the record was removed twice and
                # the second remove() raised ValueError.
                elif fk in d and d[fk] != fv[0]:
                    data.remove(d)
        return data

    def get_record(self, data, key, value):
        """Get the first record whose *key* equals *value*.

        :param data: list of dicts
        :param key: lookup key
        :param value: value to match

        Returns: the matching record, or None if no record matches
        """
        for d in data:
            if key in d and d[key] == value:
                return d

    def _get_vpn_services(self, context, filters):
        """Get vpn services from context after applying filter.

        :param context: vpn related resources
            e.g context = {'service_info':{'vpnservices': [vpnservices],
                                           'ikepolicies': [ikepolicies],
                                           'ipsecpolicies':[ipsecpolicies],
                                           'ipsec_site_conns':
                                               [ipsec_site_connections],
                                           'routers': [routers],
                                           'subnets': [subnets]}}
        :param filters: e.g {'ids': [vpn service ids], 'filters': filters}

        Returns: [vpn services]
        """
        vpn_ids = None
        if 'ids' in filters and filters['ids']:
            vpn_ids = filters['ids']
        vpnservices = context['service_info']['vpnservices']
        if vpn_ids:
            # Explicit id list wins over generic filters.
            return [self.get_record(vpnservices, 'id', vpn_id)
                    for vpn_id in vpn_ids]
        return self.apply_filter(vpnservices, filters)

    def _get_ipsec_conns(self, context, filters):
        """Get ipsec site conns from context after applying filter.

        :param context: vpn related resources (see _get_vpn_services)
        :param filters: e.g {'tenant_id': [tenant_id],
                             'peer_address': [conn['peer_address']]}

        Returns: [ipsec site conns]
        """
        service_info = context['service_info']
        # Deep copy: apply_filter mutates its input, and the snapshot in
        # context must stay intact for other queries.
        ipsec_conns = copy.deepcopy(service_info['ipsec_site_conns'])
        return self.apply_filter(ipsec_conns, filters)

    def _get_vpn_servicecontext(self, context, filters):
        """Get vpnservice context (currently IPSec site-to-site only)."""
        return self._get_ipsec_site2site_contexts(context, filters)

    def _get_ipsec_site2site_contexts(self, context, filters=None):
        """Get ipsec site to site contexts.

        :param filters: e.g {'tenant_id': <value>,
                             'vpnservice_id': <value>,
                             'siteconn_id': <value>}
            'tenant_id'     - s2s conns of that tenant
            'vpnservice_id' - s2s conns of that vpn service
            'siteconn_id'   - a specific s2s conn

        Returns: list of vpnservice dicts, each of the form
            {'service': <VPNService>,
             'siteconns': [{'connection': <IPSECsiteconnections>,
                            'ikepolicy': <IKEPolicy>,
                            'ipsecpolicy': <IPSECPolicy>}]}
        """
        if not filters:
            filters = {}
        service_info = context['service_info']
        vpnservices = {}
        # Translate the public filter names to record-level filter keys.
        s_filters = {}
        if 'tenant_id' in filters:
            s_filters['tenant_id'] = [filters['tenant_id']]
        if 'vpnservice_id' in filters:
            s_filters['vpnservice_id'] = [filters['vpnservice_id']]
        if 'siteconn_id' in filters:
            s_filters['id'] = [filters['siteconn_id']]
        if 'peer_address' in filters:
            s_filters['peer_address'] = [filters['peer_address']]
        ipsec_site_conns = self.apply_filter(
            service_info['ipsec_site_conns'], s_filters)
        for conn in ipsec_site_conns:
            # Resolve the service and policies referenced by this conn.
            vpnservice = [vpn for vpn in service_info['vpnservices']
                          if vpn['id'] == conn['vpnservice_id']][0]
            ikepolicy = [ikepolicy for ikepolicy in service_info['ikepolicies']
                         if ikepolicy['id'] == conn['ikepolicy_id']][0]
            ipsecpolicy = [ipsecpolicy for ipsecpolicy in
                           service_info['ipsecpolicies']
                           if ipsecpolicy['id'] == conn['ipsecpolicy_id']][0]
            siteconn = {'connection': conn,
                        'ikepolicy': ikepolicy,
                        'ipsecpolicy': ipsecpolicy}
            vpnserviceid = vpnservice['id']
            # Group all site connections under their owning vpn service.
            if vpnserviceid not in list(vpnservices.keys()):
                vpnservices[vpnserviceid] = \
                    {'service': vpnservice, 'siteconns': []}
            vpnservices[vpnserviceid]['siteconns'].append(siteconn)
        return self._make_vpnservice_context(vpnservices)

    def _make_vpnservice_context(self, vpnservices):
        """Generate vpnservice context from the dictionary of vpnservices.

        See, if some values are not needed by agent-driver, do not pass
        them. As of now, passing everything.
        """
        return list(vpnservices.values())

View File

@ -1,96 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from gbpservice.nfp.common import constants as const
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
class DataParser(object):
    ''' A library to parse device and service configuration and
    transform them into a dictionary of key-value pairs

    '''

    def __init__(self):
        pass

    def parse_data(self, resource, data):
        ''' Parser function exposed to the configurator modules.

        :param resource: Resource name (HEALTHMONITOR/INTERFACES/ROUTES/
        FIREWALL/LOADBALANCER/LOADBALANCERV2/VPN)
        :param data: Resource data dictionary in case of device configuration
        and context in case of service configuration

        Returns: a dictionary if nfds/nfs contains a single element else
        a list of dictionaries where each dictionary corresponds
        to each element in nfds/nfs

        '''
        config_data_list = []
        # Two accepted shapes: device config carries 'nfds' at top level,
        # service config nests 'nfs' under 'resource_data'.
        if data.get('nfds'):
            tenant_id = data['tenant_id']
            nf_config_list = data['nfds']
        elif data.get('resource_data'):
            tenant_id = data['resource_data']['tenant_id']
            nf_config_list = data['resource_data']['nfs']
        else:
            # Unknown schema: hand the input back untouched.
            msg = ("The given schema of data dictionary is not supported "
                   "by the data parser library. Returning the input. "
                   "Input data is: %s" % data)
            LOG.debug(msg)
            return data

        for nf_config in nf_config_list:
            # self.resource_data is rebuilt per nf_config; the deepcopy
            # below snapshots it before the next iteration overwrites it.
            # NOTE(review): instance-level scratch state — not safe for
            # concurrent parse_data calls on a shared parser; confirm.
            self.resource_data = {}
            self.resource_data.update({
                'tenant_id': tenant_id,
                'role': nf_config['role'],
                'mgmt_ip': nf_config['svc_mgmt_fixed_ip']})
            self._parse_config_data(nf_config, resource)
            config_data_list.append(copy.deepcopy(self.resource_data))
        # Single element unwraps to a plain dict for caller convenience.
        return (config_data_list[0]
                if len(config_data_list) == 1
                else config_data_list)

    def _parse_config_data(self, nfd, resource):
        # Flatten one nfd entry into self.resource_data.
        if resource.lower() == const.HEALTHMONITOR_RESOURCE:
            # Health monitor needs only the polling info, no networks.
            return self.resource_data.update(
                {'periodicity': nfd['periodicity'],
                 'vmid': nfd['vmid']})

        networks = nfd['networks']
        for network in networks:
            # Keys are prefixed with the network type (e.g. provider_ip).
            prefix = network['type']
            port = network['ports'][0]
            self.resource_data.update({
                (prefix + '_cidr'): network['cidr'],
                (prefix + '_ip'): port['fixed_ip'],
                (prefix + '_floating_ip'): port['floating_ip'],
                (prefix + '_mac'): port['mac'],
                (prefix + '_gw_ip'): network['gw_ip']})

        # VIPs are optional; only present for loadbalanced configurations.
        vips = nfd.get('vips')
        if not vips:
            return
        for vip in vips:
            prefix = vip['type'] + '_vip'
            self.resource_data.update({
                (prefix + '_ip'): vip['ip'],
                (prefix + '_mac'): vip['mac']})

View File

@ -1,165 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gbpservice.contrib.nfp.configurator.lib import constants as const
class ServiceAgentDemuxer(object):
    """Implements supporting methods for configurator module.

    Provides methods that take configurator API request data and helps
    configurator to de-multiplex the API calls to different service agents
    and drivers.

    Format of request data for network device/service configuration API:
    request_data {
        info {
            version: <v1/v2/v3>
            type: <firewall/vpn/loadbalancer>   # absent for device config
        }
        config [
            {
                'resource': <healthmonitor/routes/interfaces>,
                'kwargs': <resource parameters>
            }, ...
        ]
    }
    """

    def __init__(self):
        pass

    def get_service_type(self, request_data):
        """Retrieves service type from request data.

        :param request_data: API input data (format specified at top of file)

        Returns:
        (1) "firewall"/"vpn"/"loadbalancerv2"/... (lower-cased)
        (2) "generic_config" if service_type field is absent in request_data
        """
        service_type = request_data['info'].get('service_type')
        # BUG FIX: .get('service_type').lower() raised AttributeError when
        # the key was absent; fall back to generic_config as documented.
        if service_type is None:
            return const.GENERIC_CONFIG
        return service_type.lower()

    def get_service_agent_info(self, operation, resource_type,
                               request_data, is_generic_config):
        """Prepares information for service agent consumption.

        :param operation: create/delete/update
        :param resource_type: firewall/vpn/loadbalancer/generic_config
        :param request_data: API input data (format specified at top of file)
        :param is_generic_config: True for device-level configuration

        Returns: (sa_info_list, resource_type) where sa_info_list is
        [
            {
                'method': <*aas RPC methods/generic configuration methods>,
                'resource_data': <kwargs taken from request data of API>,
                'agent_info': {'context', 'service_vendor',
                               'service_feature', 'resource_type',
                               'resource'},
                'is_generic_config': <bool>
            }
        ]
        or None when a config entry carries no resource_data.
        """
        sa_info_list = []
        # Default vendor per service type when the request names none.
        vendor_map = {const.FIREWALL: const.VYOS,
                      const.VPN: const.VYOS,
                      const.LOADBALANCERV2: const.HAPROXY_LBAASV2}

        service_vendor = request_data['info']['service_vendor']
        # str() comparison deliberately catches both None and the literal
        # string 'None' arriving over the wire.
        if str(service_vendor) == 'None':
            service_vendor = vendor_map[resource_type]

        service_feature = request_data['info'].get('service_feature')
        if not service_feature:
            service_feature = ''

        is_nfp_svc = False
        for config_data in request_data['config']:
            resource_type_to_method_map = {
                const.FIREWALL: (operation + '_' + config_data['resource']),
                const.VPN: ('vpnservice_updated'),
                const.LOADBALANCERV2: (operation + '_' + config_data[
                    'resource']),
                const.NFP_SERVICE: ('run' + '_' + const.NFP_SERVICE),
                const.GENERIC_CONFIG: {
                    const.CREATE: ('configure_' + config_data['resource']),
                    const.UPDATE: ('update_' + config_data['resource']),
                    const.DELETE: ('clear_' + config_data['resource'])}}

            context = request_data['info']['context']
            data = config_data['resource_data']
            if not data:
                # NOTE(review): returns bare None while the normal exit
                # returns a 2-tuple; callers that unpack must guard against
                # this — confirm before changing.
                return None

            resource = config_data['resource']
            is_nfp_svc = resource in const.NFP_SERVICE_LIST

            if is_generic_config:
                method = resource_type_to_method_map[
                    const.GENERIC_CONFIG][operation]
            else:
                if is_nfp_svc:
                    resource_type = const.NFP_SERVICE
                try:
                    method = resource_type_to_method_map[resource_type]
                except Exception:
                    # Unknown service type: hand off generically.
                    method = 'handle_config'

            sa_info_list.append({
                'method': method,
                'resource_data': data,
                'agent_info': {
                    # This is the API context
                    'context': context,
                    'service_vendor': service_vendor.lower(),
                    'service_feature': service_feature,
                    'resource_type': resource_type.lower(),
                    'resource': resource.lower()},
                'is_generic_config': is_generic_config})

        # Reflect any reclassification back to the caller.
        if is_nfp_svc:
            resource_type = const.NFP_SERVICE
        elif is_generic_config:
            resource_type = const.GENERIC_CONFIG
        return sa_info_list, resource_type

View File

@ -1,22 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Service operation status constants shared by the configurator agents.
ACTIVE = "ACTIVE"
PENDING_CREATE = "PENDING_CREATE"
PENDING_UPDATE = "PENDING_UPDATE"
PENDING_DELETE = "PENDING_DELETE"
INACTIVE = "INACTIVE"
# Statuses that count as "active or transitioning towards active".
ACTIVE_PENDING_STATUSES = (
ACTIVE,
PENDING_CREATE,
PENDING_UPDATE
)

View File

@ -1,17 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Firewall service type identifier and the event names emitted for the
# firewall lifecycle operations.
SERVICE_TYPE = 'firewall'
FIREWALL_CREATE_EVENT = 'CREATE_FIREWALL'
FIREWALL_UPDATE_EVENT = 'UPDATE_FIREWALL'
FIREWALL_DELETE_EVENT = 'DELETE_FIREWALL'

View File

@ -1,35 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Generic-config service type identifier and the event names for interface,
# route and health-monitor configuration on a service VM.
SERVICE_TYPE = 'generic_config'
EVENT_CONFIGURE_INTERFACES = 'CONFIGURE_INTERFACES'
EVENT_CLEAR_INTERFACES = 'CLEAR_INTERFACES'
EVENT_CONFIGURE_ROUTES = 'CONFIGURE_ROUTES'
EVENT_CLEAR_ROUTES = 'CLEAR_ROUTES'
EVENT_CONFIGURE_HEALTHMONITOR = 'CONFIGURE_HEALTHMONITOR'
EVENT_CLEAR_HEALTHMONITOR = 'CLEAR_HEALTHMONITOR'
# Maximum consecutive health-monitor failures tolerated before the device
# is considered down.
# REVISIT: Need to make this configurable
MAX_FAIL_COUNT = 5
# Health-monitor polling modes.
INITIAL = 'initial'
FOREVER = 'forever'
# Device reachability transition event names.
DEVICE_TO_BECOME_DOWN = 'DEVICE_TO_BECOME_DOWN'
DEVICE_TO_BECOME_UP = 'DEVICE_TO_BECOME_UP'
PERIODIC_HM = 'periodic_healthmonitor'
DEVICE_NOT_REACHABLE = 'PERIODIC_HM_DEVICE_NOT_REACHABLE'
DEVICE_REACHABLE = 'PERIODIC_HM_DEVICE_REACHABLE'
# POLLING EVENTS SPACING AND MAXRETRIES
EVENT_CONFIGURE_HEALTHMONITOR_SPACING = 10  # unit in sec.
EVENT_CONFIGURE_HEALTHMONITOR_MAXRETRY = 100

View File

@ -1,113 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Loadbalancer V2 (LBaaS v2) service type identifier and RPC topics.
SERVICE_TYPE = 'loadbalancerv2'
NEUTRON = 'neutron'
LBAAS_AGENT_RPC_TOPIC = 'lbaasv2_agent'
LBAAS_GENERIC_CONFIG_RPC_TOPIC = 'lbaas_generic_config'
LBAAS_PLUGIN_RPC_TOPIC = 'n-lbaas-plugin'
AGENT_TYPE_LOADBALANCER = 'OC Loadbalancer V2 agent'
# Resources names
LOADBALANCER = 'loadbalancer'
LISTENER = 'listener'
POOL = 'pool'
MEMBER = 'member'
HEALTHMONITOR = 'healthmonitor'
SNI = 'sni'
L7POLICY = 'l7policy'
L7RULE = 'l7rule'
# Resources names for update apis (carry the pre-update resource state)
OLD_LOADBALANCER = 'old_loadbalancer'
OLD_LISTENER = 'old_listener'
OLD_POOL = 'old_pool'
OLD_MEMBER = 'old_member'
OLD_HEALTHMONITOR = 'old_healthmonitor'
# Operations
CREATE = 'create'
UPDATE = 'update'
DELETE = 'delete'
# Service operation status constants
ACTIVE = "ACTIVE"
DOWN = "DOWN"
CREATED = "CREATED"
PENDING_CREATE = "PENDING_CREATE"
PENDING_UPDATE = "PENDING_UPDATE"
PENDING_DELETE = "PENDING_DELETE"
INACTIVE = "INACTIVE"
ERROR = "ERROR"
STATUS_SUCCESS = "SUCCESS"
# Statuses that count as "active or transitioning towards active".
ACTIVE_PENDING_STATUSES = (
ACTIVE,
PENDING_CREATE,
PENDING_UPDATE
)
# URL template: (host, port, path)
REQUEST_URL = "http://%s:%s/%s"
# Constants to extend status strings in neutron.plugins.common.constants
ONLINE = 'ONLINE'
OFFLINE = 'OFFLINE'
DEGRADED = 'DEGRADED'
DISABLED = 'DISABLED'
NO_MONITOR = 'NO_MONITOR'
""" HTTP request/response """
HTTP_REQ_METHOD_POST = 'POST'
HTTP_REQ_METHOD_GET = 'GET'
HTTP_REQ_METHOD_PUT = 'PUT'
HTTP_REQ_METHOD_DELETE = 'DELETE'
CONTENT_TYPE_HEADER = 'Content-type'
JSON_CONTENT_TYPE = 'application/json'
# Load-balancing algorithms
LB_METHOD_ROUND_ROBIN = 'ROUND_ROBIN'
LB_METHOD_LEAST_CONNECTIONS = 'LEAST_CONNECTIONS'
LB_METHOD_SOURCE_IP = 'SOURCE_IP'
# Listener protocols
PROTOCOL_TCP = 'TCP'
PROTOCOL_HTTP = 'HTTP'
PROTOCOL_HTTPS = 'HTTPS'
# Health-monitor probe types
HEALTH_MONITOR_PING = 'PING'
HEALTH_MONITOR_TCP = 'TCP'
HEALTH_MONITOR_HTTP = 'HTTP'
HEALTH_MONITOR_HTTPS = 'HTTPS'
LBAAS = 'lbaas'
""" Event ids """
EVENT_CREATE_LOADBALANCER_V2 = 'CREATE_LOADBALANCER_V2'
EVENT_UPDATE_LOADBALANCER_V2 = 'UPDATE_LOADBALANCER_V2'
EVENT_DELETE_LOADBALANCER_V2 = 'DELETE_LOADBALANCER_V2'
EVENT_CREATE_LISTENER_V2 = 'CREATE_LISTENER_V2'
EVENT_UPDATE_LISTENER_V2 = 'UPDATE_LISTENER_V2'
EVENT_DELETE_LISTENER_V2 = 'DELETE_LISTENER_V2'
EVENT_CREATE_POOL_V2 = 'CREATE_POOL_V2'
EVENT_UPDATE_POOL_V2 = 'UPDATE_POOL_V2'
EVENT_DELETE_POOL_V2 = 'DELETE_POOL_V2'
EVENT_CREATE_MEMBER_V2 = 'CREATE_MEMBER_V2'
EVENT_UPDATE_MEMBER_V2 = 'UPDATE_MEMBER_V2'
EVENT_DELETE_MEMBER_V2 = 'DELETE_MEMBER_V2'
EVENT_CREATE_HEALTH_MONITOR_V2 = 'CREATE_HEALTH_MONITOR_V2'
EVENT_UPDATE_HEALTH_MONITOR_V2 = 'UPDATE_HEALTH_MONITOR_V2'
EVENT_DELETE_HEALTH_MONITOR_V2 = 'DELETE_HEALTH_MONITOR_V2'
EVENT_AGENT_UPDATED_V2 = 'AGENT_UPDATED_V2'
EVENT_COLLECT_STATS_V2 = 'COLLECT_STATS_V2'

View File

@ -1,20 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NFP service type identifier, event name, and result/status markers used
# when running config-script (heat) based NFP services.
SERVICE_TYPE = 'nfp_service'
CREATE_NFP_SERVICE_EVENT = 'CREATE_NFP_SERVICE'
UNHANDLED_RESULT = 'unhandled'
ERROR_RESULT = 'error'
HEAT_RESOURCE = 'HEAT'
NFP_SERVICE = 'nfp_service'
SUCCESS = 'SUCCESS'
FAILURE = 'FAILURE'

View File

@ -1,119 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import os
import sys
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
class ConfiguratorUtils(object):
    """Utility class which provides common library functions for configurator.

    New common library functions, if needed, should be added in this class.
    """

    def __init__(self, conf):
        # conf supplies the package lists via conf.CONFIG_DRIVERS.drivers
        # and conf.CONFIG_AGENTS.agents.
        self.conf = conf

    def load_drivers(self, service_type=None):
        """Load all the driver class objects inside pkg.

        In each class in the pkg it will look for keyword 'service_type'
        or/and 'vendor' and select that class as driver class.

        :param service_type: firewall/vpn/loadbalancer/nfp_service/None
            (None selects drivers of every service type)

        Returns: driver_objects dictionary
                 e.g driver_objects = {'loadbalancer': <driver class object>}
        """
        pkgs = self.conf.CONFIG_DRIVERS.drivers
        driver_objects = {}
        modules = []
        subdirectories = []
        for pkg in pkgs:
            # BUGFIX: the original passed level=-1 (Python 2 implicit
            # relative import), which raises ValueError on Python 3.
            # level=0 forces an absolute import and works on both.
            base_driver = __import__(pkg,
                                     globals(), locals(), ['drivers'], 0)
            drivers_dir = base_driver.__path__[0]
            subdirectories += [x[0] for x in os.walk(drivers_dir)]
        for subd in subdirectories:
            # Temporarily prepend the directory to sys.path so that a plain
            # __import__(<module name>) resolves the files inside it.
            syspath = sys.path
            sys.path = [subd] + syspath
            try:
                files = os.listdir(subd)
            except OSError:
                msg = ("Failed to read files from dir %s" % (subd))
                LOG.error(msg)
                files = []
            for fname in files:
                if fname.endswith(".py") and fname != '__init__.py':
                    modules += [__import__(fname[:-3])]
            sys.path = syspath
        for module in modules:
            for name, class_obj in inspect.getmembers(module):
                if inspect.isclass(class_obj):
                    # Key is service_type (+ vendor and feature when the
                    # class advertises them); classes without a matching
                    # service_type attribute are skipped.
                    key = ''
                    if hasattr(class_obj, 'service_type') and (
                        not service_type or (service_type.lower() in (
                            class_obj.service_type.lower()))):
                        key += class_obj.service_type
                    else:
                        continue
                    if hasattr(class_obj, 'service_vendor'):
                        key += class_obj.service_vendor
                    if hasattr(class_obj, 'service_feature'):
                        key += class_obj.service_feature
                    if key:
                        driver_objects[key] = class_obj
        return driver_objects

    def load_agents(self, pkgs):
        """Load all the agents inside pkg.

        :param pkgs: ignored, kept for backward compatibility; the agent
            package list is read from conf.CONFIG_AGENTS.agents instead.

        Returns: imported_service_agents list
        """
        imported_service_agents = []
        pkgs = self.conf.CONFIG_AGENTS.agents
        for pkg in pkgs:
            # level=0 (absolute import) instead of the Python-2-only -1.
            base_agent = __import__(pkg,
                                    globals(), locals(), ['agents'], 0)
            agents_dir = base_agent.__path__[0]
            syspath = sys.path
            sys.path = [agents_dir] + syspath
            try:
                files = os.listdir(agents_dir)
            except OSError:
                msg = ("Failed to read files from dir %s" % (agents_dir))
                LOG.error(msg)
                files = []
            for fname in files:
                if fname.endswith(".py") and fname != '__init__.py':
                    agent = __import__(pkg, globals(),
                                       locals(), [fname[:-3]], 0)
                    # getattr instead of eval: same lookup, no string eval.
                    imported_service_agents += [getattr(agent, fname[:-3])]
            sys.path = syspath
        return imported_service_agents

View File

@ -1,25 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# VPN service type identifier, service states and RPC topics.
SERVICE_TYPE = 'vpn'
STATE_PENDING = 'PENDING_CREATE'
STATE_INIT = 'INIT'
STATE_ACTIVE = 'ACTIVE'
STATE_ERROR = 'ERROR'
VPN_GENERIC_CONFIG_RPC_TOPIC = "vyos_vpn_topic"
VPN_PLUGIN_TOPIC = 'vpn_plugin'
VPN_AGENT_TOPIC = 'vpn_agent'

View File

@ -1,542 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import helpers as log_helpers
from gbpservice.contrib.nfp.configurator.lib import constants as const
from gbpservice.contrib.nfp.configurator.lib import demuxer
from gbpservice.contrib.nfp.configurator.lib import utils
from gbpservice.nfp.core import context as module_context
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.core import rpc
LOG = nfp_logging.getLogger(__name__)
class ConfiguratorRpcManager(object):
    """Implements procedure calls invoked by an REST server.

    Implements following RPC methods.
      - create_network_function_device_config
      - delete_network_function_device_config
      - update_network_function_device_config
      - create_network_function_config
      - delete_network_function_config
      - update_network_function_config
      - get_notifications
    Also implements local methods for supporting RPC methods
    """

    def __init__(self, sc, cm, conf, demuxer):
        # sc: core service controller, cm: ConfiguratorModule instance,
        # conf: oslo config object, demuxer: ServiceAgentDemuxer instance.
        self.sc = sc
        self.cm = cm
        self.conf = conf
        self.demuxer = demuxer

    def _get_service_agent_instance(self, service_type):
        """Provides service agent instance based on service type.

        :param service_type: firewall/vpn/loadbalancer/generic_config

        Returns: Instance of service agent for a given service type
        """
        return self.cm.sa_instances[service_type]

    def _init_log_context(self, request_data):
        """Seed a fresh per-request NFP context with the caller's log context.

        Shared preamble of every RPC entry point below (was duplicated in
        each method): creates a new NFP context and copies the caller's
        'logging_context' into it so log lines can be correlated.

        :param request_data: RPC data containing an 'info' dict
        """
        nfp_context = module_context.init()
        log_info = request_data.get('info')
        logging_context = log_info['context'].get('logging_context', {})
        nfp_context['log_context'] = logging_context

    def _invoke_service_agent(self, operation,
                              request_data, is_generic_config=False):
        """Maps and invokes an RPC call to a service agent method.

        Takes help of de-multiplexer to get service type and corresponding
        data and invokes the method of service agent. Service agent instance
        is identified based on the service type passed in the request data.

        :param operation: Operation type - create/delete/update
        :param request_data: RPC data
        :param is_generic_config: True for device-level (generic) config

        Returns: None

        Raises: Exception for invalid service type, malformed request data,
            or missing service agent.
        """
        # Retrieves service type from RPC data
        service_type = self.demuxer.get_service_type(request_data)
        if (const.invalid_service_type == service_type):
            msg = ("Configurator received invalid service type %s." %
                   service_type)
            raise Exception(msg)

        # Retrieves service agent information from RPC data
        # Format of sa_req_list:
        # [{'method': <m1>, 'kwargs': <rpc_data1>}, {}, ... ]
        sa_req_list, service_type = self.demuxer.get_service_agent_info(
            operation, service_type,
            request_data, is_generic_config)
        if not sa_req_list:
            msg = ("Configurator received invalid data format for service"
                   " type %s. Data format: %r" % (service_type, request_data))
            raise Exception(msg)

        # Retrieves service agent instance using service type
        sa_instance = self._get_service_agent_instance(service_type)
        if not sa_instance:
            msg = ("Failed to find agent with service type %s." % service_type)
            raise Exception(msg)

        # Notification data list that needs to be returned after processing
        # RPC request. Format of notification data:
        # notification_data[
        #     {
        #         'receiver': <neutron/orchestrator>,
        #         'resource': <firewall/vpn/loadbalancer/healthmonitor/
        #                      routes/interfaces>,
        #         'method': <network_function_device_notification/
        #                    *aaS response RPC method name>,
        #         'kwargs': [{<data1>}, {data2}]
        #     },
        #     {
        #     }, ...
        # ]
        #
        # Initially, notification data will be empty and is populated
        # after processing each request data in the request data list
        notification_data = {}

        # Handover the request data list and notification data to the
        # identified service agent
        sa_instance.process_request(sa_req_list, notification_data)

    @log_helpers.log_method_call
    def create_network_function_device_config(self, context, request_data):
        """RPC method to configure a network service device.

        Configures a network service VM to facilitate network service
        operation. This RPC method is invoked by the configurator REST
        server. It configures a network service based on the configuration
        request specified in the request_data argument.

        :param context: RPC context instance
        :param request_data: RPC data

        Returns: None
        """
        try:
            self._init_log_context(request_data)
            LOG.info("Received RPC CREATE NETWORK FUNCTION DEVICE CONFIG "
                     "for %(service_type)s, NFI: %(nfi)s, "
                     "NF_ID: %(nf_id)s",
                     {'service_type': request_data['info']['service_type'],
                      'nfi': request_data['info']['context']['nfi_id'],
                      'nf_id': request_data['info']['context']['nf_id']})
            self._invoke_service_agent('create', request_data, True)
        except Exception as err:
            msg = ("Failed to create network device configuration. %s" %
                   str(err).capitalize())
            LOG.error(msg)

    @log_helpers.log_method_call
    def delete_network_function_device_config(self, context, request_data):
        """RPC method to clear configuration of a network service device.

        Clears configuration of a network service VM. This RPC method is
        invoked by the configurator REST server. It clears configuration
        of a network service based on the configuration request specified
        in the request_data argument.

        :param context: RPC context instance
        :param request_data: RPC data

        Returns: None
        """
        try:
            self._init_log_context(request_data)
            LOG.info("Received RPC DELETE NETWORK FUNCTION DEVICE CONFIG "
                     "for %(service_type)s, NFI: %(nfi)s, "
                     "NF_ID: %(nf_id)s",
                     {'service_type': request_data['info']['service_type'],
                      'nfi': request_data['info']['context']['nfi_id'],
                      'nf_id': request_data['info']['context']['nf_id']})
            self._invoke_service_agent('delete', request_data, True)
        except Exception as err:
            msg = ("Failed to delete network device configuration. %s" %
                   str(err).capitalize())
            LOG.error(msg)

    @log_helpers.log_method_call
    def update_network_function_device_config(self, context, request_data):
        """RPC method to update of configuration in a network service device.

        Updates configuration of a network service VM. This RPC method is
        invoked by the configurator REST server. It updates configuration
        of a network service based on the configuration request specified
        in the request_data argument.

        :param context: RPC context instance
        :param request_data: RPC data

        Returns: None
        """
        try:
            self._init_log_context(request_data)
            LOG.info("Received RPC UPDATE NETWORK FUNCTION DEVICE CONFIG "
                     "for %(service_type)s, NFI: %(nfi)s, "
                     "NF_ID: %(nf_id)s",
                     {'service_type': request_data['info']['service_type'],
                      'nfi': request_data['info']['context']['nfi_id'],
                      'nf_id': request_data['info']['context']['nf_id']})
            self._invoke_service_agent('update', request_data, True)
        except Exception as err:
            msg = ("Failed to update network device configuration. %s" %
                   str(err).capitalize())
            LOG.error(msg)

    @log_helpers.log_method_call
    def create_network_function_config(self, context, request_data):
        """RPC method to configure a network service.

        Configures a network service specified in the request data. This
        RPC method is invoked by the configurator REST server. It configures
        a network service based on the configuration request specified in
        the request_data argument.

        :param context: RPC context instance
        :param request_data: RPC data

        Returns: None
        """
        try:
            self._init_log_context(request_data)
            LOG.info("Received RPC CREATE NETWORK FUNCTION CONFIG "
                     "for %(service_type)s ",
                     {'service_type': request_data['info']['service_type']})
            self._invoke_service_agent('create', request_data)
        except Exception as err:
            msg = ("Failed to create network service configuration. %s" %
                   str(err).capitalize())
            LOG.error(msg)

    @log_helpers.log_method_call
    def delete_network_function_config(self, context, request_data):
        """RPC method to clear configuration of a network service.

        Clears configuration of a network service. This RPC method is
        invoked by the configurator REST server. It clears configuration
        of a network service based on the configuration request specified
        in the request_data argument.

        :param context: RPC context instance
        :param request_data: RPC data

        Returns: None
        """
        try:
            self._init_log_context(request_data)
            LOG.info("Received RPC DELETE NETWORK FUNCTION CONFIG "
                     "for %(service_type)s ",
                     {'service_type': request_data['info']['service_type']})
            self._invoke_service_agent('delete', request_data)
        except Exception as err:
            msg = ("Failed to delete network service configuration. %s" %
                   str(err).capitalize())
            LOG.error(msg)

    @log_helpers.log_method_call
    def update_network_function_config(self, context, request_data):
        """RPC method to update of configuration in a network service.

        Updates configuration of a network service. This RPC method is
        invoked by the configurator REST server. It updates configuration
        of a network service based on the configuration request specified
        in the request_data argument.

        :param context: RPC context instance
        :param request_data: RPC data

        Returns: None
        """
        try:
            self._init_log_context(request_data)
            LOG.info("Received RPC UPDATE NETWORK FUNCTION CONFIG "
                     "for %(service_type)s ",
                     {'service_type': request_data['info']['service_type']})
            self._invoke_service_agent('update', request_data)
        except Exception as err:
            msg = ("Failed to update network service configuration. %s" %
                   str(err).capitalize())
            LOG.error(msg)

    @log_helpers.log_method_call
    def get_notifications(self, context):
        """RPC method to get all notifications published by configurator.

        Gets all the notifications from the notifications from notification
        queue and sends to configurator agent

        :param context: RPC context instance

        Returns: notification_data
        """
        module_context.init()
        LOG.info("Received RPC GET NOTIFICATIONS ")
        events = self.sc.get_stashed_events()
        notifications = []
        for event in events:
            notification = event.data
            msg = ("Notification Data: %r" % notification)
            notifications.append(notification)
            LOG.info(msg)
        return notifications
class ConfiguratorModule(object):
    """Implements configurator module APIs.

    Implements methods which are either invoked by registered service
    agents or by the configurator global methods. The methods invoked
    by configurator global methods interface with service agents.
    """

    def __init__(self, sc):
        # NOTE: sc is accepted for interface compatibility but not stored;
        # agents receive sc explicitly via init_service_agents().
        self.sa_instances = {}
        self.imported_sas = []

    def register_service_agent(self, service_type, service_agent):
        """Stores service agent object.

        :param service_type: Type of service - firewall/vpn/loadbalancer/
            generic_config.
        :param service_agent: Instance of service agent class.

        Returns: Nothing
        """
        if service_type not in self.sa_instances:
            msg = ("Configurator registered service agent of type %s." %
                   service_type)
            LOG.info(msg)
        else:
            msg = ("Identified duplicate registration with service type %s." %
                   service_type)
            LOG.warning(msg)

        # Register the service agent irrespective of previous registration
        self.sa_instances.update({service_type: service_agent})

    def _invoke_on_agents(self, method_name, sc, conf):
        """Invoke method_name(self, sc, conf) on every imported agent module.

        Shared body of init_service_agents()/init_service_agents_complete()
        (was duplicated). Re-raises AttributeError with the agent's file
        path prepended so the offending agent module is identifiable.
        """
        for agent in self.imported_sas:
            try:
                getattr(agent, method_name)(self, sc, conf)
            except AttributeError as attr_err:
                LOG.error(agent.__dict__)
                raise AttributeError(agent.__file__ + ': ' + str(attr_err))

    def init_service_agents(self, sc, conf):
        """Invokes service agent initialization method.

        :param sc: Service Controller object that is used for interfacing
            with core service controller.
        :param conf: Configuration object that is used for configuration
            parameter access.

        Returns: None
        """
        self._invoke_on_agents('init_agent', sc, conf)

    def init_service_agents_complete(self, sc, conf):
        """Invokes service agent initialization complete method.

        :param sc: Service Controller object that is used for interfacing
            with core service controller.
        :param conf: Configuration object that is used for configuration
            parameter access.

        Returns: None
        """
        self._invoke_on_agents('init_agent_complete', sc, conf)
def init_rpc(sc, cm, conf, demuxer):
    """Initializes oslo RPC client.

    Creates RPC manager object and registers the configurator's RPC
    agent object with core service controller.

    :param sc: Service Controller object that is used for interfacing
        with core service controller.
    :param cm: Configurator module object that is used for accessing
        ConfiguratorModule class methods.
    :param conf: Configuration object that is used for configuration
        parameter access.
    :param demuxer: De-multiplexer object that is used for accessing
        ServiceAgentDemuxer class methods.

    Returns: None
    """
    # Build the manager that services incoming configurator RPC calls,
    # wrap it in an RPC agent listening on the configurator topic, and
    # hand the agent over to the core controller for dispatch.
    manager = ConfiguratorRpcManager(sc, cm, conf, demuxer)
    agent = rpc.RpcAgent(sc,
                         topic=const.CONFIGURATOR_RPC_TOPIC,
                         manager=manager)
    sc.register_rpc_agents([agent])
def get_configurator_module_instance(sc, conf):
    """Provides ConfiguratorModule class object and loads service agents.

    Returns: Instance of ConfiguratorModule class
    """
    configurator = ConfiguratorModule(sc)
    loader = utils.ConfiguratorUtils(conf)

    # Pull in every service agent found under the AGENTS_PKG module path.
    configurator.imported_sas = loader.load_agents(const.AGENTS_PKG)
    LOG.info("Configurator loaded service agents from %s location."
             % (configurator.imported_sas))
    return configurator
def nfp_module_init(sc, conf):
    """Initializes configurator module.

    Creates de-multiplexer object and invokes all the agent entry point
    functions. Initializes oslo RPC client for receiving messages from
    REST server. Exceptions are raised to parent function for all types
    of failures.

    :param sc: Service Controller object that is used for interfacing
        with core service controller.
    :param conf: Configuration object that is used for configuration
        parameter access.

    Returns: None

    Raises: Generic exception including error message
    """
    # Step 1: configurator module + de-multiplexer objects.
    try:
        configurator = get_configurator_module_instance(sc, conf)
        request_demuxer = demuxer.ServiceAgentDemuxer()
    except Exception as err:
        msg = ("Failed to initialize configurator de-multiplexer. %s."
               % (str(err).capitalize()))
        LOG.error(msg)
        raise Exception(err)
    LOG.info("Initialized configurator de-multiplexer.")

    # Step 2: let every pre-loaded service agent initialize itself.
    try:
        configurator.init_service_agents(sc, conf)
    except Exception as err:
        msg = ("Failed to initialize configurator agent modules. %s."
               % (str(err).capitalize()))
        LOG.error(msg)
        raise Exception(err)
    LOG.info("Initialized configurator agents.")

    # Step 3: RPC client for messages arriving from the REST server.
    try:
        init_rpc(sc, configurator, conf, request_demuxer)
    except Exception as err:
        msg = ("Failed to initialize configurator RPC with topic %s. %s."
               % (const.CONFIGURATOR_RPC_TOPIC, str(err).capitalize()))
        LOG.error(msg)
        raise Exception(err)
    LOG.debug("Initialized configurator RPC with topic %s."
              % const.CONFIGURATOR_RPC_TOPIC)
def nfp_module_post_init(sc, conf):
    """Invokes service agent's initialization complete methods.

    :param sc: Service Controller object that is used for interfacing
        with core service controller.
    :param conf: Configuration object that is used for configuration
        parameter access.

    Returns: None

    Raises: Generic exception including error message
    """
    try:
        configurator = get_configurator_module_instance(sc, conf)
        configurator.init_service_agents_complete(sc, conf)
    except Exception as err:
        msg = ("Failed to trigger initialization complete for configurator"
               " agent modules. %s." % (str(err).capitalize()))
        LOG.error(msg)
        raise Exception(err)
    LOG.info("Initialization of configurator agent modules completed.")

View File

@ -1,326 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import exceptions as k_exceptions
from keystoneclient.v2_0 import client as keyclient
from gbpservice._i18n import _
from gbpservice.common import utils
from gbpservice.contrib.nfp.config_orchestrator.common import topics
from gbpservice.nfp.core import log as nfp_logging
import netaddr
from neutron.db import l3_db
from neutron.db.l3_db import DEVICE_OWNER_ROUTER_INTF
from neutron.db.l3_db import EXTERNAL_GW_INFO
from neutron.db.models.l3 import RouterPort
from neutron.db import models_v2
from neutron_lib import constants as nlib_const
from neutron_lib import exceptions as n_exc
from neutron_lib.exceptions import l3
from neutron_lib.plugins import constants as n_const
import neutron_fwaas.extensions
from neutron_fwaas.services.firewall import fwaas_plugin as ref_fw_plugin
from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import uuidutils
from sqlalchemy import orm
from neutron_fwaas.db.firewall import firewall_db as n_firewall
LOG = nfp_logging.getLogger(__name__)
class NFPFirewallPlugin(ref_fw_plugin.FirewallPlugin):
    """FWaaS plugin that redirects firewall RPCs to the NFP config agent.

    Overrides the reference plugin's router-association methods with no-op
    or stub variants so firewalls are not tied to specific routers.
    """

    def __init__(self):
        # Monkey patch L3 agent topic
        # L3 agent was where reference firewall agent runs
        # patch that topic to the NFP firewall agent's topic name.
        # NOTE: the patch must happen before super().__init__() so the
        # parent plugin picks up the patched topic.
        ref_fw_plugin.f_const.L3_AGENT = topics.FW_NFP_CONFIGAGENT_TOPIC

        # Ensure neutron fwaas extensions are loaded
        ext_path = neutron_fwaas.extensions.__path__[0]
        if ext_path not in cfg.CONF.api_extensions_path.split(':'):
            cfg.CONF.set_override(
                'api_extensions_path',
                cfg.CONF.api_extensions_path + ':' + ext_path)

        super(NFPFirewallPlugin, self).__init__()

    # Modifying following plugin function, to relax same router validation
    def _get_routers_for_create_firewall(self, tenant_id, context, firewall):
        # pop router_id as this goes in the router association db
        # and not firewall db
        # NOTE(review): when router_ids WAS specified this implicitly
        # returns None -- presumably relied upon by callers; confirm.
        router_ids = firewall['firewall'].pop('router_ids', None)
        if router_ids == nlib_const.ATTR_NOT_SPECIFIED:
            return tenant_id

    def set_routers_for_firewall(self, context, fw):
        """Sets the routers associated with the fw."""
        # Intentionally a no-op: router association is not tracked here.
        pass

    def get_firewall_routers(self, context, fwid):
        """Gets all routers associated with a firewall."""
        # Returns a fixed placeholder id rather than a real router list.
        fw_rtrs = ['1234567890']
        return fw_rtrs

    def validate_firewall_routers_not_in_use(
            self, context, router_ids, fwid=None):
        """Validate if router-ids not associated with any firewall.

        If any of the router-ids in the list is already associated with
        a firewall, raise an exception else just return.
        """
        # Intentionally a no-op: validation relaxed for NFP firewalls.
        pass

    def update_firewall_routers(self, context, fw):
        """Update the firewall with new routers.

        This involves removing existing router associations and replacing
        it with the new router associations provided in the update method.
        """
        # No associations to update; the firewall dict is returned as-is.
        return fw
# Monkey patching the create_firewall db method
def create_firewall(self, context, firewall, status=None):
    """Create a firewall DB record without any router association.

    Module-level replacement for Firewall_db_mixin.create_firewall (patched
    in below); 'self' is the Firewall_db_mixin instance at call time.

    :param context: neutron api request context
    :param firewall: request body dict with a 'firewall' key
    :param status: optional initial status; falsy values default to
        PENDING_CREATE

    Returns: firewall dict built from the newly added DB object
    """
    fw = firewall['firewall']
    tenant_id = fw['tenant_id']
    # distributed routers may require a more complex state machine;
    # the introduction of a new 'CREATED' state allows this, whilst
    # keeping a backward compatible behavior of the logical resource.
    if not status:
        status = n_const.PENDING_CREATE
    with context.session.begin(subtransactions=True):
        self._validate_fw_parameters(context, fw, tenant_id)
        firewall_db = n_firewall.Firewall(
            id=uuidutils.generate_uuid(),
            tenant_id=tenant_id,
            name=fw['name'],
            description=fw['description'],
            firewall_policy_id=fw['firewall_policy_id'],
            admin_state_up=fw['admin_state_up'],
            status=status)
        context.session.add(firewall_db)
    return self._make_firewall_dict(firewall_db)


# Replace the mixin's implementation so every plugin using it gets the
# router-association-free behavior above.
n_firewall.Firewall_db_mixin.create_firewall = create_firewall
# Monkey patching l3_db's _get_router_for_floatingip method to associate
# floatingip if corresponding routes is present.
def _is_net_reachable_from_net(self, context, tenant_id, from_net_id,
                               to_net_id):
    """Check whether a network is reachable.

    Follow the paths of networks connected by devices, to determine
    whether a network is reachable from another.

    @param context: neutron api request context
    @param tenant_id: the owning tenant
    @param from_net_id: the source network for the search
    @param to_net_id: the destination network for the search

    @return: True or False whether a path exists
    """
    original_context = context
    # elevate_context is defined elsewhere in this module; presumably it
    # returns an elevated (admin) copy of the request context -- confirm.
    context = elevate_context(context)
    tenant_id = context.tenant_id

    def nexthop_nets_query(nets, visited):
        """query networks connected to devices on nets but not visited."""
        Port = models_v2.Port
        # Sub-select: ids of this tenant's devices (DHCP ports excluded)
        # that have a port on any network in 'nets'.
        devices_on_nets = context.session.query(Port.device_id).filter(
            Port.tenant_id == tenant_id,
            Port.device_owner.notin_([nlib_const.DEVICE_OWNER_DHCP]),
            Port.network_id.in_(nets)).subquery()
        # Networks not yet visited that those devices also attach to.
        return context.session.query(Port.network_id).filter(
            Port.tenant_id == tenant_id,
            Port.network_id.notin_(visited),
            Port.device_id.in_(devices_on_nets))

    # Breadth-first expansion over networks until the target network shows
    # up in the frontier or no new networks are reachable.
    visited = set([])
    nets = set([from_net_id])
    while nets:
        if to_net_id in nets:
            # NOTE(review): rebinding the local 'context' name has no effect
            # outside this function; presumably defensive -- confirm intent.
            context = original_context
            return True
        visited |= nets
        nets = set((tup[0] for tup in nexthop_nets_query(nets, visited)))
    context = original_context
    return False
def _find_net_for_nexthop(self, context, tenant_id, router_id, nexthop):
    """Find the network to which the nexthop belongs.

    Walk the router's interface ports and return the network id of the
    first interface whose subnet CIDRs contain *nexthop*.

    @param context: neutron api request context
    @param tenant_id: the owning tenant
    @param router_id: a router id
    @param nexthop: an IP address
    @return: the network id of the nexthop or None if not found
    """
    router_ports = context.session.query(models_v2.Port).filter_by(
        tenant_id=tenant_id,
        device_id=router_id,
        device_owner=DEVICE_OWNER_ROUTER_INTF)
    for port in router_ports:
        cidrs = [self._core_plugin._get_subnet(context,
                                               fixed_ip['subnet_id'])['cidr']
                 for fixed_ip in port['fixed_ips']]
        if netaddr.all_matching_cidrs(nexthop, cidrs):
            return port['network_id']
    return None
def _find_routers_via_routes_for_floatingip(self, context, internal_port,
                                            internal_subnet_id,
                                            external_network_id):
    """Find routers with route to the internal IP address.

    Iterate over the routers that belong to the same tenant as
    'internal_port'. For each router check that the router is connected
    to the external network and whether there is a route to the internal
    IP address. Consider only routers for which there is a path from the
    nexthop of the route to the internal port.

    Sort the list of routers to have the router with the most specific
    route first (largest CIDR prefix mask length).

    @param context: neutron api request context
    @param internal_port: the port dict for the association
    @param internal_subnet_id: the subnet for the association
    @param external_network_id: the network of the floatingip
    @return: a sorted list of matching router ids
    """
    # Run the search with elevated (resource-owner) credentials so all
    # candidate routers are visible; the caller's context is restored
    # before returning.
    original_context = context
    context = elevate_context(context)
    internal_ip_address = [
        ip['ip_address'] for ip in internal_port['fixed_ips']
        if ip['subnet_id'] == internal_subnet_id
    ][0]
    # find the tenant routers
    tenant_id = internal_port['tenant_id']
    routers = self.get_routers(context, filters={'tenant_id': [tenant_id]})
    prefix_routers = []
    for router in routers:
        # verify that the router is on "external_network"
        gw_info = router.get(EXTERNAL_GW_INFO)
        if not gw_info or gw_info['network_id'] != external_network_id:
            continue
        # find a matching route
        if 'routes' not in router:
            continue
        # Group nexthops by destination CIDR so the most specific
        # destination matching the internal IP can be picked below.
        cidr_nexthops = {}
        for route in router['routes']:
            cidr = netaddr.IPNetwork(route['destination'])
            if cidr not in cidr_nexthops:
                cidr_nexthops[cidr] = []
            cidr_nexthops[cidr].append(route['nexthop'])
        smallest_cidr = netaddr.smallest_matching_cidr(
            internal_ip_address,
            list(cidr_nexthops.keys()))
        if not smallest_cidr:
            continue
        # validate that there exists a path to "internal_port"
        for nexthop in cidr_nexthops[smallest_cidr]:
            net_id = self._find_net_for_nexthop(context, context.tenant_id,
                                                router['id'], nexthop)
            if net_id and self._is_net_reachable_from_net(
                    context,
                    context.tenant_id,
                    net_id,
                    internal_port['network_id']):
                prefix_routers.append(
                    (smallest_cidr.prefixlen, router['id']))
                break
    context = original_context
    # Longest prefix (most specific route) first; return router ids only.
    return [p_r[1] for p_r in sorted(prefix_routers, reverse=True)]
def elevate_context(context):
    """Return an admin-elevated copy of *context*, re-homed to the
    resource-owner tenant so plumbing resources are visible."""
    elevated = context.elevated()
    elevated.tenant_id = _resource_owner_tenant_id()
    return elevated
def _resource_owner_tenant_id():
    """Look up the keystone tenant id of the configured resource-owner
    tenant.

    Re-raises keystone NotFound / NoUniqueMatch after logging.
    """
    user, pwd, tenant_name, auth_url = utils.get_keystone_creds()
    ks_client = keyclient.Client(username=user, password=pwd,
                                 auth_url=auth_url)
    try:
        return ks_client.tenants.find(name=tenant_name).id
    except k_exceptions.NotFound:
        with excutils.save_and_reraise_exception(reraise=True):
            LOG.error('No tenant with name %s exists.', tenant_name)
    except k_exceptions.NoUniqueMatch:
        with excutils.save_and_reraise_exception(reraise=True):
            LOG.error('Multiple tenants matches found for %s', tenant_name)
def _get_router_for_floatingip(self, context, internal_port,
                               internal_subnet_id,
                               external_network_id):
    """Find a router to host the floating IP association.

    Prefer a router directly connecting the internal subnet and the
    external network (ideally one whose interface address is the
    subnet's gateway-ip); otherwise fall back to routers that can reach
    the internal port via their extra routes (the NFP extension).

    @raise n_exc.BadRequest: if the subnet has no gateway_ip
    @raise l3.ExternalGatewayForFloatingIPNotFound: if no router matches
    """
    subnet = self._core_plugin.get_subnet(context, internal_subnet_id)
    if not subnet['gateway_ip']:
        msg = (_('Cannot add floating IP to port on subnet %s '
                 'which has no gateway_ip') % internal_subnet_id)
        raise n_exc.BadRequest(resource='floatingip', msg=msg)
    # Find routers(with router_id and interface address) that
    # connect given internal subnet and the external network.
    # Among them, if the router's interface address matches
    # with subnet's gateway-ip, return that router.
    # Otherwise return the first router.
    gw_port = orm.aliased(models_v2.Port, name="gw_port")
    routerport_qry = context.session.query(
        RouterPort.router_id, models_v2.IPAllocation.ip_address).join(
        models_v2.Port, models_v2.IPAllocation).filter(
        models_v2.Port.network_id == internal_port['network_id'],
        RouterPort.port_type.in_(nlib_const.ROUTER_INTERFACE_OWNERS),
        models_v2.IPAllocation.subnet_id == internal_subnet_id
    ).join(gw_port, gw_port.device_id == RouterPort.router_id).filter(
        gw_port.network_id == external_network_id).distinct()
    first_router_id = None
    for router_id, interface_ip in routerport_qry:
        if interface_ip == subnet['gateway_ip']:
            return router_id
        if not first_router_id:
            first_router_id = router_id
    if first_router_id:
        return first_router_id
    # No directly connected router: consider routers whose extra routes
    # reach the internal port (sorted most-specific-route first).
    router_ids = self._find_routers_via_routes_for_floatingip(
        context,
        internal_port,
        internal_subnet_id,
        external_network_id)
    if router_ids:
        return router_ids[0]
    raise l3.ExternalGatewayForFloatingIPNotFound(
        subnet_id=internal_subnet_id,
        external_network_id=external_network_id,
        port_id=internal_port['id'])
# Install the NFP-aware implementations on the l3 DB mixin so floating
# IP association also considers routers reachable via extra routes.
l3_db.L3_NAT_dbonly_mixin._get_router_for_floatingip = (
    _get_router_for_floatingip)
l3_db.L3_NAT_dbonly_mixin._find_routers_via_routes_for_floatingip = (
    _find_routers_via_routes_for_floatingip)
l3_db.L3_NAT_dbonly_mixin._find_net_for_nexthop = _find_net_for_nexthop
l3_db.L3_NAT_dbonly_mixin._is_net_reachable_from_net = (
    _is_net_reachable_from_net)

View File

@ -1,27 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gbpservice.contrib.nfp.config_orchestrator.common import topics
from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.\
haproxy import haproxy_driver_constants
from neutron_lbaas.drivers.common import agent_driver_base as adb
class HaproxyOnVMPluginDriver(adb.AgentDriverBase):
    """Neutron LBaaS v2 plugin driver that proxies to the NFP config agent."""

    # Device driver name reported by the haproxy configurator driver.
    device_driver = haproxy_driver_constants.DRIVER_NAME

    def __init__(self, plugin):
        # Monkey patch LB agent topic and LB agent type so the base
        # AgentDriverBase talks to NFP's config agent instead of the
        # stock LBaaS v2 agent.
        adb.lb_const.LOADBALANCER_AGENTV2 = topics.LBV2_NFP_CONFIGAGENT_TOPIC
        adb.lb_const.AGENT_TYPE_LOADBALANCERV2 = 'NFP Loadbalancer V2 agent'
        super(HaproxyOnVMPluginDriver, self).__init__(plugin)

View File

@ -1,288 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import time
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron_lib import exceptions
from neutron_lib.plugins import directory
from neutron_lib import rpc as n_rpc
from neutron_vpnaas.db.vpn import vpn_validator
from neutron_vpnaas.services.vpn.plugin import VPNDriverPlugin
from neutron_vpnaas.services.vpn.plugin import VPNPlugin
from neutron_vpnaas.services.vpn.service_drivers import base_ipsec
import oslo_messaging
from gbpservice._i18n import _
from gbpservice.contrib.nfp.config_orchestrator.common import topics
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
BASE_VPN_VERSION = '1.0'  # RPC API version for the VPN agent interface
AGENT_TYPE_VPN = 'NFP Vpn agent'  # agent_type used to locate NFP VPN agents
# vpnservice operational states
ACTIVE = 'ACTIVE'
DOWN = 'DOWN'
ERROR = 'ERROR'
# seconds to wait for a vpnservice to leave PENDING before erroring out
TIMEOUT = 80
class VPNAgentHostingServiceNotFound(exceptions.NeutronException):
    """Raised when no alive VPN agent hosts the given vpnservice."""
    message = _("VPN Agent hosting vpn service '%(vpnservice_id)s' not found")
class VPNAgentNotFound(exceptions.NeutronException):
    """Raised when the VPN agent lookup in the agent DB fails."""
    message = _("VPN Agent not found in agent_db")
class VPNPluginExt(VPNPlugin, agentschedulers_db.AgentSchedulerDbMixin):
    """
    Extends the base VPN Plugin class to inherit agentdb too.
    Required to get agent entry into the database.
    """

    def __init__(self):
        super(VPNPluginExt, self).__init__()
class NFPIPsecVPNDriverCallBack(base_ipsec.IPsecVpnDriverCallBack):
    """Callback for IPSecVpnDriver rpc."""

    target = oslo_messaging.Target(version=BASE_VPN_VERSION)

    def __init__(self, driver):
        super(NFPIPsecVPNDriverCallBack, self).__init__(driver)
        self.driver = driver

    def update_status(self, context, status):
        """Update status of vpnservices."""
        # Agents may omit the connection map; normalise before handing
        # the report to the service plugin.
        status[0].setdefault('ipsec_site_connections', {})
        self.driver.service_plugin.update_status_by_agent(context, status)
class NFPIPsecVpnAgentApi(base_ipsec.IPsecVpnAgentApi):
    """API and handler for NFP IPSec plugin to agent RPC messaging."""

    target = oslo_messaging.Target(version=BASE_VPN_VERSION)

    def __init__(self, topic, default_version, driver):
        super(NFPIPsecVpnAgentApi, self).__init__(
            topic, default_version, driver)

    def _is_agent_hosting_vpnservice(self, agent):
        """Return True when *agent* runs on this host.

        In case we have an agent running on each compute node, the logic
        to pick the agent hosting this vpn service belongs here; for now
        the agent on the local host is considered the hosting agent.
        """
        host = agent['host']
        lhost = socket.gethostname()
        if host == lhost:
            return True
        return False

    def _get_agent_hosting_vpnservice(self, admin_context, vpnservice_id):
        """Pick an alive VPN agent to configure *vpnservice_id*.

        Prefers an agent on the local host, then falls back to any alive
        agent. Raises VPNAgentNotFound if the lookup itself errors, and
        VPNAgentHostingServiceNotFound if no alive agent exists.
        """
        filters = {'agent_type': [AGENT_TYPE_VPN]}
        agents = directory.get_plugin().get_agents(
            admin_context, filters=filters)
        try:
            for agent in agents:
                if not agent['alive']:
                    continue
                res = self._is_agent_hosting_vpnservice(agent)
                if res is True:
                    return agent
            # valid vpn agent is not found, hostname comparison might be
            # failed. Return whichever agent is available.
            for agent in agents:
                if not agent['alive']:
                    LOG.debug("Cannot get a active vpn agent, skipped")
                    continue
                return agent
        except Exception:
            raise VPNAgentNotFound()
        # Reached only when no alive agent was returned above.
        msg = ('No active vpn agent found. Configuration will fail.')
        LOG.error(msg)
        raise VPNAgentHostingServiceNotFound(vpnservice_id=vpnservice_id)

    def _agent_notification(self, context, method, vpnservice_id,
                            version=None, **kwargs):
        """Cast *method* to the agent hosting *vpnservice_id*."""
        # Legacy 'x and a or b' idiom: use the caller's context when it
        # is already admin, otherwise elevate it.
        admin_context = context.is_admin and context or context.elevated()
        if not version:
            version = self.target.version
        vpn_agent = self._get_agent_hosting_vpnservice(
            admin_context, vpnservice_id)
        msg = (('Notify agent at %(topic)s.%(host)s the message '
                '%(method)s %(args)s')
               % {'topic': self.topic,
                  'host': vpn_agent['host'],
                  'method': method, 'args': kwargs})
        LOG.debug(msg)
        cctxt = self.client.prepare(server=vpn_agent['host'],
                                    version=version)
        cctxt.cast(context, method, **kwargs)

    def vpnservice_updated(self, context, vpnservice_id, **kwargs):
        """
        Make rpc to agent for 'vpnservice_updated'.

        Failures are logged and swallowed so a notification problem does
        not fail the API operation.
        """
        try:
            self._agent_notification(
                context, 'vpnservice_updated',
                vpnservice_id, **kwargs)
        except Exception:
            msg = ('Notifying agent failed')
            LOG.error(msg)
class VPNValidator(vpn_validator.VpnReferenceValidator):
    """Validator that skips the reference vpnservice checks.

    NFP provisions the VPN service on a service VM, so the reference
    validator's router/subnet checks do not apply.
    """

    # The redundant __init__ that only delegated to super() was removed;
    # the base class constructor is inherited unchanged.

    def validate_vpnservice(self, context, vpns):
        """Intentionally accept any vpnservice without validation."""
        pass
class NFPIPsecVPNDriver(base_ipsec.BaseIPsecVPNDriver):
    """VPN Service Driver class for IPsec."""

    def __init__(self, service_plugin):
        super(NFPIPsecVPNDriver, self).__init__(
            service_plugin)
        # Replace the reference validator so NFP-hosted vpnservices are
        # not rejected by router/subnet checks.
        self.validator = VPNValidator()

    def create_rpc_conn(self):
        """Wire up RPC: consume the plugin-side topic, cast to the NFP
        config agent via NFPIPsecVpnAgentApi."""
        self.endpoints = [
            NFPIPsecVPNDriverCallBack(self),
            agents_db.AgentExtRpcCallback(VPNPluginExt())]
        self.conn = n_rpc.create_connection(new=True)
        self.conn.create_consumer(
            topics.VPN_NFP_PLUGIN_TOPIC, self.endpoints, fanout=False)
        self.conn.consume_in_threads()
        self.agent_rpc = NFPIPsecVpnAgentApi(
            topics.VPN_NFP_CONFIGAGENT_TOPIC, BASE_VPN_VERSION, self)

    def _get_service_vendor(self, context, vpnservice_id):
        """Extract the service vendor from the vpnservice description.

        GBP workflows encode metadata in the description as
        ';'-separated 'key=value' tokens; the vendor is taken from
        token index 5. Defaults to 'VYOS' otherwise.
        """
        vpnservice = self.service_plugin.get_vpnservice(
            context, vpnservice_id)
        desc = vpnservice['description']
        # if the call is through GBP workflow,
        # fetch the service profile from description
        # else, use 'VYOS' as the service profile
        if 'service_vendor=' in desc:
            tokens = desc.split(';')
            service_vendor = tokens[5].split('=')[1]
        else:
            service_vendor = 'VYOS'
        return service_vendor

    def create_ipsec_site_connection(self, context, ipsec_site_connection):
        """Notify the agent once the owning vpnservice is usable.

        Polls every 5s up to TIMEOUT; on vpnservice ERROR or timeout the
        connection is marked ERROR instead of being configured.
        """
        service_vendor = self._get_service_vendor(
            context,
            ipsec_site_connection['vpnservice_id'])
        starttime = 0
        while starttime < TIMEOUT:
            vpnservice = self.service_plugin.get_vpnservice(
                context,
                ipsec_site_connection['vpnservice_id'])
            # (Revisit):Due to device driver issue neutron is making vpnservice
            # state in Down state, At this point of time,
            # Allowing ipsec site connection to gets created though
            # vpnservice is in down state.
            if vpnservice['status'] in [ACTIVE, DOWN]:
                self.agent_rpc.vpnservice_updated(
                    context,
                    ipsec_site_connection['vpnservice_id'],
                    rsrc_type='ipsec_site_connection',
                    svc_type=self.service_type,
                    rsrc_id=ipsec_site_connection['id'],
                    resource=ipsec_site_connection,
                    reason='create', service_vendor=service_vendor)
                break
            elif vpnservice['status'] == ERROR:
                # NOTE(review): message lacks a space between 'to' and
                # 'ERROR' — preserved to keep log output unchanged.
                msg = ('updating ipsec_site_connection with id %s to'
                       'ERROR state' % (ipsec_site_connection['id']))
                LOG.error(msg)
                VPNPluginExt().update_ipsec_site_conn_status(
                    context,
                    ipsec_site_connection['id'],
                    ERROR)
                break
            time.sleep(5)
            starttime += 5
        else:
            # while/else: the loop exhausted TIMEOUT without breaking.
            msg = ('updating ipsec_site_connection with id %s to'
                   'ERROR state' % (ipsec_site_connection['id']))
            LOG.error(msg)
            VPNPluginExt().update_ipsec_site_conn_status(
                context,
                ipsec_site_connection['id'],
                ERROR)

    def _move_ipsec_conn_state_to_error(self, context, ipsec_site_connection):
        """Mark the connection and its vpnservice as ERROR via the
        driver callback path."""
        vpnsvc_status = [{
            'id': ipsec_site_connection['vpnservice_id'],
            'status':ERROR,
            'updated_pending_status':False,
            'ipsec_site_connections':{
                ipsec_site_connection['id']: {
                    'status': ERROR,
                    'updated_pending_status': True}}}]
        driver = VPNDriverPlugin()._get_driver_for_ipsec_site_connection(
            context,
            ipsec_site_connection)
        NFPIPsecVPNDriverCallBack(driver).update_status(context,
                                                        vpnsvc_status)

    def delete_ipsec_site_connection(self, context, ipsec_site_connection):
        """Notify the agent to tear down the ipsec site connection."""
        service_vendor = self._get_service_vendor(
            context,
            ipsec_site_connection['vpnservice_id'])
        self.agent_rpc.vpnservice_updated(
            context,
            ipsec_site_connection['vpnservice_id'],
            rsrc_type='ipsec_site_connection',
            svc_type=self.service_type,
            rsrc_id=ipsec_site_connection['id'],
            resource=ipsec_site_connection,
            reason='delete', service_vendor=service_vendor)

    def create_vpnservice(self, context, vpnservice):
        """Notify the agent about a new vpnservice."""
        service_vendor = self._get_service_vendor(context,
                                                  vpnservice['id'])
        self.agent_rpc.vpnservice_updated(
            context,
            vpnservice['id'],
            rsrc_type='vpn_service',
            svc_type=self.service_type,
            rsrc_id=vpnservice['id'],
            resource=vpnservice,
            reason='create', service_vendor=service_vendor)

    def delete_vpnservice(self, context, vpnservice):
        # Nothing to do: per-connection deletes handle agent cleanup.
        pass

    def update_vpnservice(self, context, old_vpnservice, new_vpnservice):
        # vpnservice updates are not propagated to the agent.
        pass

View File

@ -1,811 +0,0 @@
#!/usr/bin/python
import argparse
import sys
import os
import shutil
import subprocess
import configparser
import subprocess
import time
import platform
from .image_builder import disk_image_create as DIB
# Defines
TEMP_WORK_DIR = "tmp"  # scratch dir for generated systemd unit files
CONFIG = configparser.ConfigParser()
NEUTRON_CONF = "/etc/neutron/neutron.conf"
NEUTRON_ML2_CONF = "/etc/neutron/plugins/ml2/ml2_conf.ini"
FILE_PATH = os.path.dirname(os.path.realpath(__file__))
CONFIGURATOR_USER_DATA = FILE_PATH + "/image_builder/configurator_user_data"
TEMPLATES_PATH = FILE_PATH + "/templates/gbp_resources.yaml"
APIC_ENV = False  # set True by check_if_apic_sys() on APIC deployments

# global values
# these src_dirs will be copied from the host into the docker image;
# the directories are assumed to be present in src_path
src_dirs = ["gbpservice", "neutron", "neutron_lbaas", "neutron_lib"]
# create a temp directory for copying srcs
dst_dir = "/tmp/controller_docker_build/"

# Command-line interface: each flag enables one installer step.
parser = argparse.ArgumentParser()
parser.add_argument('--configure', action='store_true',
                    dest='configure_nfp',
                    default=False, help='Configure NFP')
parser.add_argument('--build-controller-vm', action='store_true',
                    dest='build_controller_vm',
                    default=False, help='enable building controller vm')
parser.add_argument('--image-build-cache-dir', type=str,
                    help=('directory path where trusty image tar.gz'
                          ' can be found for building controller vm'))
parser.add_argument('--enable-orchestrator', action='store_true',
                    dest='enable_orchestrator',
                    default=False,
                    help='enable creating orchestrator systemctl file')
parser.add_argument('--enable-proxy', action='store_true',
                    dest='enable_proxy',
                    default=False,
                    help='enable creating proxy systemctl file')
parser.add_argument('--create-resources', action='store_true',
                    dest='create_resources',
                    default=False,
                    help='enable creating nfp required resources')
parser.add_argument('--launch-controller', action='store_true',
                    dest='launch_controller',
                    default=False, help='enable to launch controller vm')
parser.add_argument('--configure-ext-net',
                    action='store_true', default=False,
                    help=('Configure heat driver section in nfp.ini.'
                          ' Specify external network name with --ext-net-name option.'))
parser.add_argument('--ext-net-name', type=str,
                    default='',
                    help=('Provide external network(neutron network) name.'
                          ' Use along with --configure-ext-net.'))
parser.add_argument('--clean-up', action='store_true', dest='clean_up_nfp',
                    default=False,
                    help='enable to clean up nfp services and resources')
parser.add_argument('--controller-path', type=str, dest='controller_path',
                    help='patch to the controller image')
args = parser.parse_args()
def check_if_apic_sys():
    """Detect an APIC deployment and flip the module-level APIC_ENV flag.

    Reads ml2 mechanism_drivers from the ML2 config via crudini; only
    ever sets the flag, never clears it.
    """
    global APIC_ENV
    drivers = subprocess.getoutput(
        "crudini --get " + NEUTRON_ML2_CONF + " ml2 mechanism_drivers")
    if drivers == 'apic_gbp':
        APIC_ENV = True
def set_keystone_authtoken_section():
    """Copy keystone auth settings from neutron.conf into /etc/nfp.ini.

    Reads the keystone_authtoken section via crudini, derives protocol,
    host, port and version from auth_uri with echo|cut pipelines, and
    writes everything into the nfp_keystone_authtoken section.
    """
    global NEUTRON_CONF
    nfp_conf = '/etc/nfp.ini'
    admin_user = subprocess.getoutput("crudini --get " + NEUTRON_CONF + " keystone_authtoken username")
    admin_password = subprocess.getoutput("crudini --get " + NEUTRON_CONF + " keystone_authtoken password")
    admin_tenant_name = subprocess.getoutput("crudini --get " + NEUTRON_CONF + " keystone_authtoken project_name")
    auth_uri = subprocess.getoutput("crudini --get " + NEUTRON_CONF + " keystone_authtoken auth_uri")
    # Split auth_uri of the form protocol://host:port/version via shell.
    auth_protocol = subprocess.getoutput("echo " + auth_uri + " | cut -d':' -f1")
    auth_host = subprocess.getoutput("echo " + auth_uri + " | cut -d'/' -f3 | cut -d':' -f1")
    auth_port = subprocess.getoutput("echo " + auth_uri + " | cut -d'/' -f3 | cut -d':' -f2")
    auth_version = subprocess.getoutput("echo " + auth_uri + " | cut -d'/' -f4")
    if auth_version == '':
        auth_version = 'v2.0'
    subprocess.call(("crudini --set " + nfp_conf + " nfp_keystone_authtoken admin_user " + admin_user).split(' '))
    subprocess.call(("crudini --set " + nfp_conf + " nfp_keystone_authtoken admin_password " + admin_password).split(' '))
    subprocess.call(("crudini --set " + nfp_conf + " nfp_keystone_authtoken admin_tenant_name " + admin_tenant_name).split(' '))
    subprocess.call(("crudini --set " + nfp_conf + " nfp_keystone_authtoken auth_protocol " + auth_protocol).split(' '))
    subprocess.call(("crudini --set " + nfp_conf + " nfp_keystone_authtoken auth_host " + auth_host).split(' '))
    subprocess.call(("crudini --set " + nfp_conf + " nfp_keystone_authtoken auth_port " + auth_port).split(' '))
    subprocess.call(("crudini --set " + nfp_conf + " nfp_keystone_authtoken auth_version " + auth_version).split(' '))
def configure_nfp():
    """Configure neutron/heat for NFP and restart affected services.

    Mutates neutron.conf, ml2_conf.ini, heat.conf, neutron_lbaas.conf
    and neutron_vpnaas.conf in place via crudini/sed, runs the GBP DB
    migration, then restarts the NFP and OpenStack services. Calls are
    strictly ordered; do not reorder.
    """
    subprocess.getoutput("cat /usr/lib/python2.7/site-packages/gbpservice/contrib/nfp/bin/nfp.ini >> /etc/nfp.ini")
    subprocess.getoutput("mkdir -p /etc/nfp/vyos/")
    subprocess.getoutput("cp -r /usr/lib/python2.7/site-packages/gbpservice/contrib/nfp/bin/vyos.day0 /etc/nfp/vyos/")
    # Set the default vyos password in the day-0 config.
    subprocess.getoutput("sed -i 's/\"password\": \"\"/\"password\": \"vyos\"/' /etc/nfp/vyos/vyos.day0")
    set_keystone_authtoken_section()
    check_if_apic_sys()
    # Rework the service_plugins list: replace any existing lbaas/vpnaas/
    # fwaas entries with the NFP variants and add ncp.
    curr_service_plugins = subprocess.getoutput("crudini --get /etc/neutron/neutron.conf DEFAULT service_plugins")
    curr_service_plugins_list = curr_service_plugins.split(",")
    lbaas_enabled = [x for x in curr_service_plugins_list if 'lbaas' in x]
    vpnaas_enabled = [x for x in curr_service_plugins_list if 'vpnaas' in x]
    fwaas_enabled = [x for x in curr_service_plugins_list if 'fwaas' in x]
    firewall_enabled = [x for x in curr_service_plugins_list if 'firewall' in x]
    for word in firewall_enabled:
        if word not in fwaas_enabled:
            fwaas_enabled.append(word)
    plugins_to_enable = ["ncp"]
    for plugin in plugins_to_enable:
        if plugin not in curr_service_plugins_list:
            curr_service_plugins_list.append(plugin)
    if "servicechain" in curr_service_plugins_list:
        curr_service_plugins_list.remove("servicechain")
    if not len(vpnaas_enabled):
        curr_service_plugins_list.append("vpnaas")
    else:
        for word in vpnaas_enabled:
            curr_service_plugins_list.remove(word)
        curr_service_plugins_list.append("vpnaas")
    # enable lbaasv2 by default
    if not len(lbaas_enabled):
        curr_service_plugins_list.append("lbaasv2")
    else:
        for word in lbaas_enabled:
            curr_service_plugins_list.remove(word)
        curr_service_plugins_list.append("lbaasv2")
    if not len(fwaas_enabled):
        curr_service_plugins_list.append("nfp_fwaas")
    else:
        for word in fwaas_enabled:
            curr_service_plugins_list.remove(word)
        curr_service_plugins_list.append("nfp_fwaas")
    new_service_plugins_list = curr_service_plugins_list
    new_service_plugins = ",".join(new_service_plugins_list)
    subprocess.call(("crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins " + str(new_service_plugins)).split(' '))
    # check if gbp-heat is configured; if not, configure it
    curr_heat_plugin_dirs = subprocess.getoutput("crudini --get /etc/heat/heat.conf DEFAULT plugin_dirs")
    curr_heat_plugin_dirs_list = curr_heat_plugin_dirs.split(",")
    heat_dirs_to_enable = ["/usr/lib64/heat", "/usr/lib/heat", "/usr/lib/python2.7/site-packages/gbpautomation/heat"]
    for dir in heat_dirs_to_enable:
        if dir not in curr_heat_plugin_dirs_list:
            curr_heat_plugin_dirs_list.append(dir)
    new_heat_plugin_dirs_list = curr_heat_plugin_dirs_list
    new_heat_plugin_dirs = ",".join(new_heat_plugin_dirs_list)
    subprocess.call(("crudini --set /etc/heat/heat.conf DEFAULT plugin_dirs " + str(new_heat_plugin_dirs)).split(' '))
    # Enable GBP extension driver for service sharing
    if not APIC_ENV:
        subprocess.call("crudini --set /etc/neutron/neutron.conf group_policy policy_drivers implicit_policy,resource_mapping,chain_mapping".split(' '))
    else:
        subprocess.call("crudini --set /etc/neutron/neutron.conf group_policy policy_drivers implicit_policy,apic,chain_mapping".split(' '))
        # Configure policy_drivers if section group_policy exists in the config file
        ret = subprocess.call("crudini --get /etc/neutron/plugins/ml2/ml2_conf_cisco_apic.ini group_policy".split(' '))
        if not ret:
            subprocess.call("crudini --set /etc/neutron/plugins/ml2/ml2_conf_cisco_apic.ini group_policy policy_drivers implicit_policy,apic,chain_mapping".split(' '))
    subprocess.call("crudini --set /etc/neutron/neutron.conf group_policy extension_drivers proxy_group".split(' '))
    # Configure service owner
    subprocess.call("crudini --set /etc/neutron/neutron.conf admin_owned_resources_apic_tscp plumbing_resource_owner_user neutron".split(' '))
    admin_password = subprocess.getoutput("crudini --get /etc/neutron/neutron.conf keystone_authtoken password")
    subprocess.call("crudini --set /etc/neutron/neutron.conf admin_owned_resources_apic_tscp plumbing_resource_owner_password".split(' ') + [admin_password])
    subprocess.call("crudini --set /etc/neutron/neutron.conf admin_owned_resources_apic_tscp plumbing_resource_owner_tenant_name services".split(' '))
    # Configure NFP drivers
    subprocess.call("crudini --set /etc/neutron/neutron.conf node_composition_plugin node_plumber admin_owned_resources_apic_plumber".split(' '))
    subprocess.call("crudini --set /etc/neutron/neutron.conf node_composition_plugin node_drivers nfp_node_driver".split(' '))
    subprocess.call("crudini --set /etc/neutron/neutron.conf nfp_node_driver is_service_admin_owned False".split(' '))
    subprocess.call("crudini --set /etc/neutron/neutron.conf nfp_node_driver svc_management_ptg_name svc_management_ptg".split(' '))
    # Enable ML2 port security
    subprocess.call("crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security".split(' '))
    # Update neutron server to use GBP policy
    subprocess.call("crudini --set /etc/neutron/neutron.conf DEFAULT policy_file /etc/group-based-policy/policy.d/policy.json".split(' '))
    # Update neutron LBaaS with NFP LBaaS v2 service provider
    subprocess.call("crudini --set /etc/neutron/neutron_lbaas.conf service_providers service_provider LOADBALANCERV2:loadbalancerv2:gbpservice.contrib.nfp.service_plugins.loadbalancer.drivers.nfp_lbaasv2_plugin_driver.HaproxyOnVMPluginDriver:default".split(' '))
    # Update neutron VPNaaS with NFP VPNaaS service provider
    subprocess.call(["grep -q '^service_provider.*NFPIPsecVPNDriver:default' /etc/neutron/neutron_vpnaas.conf; if [[ $? = 1 ]]; then sed -i '/^service_provider.*IPsecVPNDriver/ s/:default/\\nservice_provider\ =\ VPN:vpn:gbpservice.contrib.nfp.service_plugins.vpn.drivers.nfp_vpnaas_driver.NFPIPsecVPNDriver:default/' /etc/neutron/neutron_vpnaas.conf; fi"], shell=True)
    # Update DB
    subprocess.call("gbp-db-manage --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head".split(' '))
    # Restart the services to make the configuration effective
    subprocess.call("systemctl restart nfp_orchestrator".split(' '))
    subprocess.call("systemctl restart nfp_config_orch".split(' '))
    subprocess.call("systemctl restart openstack-heat-engine".split(' '))
    subprocess.call("systemctl restart neutron-server".split(' '))
def get_src_dirs():
    """Stage the python source trees and docker files into dst_dir.

    Returns 0 on success, 1 on failure (unsupported OS, missing source
    directory, or a failed copy).
    """
    print("Getting source dirs for copying inside the docker image")
    # get the operating system type
    # NOTE(review): platform.dist() was removed in Python 3.8 — this
    # legacy script assumes an older interpreter; confirm before reuse.
    (os_type, os_version, os_release) = platform.dist()
    if os_type in ['Ubuntu']:
        src_path = "/usr/lib/python2.7/dist-packages/"
    elif os_type in ['centos', 'redhat']:
        src_path = "/usr/lib/python2.7/site-packages/"
    else:
        print(("ERROR: Unsupported Operating System(%s)" % os_type))
        return 1
    # verify every source tree exists before touching dst_dir
    for name in src_dirs:
        tree = src_path + name
        if not os.path.isdir(tree):
            print(("ERROR: directory not found: ", tree))
            return 1
    # recreate the docker build scratch directory
    subprocess.call(["rm", "-rf", dst_dir])
    os.mkdir(dst_dir)
    dockerfile = DIB.cur_dir + "/Dockerfile"
    run_sh = DIB.cur_dir + "/configurator_run.sh"
    # these src_dirs will be copied from host to inside docker image
    for name in src_dirs:
        tree = src_path + name
        if subprocess.call(["cp", "-r", tree, dst_dir]):
            print(("ERROR: failed to copy %s to ./ directory" % tree))
            return 1
    subprocess.call(["cp", dockerfile, dst_dir])
    subprocess.call(["cp", run_sh, dst_dir])
    DIB.docker_build_dir = dst_dir
    return 0
def clean_src_dirs():
    """Delete the temporary docker build directory (dst_dir)."""
    subprocess.call(["rm", "-rf", dst_dir])
def build_configuration_vm():
    """Build the configurator (controller) VM image via diskimage-builder.

    Stages sources with get_src_dirs(), configures the DIB module, runs
    the build, then cleans up and restores the working directory.
    """
    cur_dir = os.path.dirname(__file__)
    cur_dir = os.path.realpath(cur_dir)
    if not cur_dir:
        # if script is executed from current dir, get abs path
        cur_dir = os.path.realpath('./')
    # update dib current working dir
    DIB.cur_dir = cur_dir + '/image_builder'
    if(get_src_dirs()):
        return
    # set the cache dir where trusty tar.gz will be present
    if args.image_build_cache_dir:
        cache_dir = args.image_build_cache_dir
    else:
        cache_dir = os.environ.get('HOME', '-1') + '/.cache/image-create'
    # create a configuration dictionary needed by DIB
    DIB.conf['ubuntu_release'] = {'release': 'trusty'}
    DIB.conf['dib'] = {"image_size": 10, "elements": ["configurator", "root-passwd"],
                       "root_pswd": "nfp123",
                       "offline": True, "cache_dir": cache_dir}
    # Build configurator VM
    (ret, image) = DIB.dib()
    if not ret:
        print("ERROR: Failed to create Configurator VM")
    else:
        print(("SUCCESS, created Configurator VM: ", image))
    # clean the src_dirs copied in PWD
    clean_src_dirs()
    os.chdir(cur_dir)
    return
def restart_nfp_orchestrator():
    """Reload systemd units and restart the nfp_orchestrator service.

    Exits the installer with status 1 on failure.
    """
    commands = (["systemctl", "daemon-reload"],
                ["service", "nfp_orchestrator", "restart"])
    try:
        for cmd in commands:
            subprocess.call(cmd)
    except Exception as error:
        print("Error restarting nfp_orchestrator service")
        print(error)
        sys.exit(1)
def restart_nfp_config_orch():
    """Reload systemd units and restart the nfp_config_orch service.

    Exits the installer with status 1 on failure.
    """
    try:
        subprocess.call(["systemctl", "daemon-reload"])
        subprocess.call(["service", "nfp_config_orch", "restart"])
    except Exception as error:
        # Bug fix: the message previously named nfp_orchestrator
        # (copy-paste from restart_nfp_orchestrator).
        print("Error restarting nfp_config_orch service")
        print(error)
        sys.exit(1)
def restart_nfp_proxy():
    """Reload systemd units and restart the nfp_proxy service.

    Exits the installer with status 1 on failure.
    """
    try:
        for cmd in (["systemctl", "daemon-reload"],
                    ["service", "nfp_proxy", "restart"]):
            subprocess.call(cmd)
    except Exception as error:
        print("Error restarting nfp_proxy service")
        print(error)
        sys.exit(1)
def restart_nfp_proxy_agent():
    """Reload systemd units and restart the nfp_proxy_agent service.

    Exits the installer with status 1 on failure.
    """
    try:
        for cmd in (["systemctl", "daemon-reload"],
                    ["service", "nfp_proxy_agent", "restart"]):
            subprocess.call(cmd)
    except Exception as error:
        print("Error restarting nfp_proxy_agent service")
        print(error)
        sys.exit(1)
def create_orchestrator_ctl():
    """Create nfp orchestrator systemctl service files.

    Writes nfp_orchestrator.service and nfp_config_orch.service into a
    temp work dir, installs them under /usr/lib/systemd/system, enables
    both units, and removes the temp dir. Exits with status 1 on any
    failure.
    """
    if not os.path.exists("/var/log/nfp"):
        os.makedirs("/var/log/nfp")
    os.system("chown neutron:neutron /var/log/nfp")
    if not os.path.exists(TEMP_WORK_DIR):
        os.makedirs(TEMP_WORK_DIR)

    orch_ctl_file = TEMP_WORK_DIR + "/nfp_orchestrator.service"
    try:
        # 'with' guarantees the unit file is closed even on error
        # (the original leaked the handle and used a bare except).
        with open(orch_ctl_file, 'w+') as unit:
            unit.write("[Unit]\nDescription=One Convergence NFP Orchestrator\n")
            unit.write("After=syslog.target network.target\n\n[Service]")
            unit.write("\nUser=neutron\nExecStart=/usr/bin/nfp --module orchestrator")
            unit.write(" --config-file /etc/neutron/neutron.conf --config-file ")
            unit.write(" /etc/neutron/plugins/ml2/ml2_conf.ini ")
            unit.write(" --config-file /etc/nfp.ini ")
            unit.write("--log-file /var/log/nfp/nfp_orchestrator.log\n\n")
            unit.write("[Install]\nWantedBy=multi-user.target")
    except OSError:
        print(("Error creating " + orch_ctl_file + " file"))
        sys.exit(1)
    if os.path.exists("/usr/lib/systemd/system"):
        shutil.copy(orch_ctl_file, "/usr/lib/systemd/system/")
    else:
        print("Error: /usr/lib/systemd/system not present")
        sys.exit(1)
    subprocess.call(["systemctl", "enable", "nfp_orchestrator"])

    orch_config_file = TEMP_WORK_DIR + "/nfp_config_orch.service"
    try:
        with open(orch_config_file, 'w+') as unit:
            unit.write("[Unit]\nDescription=One Convergence NFP Config Orchestrator")
            unit.write("\nAfter=syslog.target network.target")
            unit.write("\n\n[Service]\nType=simple\nUser=neutron")
            unit.write("\nExecStart=/usr/bin/nfp"
                       " --module config_orchestrator"
                       " --config-file /etc/nfp.ini")
            unit.write(" --config-file /etc/neutron/neutron.conf"
                       " --log-file /var/log/nfp/nfp_config_orch.log")
            unit.write("\n\n[Install]\nWantedBy=multi-user.target")
    except OSError:
        # Bug fix: the error message previously reported orch_ctl_file
        # (copy-paste from the first unit file above).
        print(("Error creating " + orch_config_file + " file"))
        sys.exit(1)
    if os.path.exists("/usr/lib/systemd/system"):
        shutil.copy(orch_config_file, "/usr/lib/systemd/system/")
    else:
        print("Error: /usr/lib/systemd/system not present")
        sys.exit(1)
    subprocess.call(["systemctl", "enable", "nfp_config_orch"])

    try:
        shutil.rmtree(TEMP_WORK_DIR)
    except OSError:
        print("Error: Cleaning up the temp directory")
        sys.exit(1)
def create_nfp_namespace_file():
    """
    create nfp proxy systemctl service file

    Generates a bash helper script ("nfp_namespace") defining the shell
    functions get_openstack_creds, namespace_delete, netmask_to_bitmask
    and namespace_create used to wire up the nfp-proxy network namespace,
    then installs it under the gbpservice nfp tools directory.
    Exits the process with status 1 on any file/installation error.
    """
    if not os.path.exists(TEMP_WORK_DIR):
        os.makedirs(TEMP_WORK_DIR)
    proxy_tool_file = TEMP_WORK_DIR + "/nfp_namespace"
    try:
        filepx = open(proxy_tool_file, 'w+')
    except:
        print(("Error creating " + proxy_tool_file + " file"))
        sys.exit(1)
    filepx.write("#!/usr/bin/bash\n")
    # --- get_openstack_creds(): export admin credentials read from nova.conf
    filepx.write("\nNOVA_CONF=/etc/nova/nova.conf\nNOVA_SESSION=neutron")
    filepx.write("\n\nget_openstack_creds () {")
    filepx.write("\n\tAUTH_URI=`crudini --get $NOVA_CONF $NOVA_SESSION auth_url`")
    filepx.write("\n\t# if auth_url option is not available, look for admin_auth_url"
                 "\n\tif [[ $? = 1 ]]; then"
                 "\n\t\tAUTH_URI=`crudini --get $NOVA_CONF $NOVA_SESSION admin_auth_url`"
                 "\n\tfi")
    filepx.write("\n\tADMIN_USER=`crudini --get $NOVA_CONF $NOVA_SESSION username`")
    filepx.write("\n\t# if username option is not available, look for admin_username"
                 "\n\tif [[ $? = 1 ]]; then"
                 "\n\t\tADMIN_USER=`crudini --get $NOVA_CONF $NOVA_SESSION admin_username`")
    filepx.write("\n\t\t# if admin_username option is not available, look for admin_user"
                 "\n\t\tif [[ $? = 1 ]]; then"
                 "\n\t\t\tADMIN_USER=`crudini --get $NOVA_CONF $NOVA_SESSION admin_user`"
                 "\n\t\tfi"
                 "\n\tfi")
    filepx.write("\n\tADMIN_PASSWD=`crudini --get $NOVA_CONF $NOVA_SESSION password`")
    filepx.write("\n\t# if password option is not available, look for admin_password"
                 "\n\tif [[ $? = 1 ]]; then"
                 "\n\t\tADMIN_PASSWD=`crudini --get $NOVA_CONF $NOVA_SESSION admin_password`"
                 "\n\tfi")
    filepx.write("\n\tADMIN_TENANT_NAME=`crudini --get $NOVA_CONF $NOVA_SESSION project_name`")
    filepx.write("\n\t# if project_name option is not available, look for admin_tenant_name"
                 "\n\tif [[ $? = 1 ]]; then"
                 "\n\t\tADMIN_TENANT_NAME=`crudini --get $NOVA_CONF $NOVA_SESSION admin_tenant_name`"
                 "\n\tfi")
    filepx.write("\n\texport OS_USERNAME=$ADMIN_USER")
    filepx.write("\n\texport OS_TENANT_NAME=$ADMIN_TENANT_NAME")
    filepx.write("\n\texport OS_PASSWORD=$ADMIN_PASSWD")
    # Keystone v3 additionally needs the domain names exported.
    filepx.write("\n\tif [[ $AUTH_URI == *\"v3\"* ]]; then"
                 "\n\t\tADMIN_PROJECT_DOMAIN_NAME=`crudini --get $NOVA_CONF"
                 " $NOVA_SESSION project_domain_name`"
                 "\n\t\tADMIN_USER_DOMAIN_NAME=`crudini --get $NOVA_CONF"
                 " $NOVA_SESSION user_domain_name`"
                 "\n\t\texport OS_PROJECT_DOMAIN_NAME=$ADMIN_PROJECT_DOMAIN_NAME"
                 "\n\t\texport OS_USER_DOMAIN_NAME=$ADMIN_USER_DOMAIN_NAME"
                 "\n\tfi")
    filepx.write("\n\texport OS_AUTH_URL=$AUTH_URI\n\n}")
    # --- namespace_delete(): tear down the nfp-proxy namespace and its port
    filepx.write("\n\nfunction namespace_delete {\n\tget_openstack_creds")
    filepx.write("\n\n\tproxyPortId=`neutron port-list | ")
    filepx.write("grep pt_nfp_proxy_pt | awk '{print $2}'`")
    filepx.write("\n\ttapName=\"tap${proxyPortId:0:11}\"\n\n"
                 "\t#Deletion namespace")
    filepx.write("\n\tNFP_P=`ip netns | grep \"nfp-proxy\"`")
    filepx.write("\n\tif [ ${#NFP_P} -ne 0 ]; then\n\t\t"
                 "ip netns delete nfp-proxy")
    filepx.write("\n\t\techo \"namespace removed\"\n\tfi")
    filepx.write("\n\n\t#pt1 port removing from ovs")
    filepx.write("\n\tPORT=`ovs-vsctl show | grep \"$tapName\"`")
    filepx.write("\n\tif [ ${#PORT} -ne 0 ]; then")
    filepx.write("\n\t\tovs-vsctl del-port br-int $tapName")
    filepx.write("\n\t\techo \"ovs port is removed\"")
    filepx.write("\n\tfi\n\tpkill nfp_proxy")
    filepx.write("\n\n\tgbp pt-delete nfp_proxy_pt")
    filepx.write("\n\n\techo \"nfp-proxy cleaning success.... \"\n\n}")
    # --- netmask_to_bitmask(): convert prefix length to dotted-quad netmask
    filepx.write("\n\nfunction netmask_to_bitmask {")
    filepx.write("\n\tnetmask_bits=$1")
    filepx.write("\n\tset -- $(( 5 - ($netmask_bits / 8) )) 255 255 255 255 $(( (255 << (8 - ($netmask_bits % 8))) & 255 )) 0 0 0")
    filepx.write("\n\t[ $1 -gt 1 ] && shift $1 || shift")
    filepx.write("\n\tnetmask=${1-0}.${2-0}.${3-0}.${4-0}\n}")
    # --- namespace_create(): build namespace, plug a tap into br-int,
    # bind the GBP policy target port and launch nfp_proxy inside it
    filepx.write("\n\nfunction namespace_create {\n\n\tget_openstack_creds")
    filepx.write("\n\tSERVICE_MGMT_GROUP=\"svc_management_ptg\"")
    filepx.write("\n\tnetmask_bits=`neutron net-list --name l2p_$SERVICE_MGMT_GROUP -F subnets -f value | awk '{print $2}' | awk -F'/' '{print $2}'`")
    filepx.write("\n\techo \"Creating new namespace nfp-proxy....\"")
    filepx.write("\n\n\t#new namespace with name proxy")
    filepx.write("\n\tNFP_P=`ip netns add nfp-proxy`")
    filepx.write("\n\tif [ ${#NFP_P} -eq 0 ]; then")
    filepx.write("\n\t\techo \"New namepace nfp-proxy create\"")
    filepx.write("\n\telse\n\t\techo \"nfp-proxy creation failed\"\n\t\t"
                 "exit 0")
    filepx.write("\n\tfi\n\n\t# create nfp_proxy pt")
    filepx.write("\n\tgbp pt-create --policy-target-group $SERVICE_MGMT_GROUP"
                 " nfp_proxy_pt")
    filepx.write("\n\n\t# Get the nfp_proxy_pt port id, mac address")
    filepx.write("\n\tproxyPortId=`neutron port-list | grep pt_nfp_proxy_pt"
                 " | awk '{print $2}'`")
    filepx.write("\n\tproxyMacAddr=`neutron port-list | grep pt_nfp_proxy_pt"
                 " | awk '{print $6}'`")
    filepx.write("\n\tproxyPortIp=`neutron port-list | grep pt_nfp_proxy_pt"
                 " | awk '{print $11}' | sed 's/^\"\(.*\)\"}$/\\1/'`")
    filepx.write("\n\ttapName=\"tap${proxyPortId:0:11}\"")
    filepx.write("\n\tnew_ip_cidr=\"$proxyPortIp/$netmask_bits\"")
    filepx.write("\n\tnetmask_to_bitmask $netmask_bits\n")
    filepx.write("\n\tproxyBrd=`ipcalc -4 $proxyPortIp -m $netmask -b"
                 " | grep BROADCAST | awk -F '=' '{print $2}'`")
    filepx.write("\n\n\t# Create a tap interface and add it"
                 " to the ovs bridge br-int")
    filepx.write("\n\tovs-vsctl add-port br-int $tapName -- set Interface"
                 " $tapName type=internal")
    filepx.write(" external_ids:iface-id=$proxyPortId"
                 " external_ids:iface-status=active"
                 " external_ids:attached-mac=$proxyMacAddr")
    filepx.write("\n\n\t# Add the tap interface to proxy\n\t"
                 "ip link set $tapName netns nfp-proxy")
    filepx.write("\n\n\t# Get the link up\n\tip netns exec nfp-proxy"
                 " ip link set $tapName up")
    filepx.write("\n\n\t# set the mac address on the tap interface\n\t"
                 "ip netns exec nfp-proxy"
                 " ip link set $tapName address $proxyMacAddr")
    filepx.write("\n\n\t# assign ip address to the proxy tap interface")
    filepx.write("\n\tip netns exec nfp-proxy ip -4 addr add"
                 " $new_ip_cidr scope global dev $tapName brd $proxyBrd")
    filepx.write("\n\n\t# Update the neutron port with the host id binding")
    filepx.write("\n\tneutron port-update $proxyPortId"
                 " --binding:host_id=`hostname`")
    filepx.write("\n\n\tPING=`ip netns exec nfp-proxy"
                 " ping $1 -q -c 2 > /dev/null`")
    filepx.write("\n\tif [ ${#PING} -eq 0 ]\n\tthen")
    filepx.write("\n\t\techo \"nfp-proxy namespcace creation success and"
                 " reaching to $1\"")
    filepx.write("\n\telse\n\t\techo \"Fails reaching to $1\"")
    filepx.write("\n\tfi\n\n\tip netns exec nfp-proxy /usr/bin/nfp_proxy")
    filepx.write(" --config-file=$2"
                 " --log-file /var/log/nfp/nfp_proxy.log")
    filepx.write("\n}")
    filepx.close()
    # Install the generated helper where nfpproxy_startup sources it from.
    if os.path.exists("/usr/lib/python2.7/site-packages/gbpservice/nfp/"
                      "tools/"):
        shutil.copy(proxy_tool_file,
                    "/usr/lib/python2.7/site-packages/gbpservice/nfp/tools/")
    else:
        os.makedirs("/usr/lib/python2.7/site-packages/gbpservice/nfp/tools")
        shutil.copy(proxy_tool_file, "/usr/lib/python2.7/site-packages/gbpservice/nfp/tools/")
    try:
        shutil.rmtree(TEMP_WORK_DIR)
    except:
        print("Error: Cleaning up the temp directory")
        sys.exit(1)
def create_proxy_ctl():
    """
    create nfp proxy systemctl service file

    Writes two artifacts: /usr/bin/nfpproxy_startup (a shell wrapper that
    recreates the nfp-proxy namespace and launches the proxy inside it)
    and the nfp_proxy systemd unit that invokes the wrapper, then enables
    the unit.  Exits the process with status 1 on any error.
    """
    if not os.path.exists("/var/log/nfp"):
        os.makedirs("/var/log/nfp")
    if not os.path.exists(TEMP_WORK_DIR):
        os.makedirs(TEMP_WORK_DIR)
    # Startup wrapper: sources the nfp_namespace helpers installed by
    # create_nfp_namespace_file() and rebuilds the namespace on each start.
    proxy_sup_file = TEMP_WORK_DIR + "/nfpproxy_startup"
    try:
        filepx = open(proxy_sup_file, 'w+')
    except:
        print(("Error creating " + proxy_sup_file + " file"))
        sys.exit(1)
    filepx.write("#!/usr/bin/sh\nNFP_PROXY_AGENT_INI=/etc/nfp.ini")
    filepx.write("\nCONFIGURATOR_IP=`crudini --get $NFP_PROXY_AGENT_INI"
                 " PROXY nfp_controller_ip`\n")
    filepx.write(". /usr/lib/python2.7/site-packages/gbpservice/nfp/tools/"
                 "nfp_namespace;")
    filepx.write("namespace_delete ;namespace_create $CONFIGURATOR_IP $NFP_PROXY_AGENT_INI")
    filepx.close()
    # systemd unit for the proxy; restarts the wrapper on abort.
    proxy_ctl_file = TEMP_WORK_DIR + "/nfp_proxy.service"
    try:
        file = open(proxy_ctl_file, 'w+')
    except:
        print(("Error creating " + proxy_ctl_file + " file"))
        sys.exit(1)
    file.write("[Unit]\nDescription=One Convergence NFP Proxy\n")
    file.write("After=syslog.target network.target\n\n")
    file.write("\n[Service]\nUser=root\nExecStart=/usr/bin/nfpproxy_startup")
    file.write("\nRestart=on-abort")
    file.write("\n\n[Install]\nWantedBy=multi-user.target")
    file.close()
    if os.path.exists("/usr/lib/systemd/system"):
        shutil.copy(proxy_ctl_file, "/usr/lib/systemd/system/")
    else:
        print("error: /usr/lib/systemd/system not present")
        sys.exit(1)
    if os.path.exists("/usr/bin"):
        shutil.copy(proxy_sup_file, "/usr/bin/")
        # The wrapper is executed directly by systemd; make it executable.
        os.system("chmod +x /usr/bin/nfpproxy_startup")
    else:
        print("error: /usr/bin not present")
        sys.exit(1)
    subprocess.call(["systemctl", "enable", "nfp_proxy"])
    try:
        shutil.rmtree(TEMP_WORK_DIR)
    except:
        print("Error: Cleaning up the temp directory")
        sys.exit(1)
def create_proxy_agent_ctl():
    """
    create nfp proxy agent systemctl service file

    Writes the nfp_proxy_agent systemd unit (running `nfp --module
    proxy_agent`), installs it under /usr/lib/systemd/system and enables
    it.  Exits the process with status 1 on any error.
    """
    if not os.path.exists(TEMP_WORK_DIR):
        os.makedirs(TEMP_WORK_DIR)
    proxy_ctl_file = TEMP_WORK_DIR + "/nfp_proxy_agent.service"
    try:
        file = open(proxy_ctl_file, 'w+')
    except:
        print(("Error creating " + proxy_ctl_file + " file"))
        sys.exit(1)
    file.write("[Unit]\nDescription=One Convergence NFP Proxy Agent")
    file.write("\nAfter=syslog.target network.target\n")
    file.write("\n[Service]\nUser=root")
    file.write("\nExecStart=/usr/bin/nfp --module proxy_agent "
               "--config-file /etc/neutron/neutron.conf ")
    file.write("--config-file /etc/nfp.ini ")
    file.write("--log-file /var/log/nfp/nfp_proxy_agent.log\n")
    file.write("\n[Install]\nWantedBy=multi-user.target\n")
    file.close()
    if os.path.exists("/usr/lib/systemd/system"):
        shutil.copy(proxy_ctl_file, "/usr/lib/systemd/system/")
    else:
        print("error: /usr/lib/systemd/system not present")
        sys.exit(1)
    subprocess.call(["systemctl", "enable", "nfp_proxy_agent"])
    try:
        shutil.rmtree(TEMP_WORK_DIR)
    except:
        print("Error: Cleaning up the temp directory")
        sys.exit(1)
def get_openstack_creds():
    """Export admin credentials from neutron.conf into the environment.

    Reads the [keystone_authtoken] section of NEUTRON_CONF and sets the
    OS_* environment variables the openstack CLIs consume.
    """
    CONFIG.read(NEUTRON_CONF)
    # Read every option first (so a missing option raises before any env
    # mutation), then export them all in one shot.
    creds = {
        "OS_USERNAME": CONFIG.get('keystone_authtoken', 'username'),
        "OS_TENANT_NAME": CONFIG.get('keystone_authtoken', 'project_name'),
        "OS_PASSWORD": CONFIG.get('keystone_authtoken', 'password'),
        "OS_AUTH_URL": CONFIG.get('keystone_authtoken', 'auth_uri'),
    }
    os.environ.update(creds)
def create_nfp_resources():
    """
    create nfp resources

    Builds the default-nfp L3 policy, the service-management L2 policy and
    PTG, and the GBP services heat stack.
    """
    get_openstack_creds()
    # Default L3 policy carrying the NFP service and proxy IP pools.
    os.system("gbp l3policy-create default-nfp --ip-pool 172.16.0.0/16"
              " --subnet-prefix-length 20 --proxy-ip-pool=172.17.0.0/16")
    _, l3policy_Id = subprocess.getstatusoutput(
        "gbp l3policy-list | grep '\sdefault-nfp\s' | awk '{print $2}'")
    # L2 policy and service-management group chained off the L3 policy.
    os.system("gbp l2policy-create --l3-policy " +
              l3policy_Id + " svc_management_ptg")
    _, l2policy_Id = subprocess.getstatusoutput(
        "gbp l2policy-list | grep '\ssvc_management_ptg\s'"
        " | awk '{print $2}'")
    os.system("gbp group-create svc_management_ptg --service_management True"
              " --l2-policy " + l2policy_Id)
    # Create GBP Resources Heat stack
    os.system("heat stack-create --poll --template-file " + TEMPLATES_PATH +
              " gbp_services_stack")
def add_nova_key_pair():
    """Create a nova keypair for the configurator VM and store its key.

    Changes the working directory to the tools directory (later steps rely
    on relative paths), saves the private key to keys/configurator_key.pem
    with 0600 permissions, and returns the keypair name.
    """
    # Resolve the directory holding this script so the key lands next to
    # the tools regardless of the caller's cwd.
    tools_dir = os.path.dirname(__file__)
    tools_dir = os.path.realpath(tools_dir)
    if not tools_dir:
        # if script is executed from current dir, get abs path
        tools_dir = os.path.realpath('./')
    os.chdir(tools_dir)
    # Fix: create the directory directly instead of shelling out to mkdir.
    os.makedirs("keys", exist_ok=True)
    configurator_key_name = "configurator_key"
    print("Creating nova keypair for configurator VM.")
    # NOTE(review): getoutput captures stderr too — if `nova keypair-add`
    # fails, the error text (not a key) ends up in the .pem file.
    pem_file_content = subprocess.getoutput(
        "nova keypair-add " + configurator_key_name)
    pem_path = os.path.join("keys", configurator_key_name + ".pem")
    # Fix: use a context manager so the file handle is always closed.
    with open(pem_path, "w") as f:
        f.write(pem_file_content)
    os.chmod(pem_path, 0o600)
    return configurator_key_name
def launch_configurator():
    """Upload the controller image and boot the configurator VM.

    Requires args.controller_path to point at the qcow2 image; exits with
    status 1 when the image is missing or port/image lookup fails.
    """
    get_openstack_creds()
    # Guard: bail out early when the image file is absent.
    if not os.path.isfile(args.controller_path):
        print(("Error " + args.controller_path + " does not exist"))
        sys.exit(1)
    os.system("glance image-create --name nfp_controller"
              " --disk-format qcow2 --container-format bare"
              " --visibility public --file " + args.controller_path)
    # add nova keypair for nfp_controller VM.
    configurator_key_name = add_nova_key_pair()
    port_id = subprocess.getstatusoutput(
        "gbp policy-target-create --policy-target-group svc_management_ptg"
        " nfp_controllerVM_instance | grep port_id | awk '{print $4}'")[1]
    image_id = subprocess.getstatusoutput(
        "glance image-list | grep nfp_controller |awk '{print $2}'")[1]
    if not port_id:
        print("Error unable to create the controller port id")
        sys.exit(1)
    if not image_id:
        print("Error unable to get nfp_controller image info")
        sys.exit(1)
    os.system("nova boot --flavor m1.medium --image " +
              image_id + " --user-data " + CONFIGURATOR_USER_DATA +
              " --key-name " + configurator_key_name +
              " --nic port-id=" + port_id + " nfp_controllerVM_instance")
def configure_ext_net(ext_net_name):
    """Record the external network name in /etc/nfp.ini and restart the
    orchestrator so it picks the new value up.
    """
    cmd = ("crudini --set /etc/nfp.ini heat_driver"
           " internet_out_network_name %s"
           % (ext_net_name))
    os.system(cmd)
    subprocess.call(["systemctl", "restart", "nfp_orchestrator"])
def clean_up():
    """
    clean up nfp resources

    Deletes, in dependency order: the configurator VM, its policy target,
    the controller image, the service-management PTG with its L2/L3
    policies, and the GBP services heat stack.  Steps whose resource is
    not found are skipped.
    """
    get_openstack_creds()
    instance_id = subprocess.getstatusoutput(
        "nova list | grep nfp_controllerVM_instance | awk '{print $2}'")[1]
    if instance_id:
        os.system("nova delete " + instance_id)
        # Give nova time to tear the VM down before deleting its port.
        time.sleep(10)

    policy_target_id = subprocess.getstatusoutput(
        "gbp policy-target-list | grep nfp_controllerVM_instance"
        " | awk '{print $2}'")[1]
    if policy_target_id:
        os.system("gbp policy-target-delete " + policy_target_id)

    image_id = subprocess.getstatusoutput(
        "glance image-list | grep nfp_controller | awk '{print $2}'")[1]
    if image_id:
        os.system("glance image-delete " + image_id)

    # Fix: the original ran the identical group-list query twice
    # (ServiceMGMTId and SvcGroupId); issue it once and reuse the result.
    svc_group_id = subprocess.getstatusoutput(
        "gbp group-list | grep '\ssvc_management_ptg\s'"
        " | awk '{print $2}'")[1]
    if svc_group_id:
        l2policy_id = subprocess.getstatusoutput(
            "gbp l2policy-list | grep '\ssvc_management_ptg\s'"
            " | awk '{print $2}'")[1]
        l3policy_id = subprocess.getstatusoutput(
            "gbp l3policy-list | grep '\sdefault-nfp\s'"
            " | awk '{print $2}'")[1]
        os.system("gbp group-delete " + svc_group_id)
        os.system("gbp l2policy-delete " + l2policy_id)
        os.system("gbp l3policy-delete " + l3policy_id)

    heat_id = subprocess.getstatusoutput(
        "heat stack-list | grep '\sgbp_services_stack\s'"
        " | awk '{print $2}'")[1]
    if heat_id:
        os.system("heat stack-delete gbp_services_stack -y")
def main():
    """Dispatch the single CLI action selected via command-line flags.

    The elif chain fixes a priority order: when several flags are set,
    only the first matching action runs.  With no recognised flag (or a
    flag missing its required companion argument), usage help is printed.
    """
    if args.configure_nfp:
        configure_nfp()
    elif args.build_controller_vm:
        build_configuration_vm()
    elif args.enable_orchestrator:
        # Install both orchestrator units, then (re)start them.
        create_orchestrator_ctl()
        restart_nfp_orchestrator()
        restart_nfp_config_orch()
    elif args.enable_proxy:
        # Proxy needs the namespace helper script plus two services.
        create_nfp_namespace_file()
        create_proxy_ctl()
        restart_nfp_proxy()
        create_proxy_agent_ctl()
        restart_nfp_proxy_agent()
    elif args.create_resources:
        create_nfp_resources()
    elif args.launch_controller:
        # --controller-path is mandatory for launching the controller VM.
        if args.controller_path:
            launch_configurator()
        else:
            parser.print_help()
    elif args.configure_ext_net:
        if args.ext_net_name != '':
            configure_ext_net(args.ext_net_name)
        else:
            parser.print_help()
    elif args.clean_up_nfp:
        clean_up()
    else:
        parser.print_help()


if __name__ == '__main__':
    main()

View File

@ -1,322 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
import time
import netaddr
import netifaces
from oslo_log import log as logging
import oslo_serialization.jsonutils as jsonutils
import pecan
from pecan import rest
import yaml
from gbpservice._i18n import _
LOG = logging.getLogger(__name__)
# Per-resource status codes reported back to the config agent.
SUCCESS = 'SUCCESS'
FAILED = 'FAILED'
# Module-global buffer of pending notifications, drained by each GET poll.
notifications = []
# Script (baked into the service VM image) that programs firewall rules.
FW_SCRIPT_PATH = ("/usr/local/lib/python2.7/dist-packages/" +
                  "gbpservice/contrib/nfp_service/" +
                  "reference_configurator/scripts/configure_fw_rules.py")
class Controller(rest.RestController):
    """Implements all the APIs Invoked by HTTP requests.

    Implements following HTTP methods.
        -get
        -post
    """

    def __init__(self, method_name):
        """Initialize the controller and DHCP-configure eth0 (best effort).

        :param method_name: accepted for interface compatibility; the
            handler name is always network_function_device_notification.
        """
        try:
            self.method_name = "network_function_device_notification"
            super(Controller, self).__init__()
            # dhclient is run twice; presumably to cover a race with
            # interface hotplug -- TODO confirm.
            ip_a = subprocess.Popen('ifconfig -a', shell=True,
                                    stdout=subprocess.PIPE).stdout.read()
            out1 = subprocess.Popen('dhclient eth0', shell=True,
                                    stdout=subprocess.PIPE).stdout.read()
            out2 = subprocess.Popen('dhclient eth0', shell=True,
                                    stdout=subprocess.PIPE).stdout.read()
            output = "%s\n%s\n%s" % (ip_a, out1, out2)
            LOG.info("Dhclient on eth0, result: %(output)s",
                     {'output': output})
        except Exception as err:
            msg = (
                "Failed to initialize Controller class %s." %
                str(err).capitalize())
            LOG.error(msg)

    def _push_notification(self, context,
                           notification_data, service_type):
        # Buffer the notification; it is drained by the next GET poll.
        response = {'info': {'service_type': service_type,
                             'context': context},
                    'notification': notification_data
                    }

        notifications.append(response)

    @pecan.expose(method='GET', content_type='application/json')
    def get(self):
        """Method of REST server to handle request get_notifications.

        This method send an RPC call to configurator and returns Notification
        data to config-agent

        Returns: Dictionary that contains Notification data
        """

        global notifications
        try:
            notification_data = jsonutils.dumps(notifications)
            msg = ("NOTIFICATION_DATA sent to config_agent %s"
                   % notification_data)
            LOG.info(msg)
            # Reset the buffer only after a successful dump so nothing is
            # lost when serialization fails.
            notifications = []
            return notification_data
        except Exception as err:
            pecan.response.status = 500
            msg = ("Failed to get notification_data %s."
                   % str(err).capitalize())
            LOG.error(msg)
            error_data = self._format_description(msg)
            return jsonutils.dumps(error_data)

    @pecan.expose(method='POST', content_type='application/json')
    def post(self, **body):
        """Apply each config item in the request and buffer the results.

        Every item yields exactly one notification entry: SUCCESS when its
        handler ran cleanly, FAILED otherwise.
        """
        try:
            body = None
            if pecan.request.is_body_readable:
                body = pecan.request.json_body

            msg = ("Request data:: %s" % body)
            LOG.debug(msg)

            config_datas = body['config']
            service_type = body['info']['service_type']
            notification_data = []

            for config_data in config_datas:
                # Fix: resolve the resource name before the try block so the
                # except clause can always reference it (previously a missing
                # 'resource' key caused a NameError inside the handler).
                resource = config_data.get('resource')
                try:
                    if resource == 'healthmonitor':
                        self._configure_healthmonitor(config_data)
                    elif resource == 'interfaces':
                        self._configure_interfaces(config_data)
                    elif resource == 'routes':
                        self._add_routes(config_data)
                    elif resource in ['ansible', 'heat', 'custom_json']:
                        self._apply_user_config(config_data)
                    else:
                        # Fix: report only FAILED; the original also appended
                        # a spurious SUCCESS entry for unsupported resources.
                        notification_data.append(
                            {'resource': resource,
                             'data': {'status_code': FAILED,
                                      'status_msg': 'Unsupported resource'}})
                        continue
                    notification_data.append(
                        {'resource': resource,
                         'data': {'status_code': SUCCESS}})
                except Exception as ex:
                    notification_data.append(
                        {'resource': resource,
                         'data': {'status_code': FAILED,
                                  'status_msg': str(ex)}})

            context = body['info']['context']
            self._push_notification(context, notification_data,
                                    service_type)
        except Exception as err:
            pecan.response.status = 500
            msg = ("Failed to serve HTTP post request %s %s."
                   % (self.method_name, str(err).capitalize()))
            LOG.error(msg)
            error_data = self._format_description(msg)
            return jsonutils.dumps(error_data)

    def _format_description(self, msg):
        """This method formats error description.

        :param msg: An error message that is to be formatted

        Returns: error_data dictionary
        """

        return {'failure_desc': {'msg': msg}}

    def _configure_healthmonitor(self, config_data):
        """Acknowledge a health monitor config; no device action is needed."""
        LOG.info("Configures healthmonitor with configuration "
                 "data : %(healthmonitor_data)s ",
                 {'healthmonitor_data': config_data})

    def _configure_interfaces(self, config_data):
        """DHCP-configure the data interfaces (eth1/eth2) of the VM."""
        out1 = subprocess.Popen('sudo dhclient eth1', shell=True,
                                stdout=subprocess.PIPE).stdout.read()
        out2 = subprocess.Popen('sudo dhclient eth2', shell=True,
                                stdout=subprocess.PIPE).stdout.read()
        out3 = subprocess.Popen('cat /etc/network/interfaces', shell=True,
                                stdout=subprocess.PIPE).stdout.read()
        output = "%s\n%s\n%s" % (out1, out2, out3)
        LOG.info("Dhclient on eth0, result: %(initial_data)s",
                 {'initial_data': output})
        LOG.info("Configures interfaces with configuration "
                 "data : %(interface_data)s ",
                 {'interface_data': config_data})

    def get_source_cidrs_and_gateway_ip(self, route_info):
        """Collect every network CIDR plus the stitching network's gateway.

        Assumes at least one network of type 'stitching' supplies gw_ip;
        otherwise gateway_ip is unbound and an UnboundLocalError is raised.
        """
        nfds = route_info['resource_data']['nfds']
        source_cidrs = []
        for nfd in nfds:
            for network in nfd['networks']:
                source_cidrs.append(network['cidr'])
                if network['type'] == 'stitching':
                    gateway_ip = network['gw_ip']
        return source_cidrs, gateway_ip

    def _add_routes(self, route_info):
        """Install per-CIDR policy routing rules and default routes."""
        LOG.info("Configuring routes with configuration "
                 "data : %(route_data)s ",
                 {'route_data': route_info['resource_data']})
        source_cidrs, gateway_ip = self.get_source_cidrs_and_gateway_ip(
            route_info)
        default_route_commands = []
        for cidr in source_cidrs:
            try:
                source_interface = self._get_if_name_by_cidr(cidr)
            except Exception:
                raise Exception(_("Some of the interfaces do not have "
                                  "IP Address"))
            try:
                interface_number_string = source_interface.split("eth", 1)[1]
            except IndexError:
                # NOTE(review): after this log the code still uses
                # interface_number_string; the resulting NameError is folded
                # into the "Failed to add static routes" exception below.
                LOG.error("Retrieved wrong interface %(interface)s for "
                          "configuring routes",
                          {'interface': source_interface})
            try:
                # One routing table per data interface, offset by 20.
                routing_table_number = 20 + int(interface_number_string)
                ip_rule_command = "ip rule add from %s table %s" % (
                    cidr, routing_table_number)
                out1 = subprocess.Popen(ip_rule_command, shell=True,
                                        stdout=subprocess.PIPE).stdout.read()
                ip_rule_command = "ip rule add to %s table main" % (cidr)
                out2 = subprocess.Popen(ip_rule_command, shell=True,
                                        stdout=subprocess.PIPE).stdout.read()
                ip_route_command = "ip route add table %s default via %s" % (
                    routing_table_number, gateway_ip)
                # Default routes are applied after all rules are in place.
                default_route_commands.append(ip_route_command)
                output = "%s\n%s" % (out1, out2)
                LOG.info("Static route configuration result: %(output)s",
                         {'output': output})
            except Exception as ex:
                raise Exception(_("Failed to add static routes: %(ex)s") % {
                    'ex': str(ex)})
        for command in default_route_commands:
            try:
                out = subprocess.Popen(command, shell=True,
                                       stdout=subprocess.PIPE).stdout.read()
                LOG.info("Static route configuration result: %(output)s",
                         {'output': out})
            except Exception as ex:
                raise Exception(_("Failed to add static routes: %(ex)s") % {
                    'ex': str(ex)})

    def _get_if_name_by_cidr(self, cidr):
        """Return the interface whose IPv4 address lies in *cidr*.

        Retries up to 10 times (3s apart) because a hotplugged interface
        can take a while to obtain its address; raises when no match is
        found after the retries.
        """
        interfaces = netifaces.interfaces()
        retry_count = 0
        while True:
            all_interfaces_have_ip = True
            for interface in interfaces:
                inet_list = netifaces.ifaddresses(interface).get(
                    netifaces.AF_INET)
                if not inet_list:
                    all_interfaces_have_ip = False
                for inet_info in inet_list or []:
                    netmask = inet_info.get('netmask')
                    ip_address = inet_info.get('addr')
                    subnet_prefix = cidr.split("/")
                    if (ip_address == subnet_prefix[0] and (
                            len(subnet_prefix) == 1 or
                            subnet_prefix[1] == "32")):
                        return interface
                    ip_address_netmask = '%s/%s' % (ip_address, netmask)
                    interface_cidr = netaddr.IPNetwork(ip_address_netmask)
                    if str(interface_cidr.cidr) == cidr:
                        return interface
            # Sometimes the hotplugged interface takes time to get IP.
            # Fix: previously, when every interface already had an IP but
            # none matched, this loop spun forever; now the retry budget
            # applies to both cases.
            if retry_count < 10:
                time.sleep(3)
                retry_count = retry_count + 1
                continue
            raise Exception(_("Some of the interfaces do not have "
                              "IP Address"))

    @staticmethod
    def _strip_prefix(text, prefix):
        """Remove *prefix* from *text* if present (str.removeprefix backport)."""
        return text[len(prefix):] if text.startswith(prefix) else text

    def _apply_user_config(self, config_data):
        """Extract firewall rules from the user config and apply them via
        the on-image configure_fw_rules script.
        """
        LOG.info("Applying user config with configuration "
                 "type : %(config_type)s and "
                 "configuration data : %(config_data)s ",
                 {'config_type': config_data['resource'],
                  'config_data': config_data['resource_data']})
        service_config = config_data['resource_data'][
            'config_string']
        service_config = str(service_config)
        # Fix: str.lstrip() strips a *character set*, not a prefix --
        # e.g. lstrip('heat_config:') also eats any leading 'h','e','a',...
        # characters of the payload itself.  Strip the literal prefix.
        if config_data['resource'] == 'ansible':
            rules = self._strip_prefix(service_config, 'ansible:')
        elif config_data['resource'] == 'heat':
            config_str = self._strip_prefix(service_config, 'heat_config:')
            rules = self._get_rules_from_config(config_str)
        elif config_data['resource'] == 'custom_json':
            rules = self._strip_prefix(service_config, 'custom_json:')
        fw_rule_file = FW_SCRIPT_PATH
        command = ("sudo python " + fw_rule_file + " '" +
                   rules + "'")
        subprocess.check_output(command, stderr=subprocess.STDOUT,
                                shell=True)

    def _get_rules_from_config(self, config_str):
        """Convert a heat stack template into the firewall-rule JSON the
        configure_fw_rules script expects; non-template input is returned
        unchanged.
        """
        rules_list = []
        try:
            # NOTE(security): yaml.load without an explicit Loader can
            # construct arbitrary Python objects from untrusted input;
            # yaml.safe_load is preferred.  Left as-is to preserve
            # behaviour for existing templates.
            stack_template = (jsonutils.loads(config_str) if
                              config_str.startswith('{') else
                              yaml.load(config_str))
        except Exception:
            return config_str

        resources = stack_template['resources']
        for resource in resources:
            if resources[resource]['type'] == 'OS::Neutron::FirewallRule':
                rule_info = {}
                destination_port = ''
                rule = resources[resource]['properties']
                protocol = rule['protocol']
                rule_info['action'] = 'log'
                rule_info['name'] = protocol
                if rule.get('destination_port'):
                    destination_port = rule['destination_port']
                if protocol == 'tcp':
                    rule_info['service'] = (protocol + '/' +
                                            str(destination_port))
                else:
                    rule_info['service'] = protocol
                rules_list.append(rule_info)

        return jsonutils.dumps({'rules': rules_list})

View File

@ -1,104 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from subprocess import call
from subprocess import PIPE
from subprocess import Popen
import sys
from oslo_log import log as logging
from oslo_serialization import jsonutils
LOG = logging.getLogger(__name__)
class ConfigureIPtables(object):
    """Program iptables from a JSON rules blob.

    Rules land in a dedicated "testchain" hooked into FORWARD; traffic not
    accepted by the chain falls through to a DROP rule.
    """

    def __init__(self, json_blob):
        """Enable IP forwarding if needed and parse the rules blob.

        :param json_blob: JSON string of the form {"rules": [...]};
            exits the process when it is not valid JSON.
        """
        ps = Popen(["sysctl", "net.ipv4.ip_forward"], stdout=PIPE)
        # Fix: communicate() returns bytes on Python 3; decode before the
        # substring test against a str literal (was a TypeError).
        output = ps.communicate()[0].decode()
        if "0" in output:
            LOG.info("Enabling IP forwarding ...")
            call(["sysctl", "-w", "net.ipv4.ip_forward=1"])
        else:
            LOG.info("IP forwarding already enabled")
        try:
            self.rules_json = jsonutils.loads(json_blob)
        except ValueError:
            sys.exit('Given json_blob is not a valid json')

    def update_chain(self):
        """(Re)build "testchain" from self.rules_json.

        Creates and hooks the chain on first use, flushes it, installs one
        rule per entry (service "proto" or "proto/port", action LOG or
        ACCEPT) and finally allows established/related return traffic.
        """
        ps = Popen(["iptables", "-L"], stdout=PIPE)
        # Fix: decode bytes output before the membership test (Python 3).
        output = ps.communicate()[0].decode()

        # check if chain is present if not create new chain
        if "testchain" not in output:
            LOG.info("Creating new chain ...")
            call(["iptables", "-F"])
            call(["iptables", "-N", "testchain"])
            call(
                ["iptables", "-t", "filter",
                 "-A", "FORWARD", "-j", "testchain"])
            call(["iptables", "-A", "FORWARD", "-j", "DROP"])

        # flush chain of existing rules
        call(["iptables", "-F", "testchain"])

        # Update chain with new rules
        LOG.info("Updating chain with new rules ...")
        count = 0
        for rule in self.rules_json.get('rules'):
            LOG.info("adding rule %(count)d", {'count': count})
            try:
                action_values = ["LOG", "ACCEPT"]
                action = rule['action'].upper()
                if action not in action_values:
                    sys.exit(
                        "Action %s is not valid action! Please enter "
                        "valid action (LOG or ACCEPT)" % (action))
                service = rule['service'].split('/')
            except KeyError as e:
                sys.exit('KeyError: Rule does not have key %s' % (e))

            if len(service) > 1:
                ps = Popen(["iptables", "-A", "testchain", "-p", service[0],
                            "--dport", service[1], "-j", action],
                           stdout=PIPE)
            else:
                ps = Popen(
                    ["iptables", "-A", "testchain", "-p", service[0],
                     "-j", action], stdout=PIPE)
            output = ps.communicate()[0].decode()
            if output:
                LOG.error("Unable to add rule to chain due to: %(msg)s",
                          {'msg': output})
            count = count + 1

        # Allow established/related return traffic through the chain.
        ps = Popen(["iptables", "-A", "testchain", "-m", "state", "--state",
                    "ESTABLISHED,RELATED", "-j", "ACCEPT"], stdout=PIPE)
        output = ps.communicate()[0].decode()
        if output:
            LOG.error("Unable to add rule to chain due to: %(output)s",
                      {'output': output})
def main():
    """CLI entry point: expects a single JSON rules blob argument."""
    # Guard: require the blob argument before doing any work.
    if len(sys.argv) < 2:
        sys.exit('Usage: %s json-blob' % sys.argv[0])
    ConfigureIPtables(sys.argv[1]).update_chain()


if __name__ == "__main__":
    main()

View File

@ -1,93 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import exceptions as k_exceptions
from keystoneclient.v2_0 import client as keyclient
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from gbpservice._i18n import _
from gbpservice.common import utils
from gbpservice.neutron.services.servicechain.plugins.ncp.node_plumbers \
import traffic_stitching_plumber as tscp
LOG = logging.getLogger(__name__)
# Config options for the admin-owned-resources plumber; registered under
# the [admin_owned_resources_apic_tscp] section of the service config.
TSCP_OPTS = [
    cfg.StrOpt('plumbing_resource_owner_user',
               help=_("Username of the Openstack keystone user who owns the "
                      "resources created by the traffic stitching plumber")),
    cfg.StrOpt('plumbing_resource_owner_password',
               help=_("Openstack keystone password for the user who "
                      "owns the resources created by the traffic stitching "
                      "plumber"),
               secret=True),
    cfg.StrOpt('plumbing_resource_owner_tenant_name',
               help=_("Name of the Tenant that will own the plumber created "
                      " resources"),)
]

cfg.CONF.register_opts(TSCP_OPTS, "admin_owned_resources_apic_tscp")
class AdminOwnedResourcesApicTSCP(tscp.TrafficStitchingPlumber):
    """Traffic Stitching Plumber for APIC with Admin owned resources.

    This plumber for APIC mapping provides the ability to choose the user and
    who owns the resources created by the plumber.
    """

    def initialize(self):
        # The owning tenant id is resolved lazily on first access.
        self._resource_owner_tenant_id = None
        super(AdminOwnedResourcesApicTSCP, self).initialize()

    @property
    def resource_owner_tenant_id(self):
        """Keystone tenant id that owns plumber-created resources (cached)."""
        if not self._resource_owner_tenant_id:
            self._resource_owner_tenant_id = (
                self._get_resource_owner_tenant_id())
        return self._resource_owner_tenant_id

    def plug_services(self, context, deployment):
        # Delegate with a context elevated to the resource-owner tenant.
        owner_context = self._get_resource_owner_context(context)
        super(AdminOwnedResourcesApicTSCP, self).plug_services(
            owner_context, deployment)

    def unplug_services(self, context, deployment):
        owner_context = self._get_resource_owner_context(context)
        super(AdminOwnedResourcesApicTSCP, self).unplug_services(
            owner_context, deployment)

    def _get_resource_owner_tenant_id(self):
        """Look up the configured owner tenant's id in keystone."""
        user, pwd, tenant_name, auth_url = utils.get_keystone_creds()
        ks_client = keyclient.Client(username=user, password=pwd,
                                     auth_url=auth_url)
        try:
            return ks_client.tenants.find(name=tenant_name).id
        except k_exceptions.NotFound:
            with excutils.save_and_reraise_exception(reraise=True):
                LOG.error('No tenant with name %s exists.', tenant_name)
        except k_exceptions.NoUniqueMatch:
            with excutils.save_and_reraise_exception(reraise=True):
                LOG.error('Multiple tenants matches found for %s',
                          tenant_name)

    def _get_resource_owner_context(self, context):
        """Return an elevated copy of *context* scoped to the owner tenant."""
        owner_context = context.elevated()
        owner_context.tenant_id = self.resource_owner_tenant_id
        user, pwd, _tenant, auth_url = utils.get_keystone_creds()
        ks_client = keyclient.Client(username=user, password=pwd,
                                     auth_url=auth_url)
        owner_context.auth_token = ks_client.get_token(
            self.resource_owner_tenant_id)
        return owner_context

View File

@ -1,186 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_log import log as logging
from gbpservice.neutron.extensions import driver_proxy_group as pg_ext
from gbpservice.neutron.extensions import group_policy
from gbpservice.neutron.services.grouppolicy.common import exceptions as exc
from gbpservice.neutron.services.servicechain.plugins.ncp.node_plumbers \
import common
from gbpservice.neutron.services.servicechain.plugins.ncp import plumber_base
# Module-level logger.
LOG = logging.getLogger(__name__)
# Name prefix for the implicitly-created stitching ("jump") PTGs, see
# _create_jump_group below.
TSCP_RESOURCE_PREFIX = 'tscp_'
class TrafficStitchingPlumber(plumber_base.NodePlumberBase):
    """Traffic Stitching Plumber (TScP).
    uses the GBP underlying constructs in order to guarantee a correct traffic
    flow across services from their provider to the consumer and vice versa.
    The output of the plumbing operations will be either the creation or
    deletion of a set of Service Targets, which effectively result in creation
    of Policy Targets exposed to the specific Node Driver for its own use.
    In addition to that, TScP will create a set of L2Ps and/or PTGs that are
    "stitched" together and host the actual service PTs. The proxy_group
    extension is a requirement for this plumber to work.
    """
    def initialize(self):
        # Plugin handles are resolved lazily via the properties below.
        self._gbp_plugin = None
        self._sc_plugin = None
        # Verify that proxy_group extension is loaded
        if pg_ext.PROXY_GROUP not in cfg.CONF.group_policy.extension_drivers:
            LOG.error("proxy_group GBP driver extension is mandatory for "
                      "traffic stitching plumber.")
            raise exc.GroupPolicyDeploymentError()
    @property
    def gbp_plugin(self):
        # Group Policy plugin, looked up from the directory on first access.
        if not self._gbp_plugin:
            self._gbp_plugin = directory.get_plugin("GROUP_POLICY")
        return self._gbp_plugin
    @property
    def sc_plugin(self):
        # Service Chain plugin, looked up from the directory on first access.
        if not self._sc_plugin:
            self._sc_plugin = directory.get_plugin("SERVICECHAIN")
        return self._sc_plugin
    def plug_services(self, context, deployment):
        """Create service targets (and jump PTGs) for each deployment part.

        Walks the deployment from the provider side toward the consumer,
        creating management/provider/consumer PTs per each part's
        ``plumbing_info`` and chaining jump PTGs for gateway/transparent
        services.
        """
        if deployment:
            provider = deployment[0]['context'].provider
            management = deployment[0]['context'].management
            # Sorted from provider (N) to consumer (0)
            # TODO(ivar): validate number of interfaces per service per service
            # type is as expected
            self._sort_deployment(deployment)
            for part in deployment:
                info = part['plumbing_info']
                if not info:
                    # Nothing to plumb for this part.
                    continue
                part_context = part['context']
                # Management PT can be created immediately
                self._create_service_target(
                    context, part_context, info.get('management', []),
                    management, 'management')
                # Create proper PTs based on the service type
                jump_ptg = None
                LOG.info("Plumbing service of type '%s'",
                         info['plumbing_type'])
                if info['plumbing_type'] == common.PLUMBING_TYPE_ENDPOINT:
                    # No stitching needed, only provider side PT is created.
                    # overriding PT name in order to keep port security up
                    # for this kind of service.
                    node = part_context.current_node
                    instance = part_context.instance
                    for provider_info in info.get('provider', []):
                        provider_info['name'] = ("tscp_endpoint_service_%s_%s"
                            % (node['id'][:5], instance['id'][:5]))
                    self._create_service_target(
                        context, part_context, info.get('provider', []),
                        provider, 'provider')
                elif info['plumbing_type'] == common.PLUMBING_TYPE_GATEWAY:
                    # L3 stitching needed, provider and consumer side PTs are
                    # created. One proxy_gateway is needed in consumer side
                    jump_ptg = self._create_l3_jump_group(
                        context, provider, part['context'].current_position)
                    # On provider side, this service is the default gateway
                    info['provider'][0]['group_default_gateway'] = True
                    self._create_service_target(
                        context, part_context, info['provider'],
                        provider, 'provider')
                    # On consumer side, this service is the proxy gateway
                    info['consumer'][0]['proxy_gateway'] = True
                    self._create_service_target(
                        context, part_context, info['consumer'], jump_ptg,
                        'consumer')
                elif info['plumbing_type'] == common.PLUMBING_TYPE_TRANSPARENT:
                    # L2 stitching needed, provider and consumer side PTs are
                    # created
                    self._create_service_target(
                        context, part_context, info.get('provider', []),
                        provider, 'provider')
                    jump_ptg = self._create_l2_jump_group(
                        context, provider, part['context'].current_position)
                    self._create_service_target(
                        context, part_context, info['consumer'],
                        jump_ptg, 'consumer')
                else:
                    LOG.warning("Unsupported plumbing type %s",
                                info['plumbing_type'])
                # Replace current "provider" with jump ptg if needed
                provider = jump_ptg or provider
    def unplug_services(self, context, deployment):
        """Delete the service targets and any proxy (jump) PTG chain."""
        # Sorted from provider (0) to consumer (N)
        if not deployment:
            return
        self._sort_deployment(deployment)
        provider = deployment[0]['context'].provider
        for part in deployment:
            self._delete_service_targets(context, part)
        # Delete jump PTGs
        jump_ptgs = []
        # Collect the chain of proxy PTGs hanging off the provider first,
        # then delete them consumer-to-provider (reversed) below.
        while provider['proxy_group_id']:
            try:
                proxy = self.gbp_plugin.get_policy_target_group(
                    context, provider['proxy_group_id'])
                jump_ptgs.append(proxy)
            except group_policy.PolicyTargetGroupNotFound as ex:
                LOG.info(ex.message)
                # If this proxy doesn't exist, then subsequent ones won't too
                break
            provider = proxy
        for jump_ptg in reversed(jump_ptgs):
            try:
                self.gbp_plugin.delete_policy_target_group(
                    context, jump_ptg['id'])
            except group_policy.PolicyTargetGroupNotFound as ex:
                # Already gone; deletion is best-effort.
                LOG.info(ex.message)
    def _create_l3_jump_group(self, context, proxied, position):
        # L3-proxy flavored stitching group.
        return self._create_jump_group(
            context, proxied, position, pg_ext.PROXY_TYPE_L3)
    def _create_l2_jump_group(self, context, proxied, position):
        # L2-proxy flavored stitching group.
        return self._create_jump_group(
            context, proxied, position, pg_ext.PROXY_TYPE_L2)
    def _create_jump_group(self, context, proxied, position, type):
        """Create a proxy PTG in front of *proxied* for stitching traffic."""
        data = {
            "name": (TSCP_RESOURCE_PREFIX + str(position) + "_" +
                     proxied['name']),
            "description": "Implicitly created stitching group",
            "l2_policy_id": None,
            "proxied_group_id": proxied['id'],
            "proxy_type": type,
            "proxy_group_id": constants.ATTR_NOT_SPECIFIED,
            "network_service_policy_id": None,
            "service_management": False
        }
        return self.gbp_plugin.create_policy_target_group(
            context, {'policy_target_group': data})
    def _create_service_target(self, *args, **kwargs):
        # Default both gateway flags to False; plug_services overrides them
        # in the plumbing_info dictionaries where needed.
        kwargs['extra_data'] = {'proxy_gateway': False,
                                'group_default_gateway': False}
        super(TrafficStitchingPlumber, self)._create_service_target(
            *args, **kwargs)

View File

@ -1,300 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import collections
from neutron.common import config
from neutron_lib import context as n_ctx
from oslo_config import cfg
from oslo_serialization import jsonutils
from gbpservice.neutron.services.servicechain.plugins.ncp import (
plugin as ncp_plugin)
from gbpservice.neutron.services.servicechain.plugins.ncp import context
from gbpservice.neutron.tests.unit.db.grouppolicy import (
test_servicechain_db as test_servicechain_db)
from gbpservice.neutron.tests.unit.db.grouppolicy import test_group_policy_db
# Make sure the NCP 'node_drivers' option is registered on cfg.CONF before
# the test plugins below reference it.
cfg.CONF.import_opt(
    'node_drivers',
    'gbpservice.neutron.services.servicechain.plugins.ncp.config',
    group='node_composition_plugin')
class ServiceChainNCPTestPlugin(ncp_plugin.NodeCompositionPlugin):
    """Node Composition Plugin variant used as the SC plugin under test."""
    supported_extension_aliases = ['servicechain'] + (
        test_group_policy_db.UNSUPPORTED_REQUIRED_EXTS)
    path_prefix = "/servicechain"
# Dotted path of the test plugin, handed to the test framework as the
# service chain plugin class to load.
SC_PLUGIN_KLASS = (ServiceChainNCPTestPlugin.__module__ + '.' +
                   ServiceChainNCPTestPlugin.__name__)
class ServiceChainPluginTestCase(test_servicechain_db.ServiceChainDbTestCase):
    """Base service-chain test case wired to the NCP test plugin.

    Also guarantees the ``keystone_authtoken`` options read by the code
    under test are registered on the test config object.
    """
    def setUp(self, core_plugin=None, sc_plugin=None, gp_plugin=None):
        super(ServiceChainPluginTestCase, self).setUp(core_plugin=core_plugin,
                                                      sc_plugin=sc_plugin or
                                                      SC_PLUGIN_KLASS,
                                                      gp_plugin=gp_plugin)
        # The original code repeated an identical try/except block per
        # option; register each option only when it is missing (accessing
        # an unregistered option raises NoSuchOptError).
        for opt_name in ('username', 'password', 'project_name'):
            try:
                getattr(config.cfg.CONF.keystone_authtoken, opt_name)
            except config.cfg.NoSuchOptError:
                config.cfg.CONF.register_opt(
                    config.cfg.StrOpt(opt_name),
                    'keystone_authtoken')
class BaseTestGroupPolicyPluginGroupResources(
        ServiceChainPluginTestCase,
        test_servicechain_db.TestServiceChainResources):
    """Sharing/visibility and parameter-propagation tests for SC resources."""
    def test_spec_shared(self):
        """A shared spec may only reference shared nodes."""
        # Shared spec can only point shared nodes
        node = self._create_profiled_servicechain_node(
            'LOADBALANCERV2', shared=True, shared_profile=True,
            profile_tenant_id='admin', tenant_id='admin')['servicechain_node']
        self.create_servicechain_spec(nodes=[node['id']], shared=True,
                                      expected_res_status=201)
        self.create_servicechain_spec(nodes=[node['id']], shared=False,
                                      tenant_id='admin',
                                      expected_res_status=201)
        # Non-shared node owned by another tenant: shared specs must fail
        # (404 cross-tenant, 400 same-tenant), non-shared same-tenant works.
        node = self._create_profiled_servicechain_node(
            'LOADBALANCERV2', shared=False, profile_tenant_id='nonadmin',
            tenant_id='nonadmin')['servicechain_node']
        self.create_servicechain_spec(nodes=[node['id']], shared=True,
                                      expected_res_status=404)
        self.create_servicechain_spec(nodes=[node['id']], shared=True,
                                      tenant_id='nonadmin',
                                      expected_res_status=400)
        self.create_servicechain_spec(nodes=[node['id']], shared=False,
                                      tenant_id='nonadmin',
                                      expected_res_status=201)
    def test_node_shared(self):
        """A shared node may only reference a shared profile."""
        # Shared node can only point shared profile
        prof = self.create_service_profile(
            service_type='LOADBALANCERV2', shared=True,
            tenant_id='admin')['service_profile']
        to_update = self.create_servicechain_node(
            service_profile_id=prof['id'], shared=True,
            expected_res_status=201)['servicechain_node']
        self.create_servicechain_node(
            service_profile_id=prof['id'], shared=False, tenant_id='admin',
            expected_res_status=201)
        # Non-shared profile: shared nodes are rejected.
        prof = self.create_service_profile(
            service_type='LOADBALANCERV2', shared=False,
            tenant_id='admin')['service_profile']
        self.create_servicechain_node(
            service_profile_id=prof['id'], shared=True,
            expected_res_status=404)
        self.create_servicechain_node(
            service_profile_id=prof['id'], shared=True,
            tenant_id='admin', expected_res_status=400)
        self.create_servicechain_node(
            service_profile_id=prof['id'], shared=False,
            tenant_id='admin', expected_res_status=201)
        self.create_servicechain_spec(nodes=[to_update['id']], shared=True,
                                      tenant_id='nonadmin',
                                      expected_res_status=201)
        # Un-sharing a node that is in use by another tenant must fail.
        data = {'servicechain_node': {'shared': False}}
        req = self.new_update_request('servicechain_nodes', data,
                                      to_update['id'])
        res = req.get_response(self.ext_api)
        self.assertEqual(400, res.status_int)
        res = self.deserialize(self.fmt, res)
        self.assertEqual('InvalidSharedAttributeUpdate',
                         res['NeutronError']['type'])
    def test_profile_shared(self):
        """Un-sharing an in-use profile fails; sharing one succeeds."""
        prof = self.create_service_profile(
            service_type='LOADBALANCERV2', shared=True,
            tenant_id='admin')['service_profile']
        self.create_servicechain_node(
            service_profile_id=prof['id'], shared=True,
            expected_res_status=201)
        data = {'service_profile': {'shared': False}}
        req = self.new_update_request('service_profiles', data,
                                      prof['id'])
        res = req.get_response(self.ext_api)
        self.assertEqual(400, res.status_int)
        res = self.deserialize(self.fmt, res)
        self.assertEqual('InvalidSharedAttributeUpdate',
                         res['NeutronError']['type'])
        # Going from non-shared to shared is always allowed.
        prof = self.create_service_profile(
            service_type='LOADBALANCERV2', shared=False)['service_profile']
        self.create_servicechain_node(
            service_profile_id=prof['id'], shared=False,
            expected_res_status=201)
        data = {'service_profile': {'shared': True}}
        req = self.new_update_request('service_profiles', data,
                                      prof['id'])
        res = req.get_response(self.ext_api)
        self.assertEqual(200, res.status_int)
        res = self.deserialize(self.fmt, res)
        self.assertTrue(res['service_profile']['shared'])
    def test_node_context_profile(self):
        """NodeDriverContext exposes current/original node and profile."""
        # Current node with profile
        plugin_context = n_ctx.get_admin_context()
        plugin_context.is_admin = plugin_context.is_advsvc = False
        plugin_context.tenant_id = self._tenant_id
        prof = self.create_service_profile(
            service_type='LOADBALANCERV2')['service_profile']
        current = self.create_servicechain_node(
            service_profile_id=prof['id'],
            expected_res_status=201)['servicechain_node']
        ctx = context.NodeDriverContext(self.plugin, plugin_context,
                                        None, None, current, 0,
                                        prof, None)
        self.assertIsNone(ctx.original_node)
        self.assertIsNone(ctx.original_profile)
        self.assertEqual(ctx.current_node, current)
        self.assertEqual(ctx.current_profile, prof)
        # Original node with profile
        prof2 = self.create_service_profile(
            service_type='LOADBALANCERV2')['service_profile']
        original = self.create_servicechain_node(
            service_profile_id=prof2['id'],
            expected_res_status=201)['servicechain_node']
        ctx = context.NodeDriverContext(
            self.plugin, plugin_context, None, None, current, 0,
            prof, None, original_service_chain_node=original,
            original_service_profile=prof2)
        self.assertEqual(ctx.original_node, original)
        self.assertEqual(ctx.original_profile, prof2)
        self.assertEqual(ctx.current_node, current)
        self.assertEqual(ctx.current_profile, prof)
    def test_node_context_no_profile(self):
        """NodeDriverContext tolerates nodes without a service profile."""
        plugin_context = n_ctx.get_admin_context()
        plugin_context.is_admin = plugin_context.is_advsvc = False
        plugin_context.tenant_id = 'test_tenant'
        current = self.create_servicechain_node(
            service_type='TEST',
            expected_res_status=201)['servicechain_node']
        ctx = context.NodeDriverContext(self.plugin, plugin_context,
                                        None, None, current, 0,
                                        None, None)
        self.assertIsNone(ctx.original_node)
        self.assertIsNone(ctx.original_profile)
        self.assertEqual(ctx.current_node, current)
        self.assertIsNone(ctx.current_profile)
        original = self.create_servicechain_node(
            service_type='TEST',
            expected_res_status=201)['servicechain_node']
        ctx = context.NodeDriverContext(
            self.plugin, plugin_context, None, None, current, 0,
            None, None, original_service_chain_node=original)
        self.assertEqual(ctx.original_node, original)
        self.assertIsNone(ctx.original_profile)
        self.assertEqual(ctx.current_node, current)
        self.assertIsNone(ctx.current_profile)
    def test_spec_parameters(self):
        """Spec's config_param_names tracks node additions/removals/updates."""
        params_node_1 = ['p1', 'p2', 'p3']
        params_node_2 = ['p4', 'p5', 'p6']
        params_node_3 = ['p7', 'p8', 'p9']
        def params_dict(params):
            # Serialize a node config declaring the given parameter names.
            return jsonutils.dumps({'Parameters':
                                    dict((x, {}) for x in params)})
        prof = self.create_service_profile(
            service_type='LOADBALANCERV2', shared=True,
            tenant_id='admin')['service_profile']
        # Create 2 nodes with different parameters
        node1 = self.create_servicechain_node(
            service_profile_id=prof['id'], shared=True,
            config=params_dict(params_node_1),
            expected_res_status=201)['servicechain_node']
        node2 = self.create_servicechain_node(
            service_profile_id=prof['id'], shared=True,
            config=params_dict(params_node_2),
            expected_res_status=201)['servicechain_node']
        # Create SC spec with the nodes assigned
        spec = self.create_servicechain_spec(
            nodes=[node1['id'], node2['id']], shared=True,
            expected_res_status=201)['servicechain_spec']
        # Verify param names correspondence
        self.assertEqual(
            collections.Counter(params_node_1 + params_node_2),
            collections.Counter(ast.literal_eval(spec['config_param_names'])))
        # Update the spec removing one node
        self.update_servicechain_spec(spec['id'], nodes=[node1['id']],
                                      expected_res_status=200)
        spec = self.show_servicechain_spec(spec['id'])['servicechain_spec']
        # Verify param names correspondence
        self.assertEqual(
            collections.Counter(params_node_1),
            collections.Counter(ast.literal_eval(spec['config_param_names'])))
        # Update the spec without modifying the node list
        self.update_servicechain_spec(spec['id'],
                                      name='new_name',
                                      expected_res_status=200)
        spec = self.show_servicechain_spec(spec['id'])['servicechain_spec']
        # Verify param names correspondence
        self.assertEqual(
            collections.Counter(params_node_1),
            collections.Counter(ast.literal_eval(spec['config_param_names'])))
        # Update a node with new config params
        self.update_servicechain_node(node1['id'],
                                      config=params_dict(params_node_3),
                                      expected_res_status=200)
        spec = self.show_servicechain_spec(spec['id'])['servicechain_spec']
        # Verify param names correspondence
        self.assertEqual(
            collections.Counter(params_node_3),
            collections.Counter(ast.literal_eval(spec['config_param_names'])))

View File

@ -1,624 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from unittest import mock
import heatclient
from neutron_lib.api.definitions import external_net
from neutron_lib import context as neutron_context
from neutron_lib.plugins import constants
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import webob
from gbpservice.neutron.services.servicechain.plugins.ncp import config
from gbpservice.neutron.services.servicechain.plugins.ncp.node_drivers import (
heat_node_driver as heat_node_driver)
from gbpservice.neutron.services.servicechain.plugins.ncp.node_drivers import (
openstack_heat_api_client as heatClient)
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_resource_mapping as test_gp_driver)
from gbpservice.neutron.tests.unit.services.servicechain.ncp import (
test_ncp_plugin as test_ncp_plugin)
STACK_ACTION_WAIT_TIME = 15
class MockStackObject(object):
    """Minimal heat stack stand-in: exposes only ``stack_status``."""
    def __init__(self, status):
        # The only stack attribute these tests consult.
        self.stack_status = status
class MockHeatClientFunctionsDeleteNotFound(object):
    """Heat ``stacks`` API stub whose delete() always raises HTTPNotFound."""
    def delete(self, stack_id):
        # Simulate an already-deleted stack.
        raise heatclient.exc.HTTPNotFound()
    def create(self, **fields):
        # Pretend creation succeeded; hand back a fresh fake stack id.
        return {'stack': {'id': uuidutils.generate_uuid()}}
    def get(self, stack_id):
        # Report the terminal state so delete-waiters return immediately.
        return MockStackObject('DELETE_COMPLETE')
class MockHeatClientFunctions(object):
    """Happy-path stub of the heat ``stacks`` API.

    ``create``/``update`` return a fresh fake stack id, ``get`` always
    reports ``DELETE_COMPLETE``, and ``delete`` is a silent no-op.
    """
    def delete(self, stack_id):
        # Nothing to tear down for a fake stack.
        return None
    def create(self, **fields):
        new_id = uuidutils.generate_uuid()
        return {'stack': {'id': new_id}}
    def get(self, stack_id):
        return MockStackObject('DELETE_COMPLETE')
    def update(self, *args, **fields):
        new_id = uuidutils.generate_uuid()
        return {'stack': {'id': new_id}}
class MockHeatClientDeleteNotFound(object):
    """Heat client stub whose stacks API raises HTTPNotFound on delete."""
    def __init__(self, api_version, endpoint, **kwargs):
        # Signature mirrors heatclient.client.Client; arguments are ignored.
        self.stacks = MockHeatClientFunctionsDeleteNotFound()
class MockHeatClient(object):
    """Happy-path heat client stub patched in for heatclient.client.Client."""
    def __init__(self, api_version, endpoint, **kwargs):
        # Signature mirrors heatclient.client.Client; arguments are ignored.
        self.stacks = MockHeatClientFunctions()
        # The resources API is not exercised in detail; a MagicMock suffices.
        self.resources = mock.MagicMock()
class HeatNodeDriverTestCase(
        test_ncp_plugin.NodeCompositionPluginTestCase):
    """NCP test case configured with the heat node driver and mocked heat."""
    # Default LBaaS heat template (CFN format) used as node config.
    DEFAULT_LB_CONFIG_DICT = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {
            "test_pool": {
                "Type": "OS::Neutron::LBaaS::Pool",
                "Properties": {
                    "description": "Haproxy pool from template",
                    "lb_algorithm": "ROUND_ROBIN",
                    "protocol": "HTTP",
                    'listener': {'get_resource': 'listener'},
                }
            },
            "test_listener": {
                "Type": "OS::Neutron::LBaaS::Listener",
                "Properties": {
                    "protocol": "HTTP",
                    "protocol_port": 80,
                }
            },
            "test_lb": {
                "Type": "OS::Neutron::LBaaS::LoadBalancer",
                "Properties": {
                    "provider": 'haproxy',
                    'vip_address': '1.1.1.1',
                    'vip_subnet': '1.1.1.0/24',
                }
            }
        }
    }
    DEFAULT_LB_CONFIG = jsonutils.dumps(DEFAULT_LB_CONFIG_DICT)
    # Default firewall heat template (HOT format) used as node config.
    DEFAULT_FW_CONFIG_DICT = {
        "heat_template_version": "2013-05-23",
        "resources": {
            'test_fw': {
                "type": "OS::Neutron::Firewall",
                "properties": {
                    "admin_state_up": True,
                    "firewall_policy_id": {
                        "get_resource": "Firewall_policy"},
                    "name": "testFirewall",
                    "description": "test Firewall"
                }
            },
            'test_fw_policy': {
                "type": "OS::Neutron::FirewallPolicy",
                "properties": {
                    "shared": False,
                    "description": "test firewall policy",
                    "name": "testFWPolicy",
                    "firewall_rules": [{
                        "get_resource": "Rule_1"}],
                    "audited": True
                }
            }
        }
    }
    DEFAULT_FW_CONFIG = jsonutils.dumps(DEFAULT_FW_CONFIG_DICT)
    # Vendor string the heat node driver matches on service profiles.
    SERVICE_PROFILE_VENDOR = 'heat_based_node_driver'
    def setUp(self):
        """Shorten stack waits, mock the heat client, load the driver."""
        config.cfg.CONF.set_override('stack_action_wait_time',
                                     STACK_ACTION_WAIT_TIME,
                                     group='heat_node_driver')
        mock.patch(heatclient.__name__ + ".client.Client",
                   new=MockHeatClient).start()
        super(HeatNodeDriverTestCase, self).setUp(
            node_drivers=['heat_node_driver'],
            node_plumber='stitching_plumber',
            core_plugin=test_gp_driver.CORE_PLUGIN)
    def _create_network(self, fmt, name, admin_state_up, **kwargs):
        """Override the routine for allowing the router:external attribute."""
        # attributes containing a colon should be passed with
        # a double underscore
        new_args = dict(zip([x.replace('__', ':') for x in kwargs],
                            list(kwargs.values())))
        arg_list = new_args.pop('arg_list', ()) + (external_net.EXTERNAL,)
        return super(HeatNodeDriverTestCase, self)._create_network(
            fmt, name, admin_state_up, arg_list=arg_list, **new_args)
    def test_manager_initialized(self):
        """The heat node driver is loaded first and fully initialized."""
        mgr = self.plugin.driver_manager
        self.assertIsInstance(mgr.ordered_drivers[0].obj,
                              heat_node_driver.HeatNodeDriver)
        for driver in mgr.ordered_drivers:
            self.assertTrue(driver.obj.initialized)
    def _create_profiled_servicechain_node(
            self, service_type=constants.LOADBALANCERV2, shared_profile=False,
            profile_tenant_id=None, profile_id=None, **kwargs):
        """Create a SC node, creating/fetching a matching profile and
        defaulting the config to the LB or FW template when not given."""
        if not profile_id:
            prof = self.create_service_profile(
                service_type=service_type,
                shared=shared_profile,
                vendor=self.SERVICE_PROFILE_VENDOR,
                tenant_id=profile_tenant_id or self._tenant_id)[
                    'service_profile']
        else:
            prof = self.get_service_profile(profile_id)
        service_config = kwargs.get('config')
        if not service_config or service_config == '{}':
            # Fall back to the default template for the service type.
            if service_type == constants.FIREWALL:
                kwargs['config'] = self.DEFAULT_FW_CONFIG
            else:
                kwargs['config'] = self.DEFAULT_LB_CONFIG
        return self.create_servicechain_node(
            service_profile_id=prof['id'], **kwargs)
class TestServiceChainInstance(HeatNodeDriverTestCase):
    def _get_node_instance_stacks(self, sc_node_id):
        """Return all ServiceNodeInstanceStack DB rows for *sc_node_id*."""
        context = neutron_context.get_admin_context()
        with context.session.begin(subtransactions=True):
            return (context.session.query(
                heat_node_driver.ServiceNodeInstanceStack).
                filter_by(sc_node_id=sc_node_id).
                all())
    def test_invalid_service_type_rejected(self):
        """Instantiating a chain with an unsupported service type 400s."""
        node_used = self._create_profiled_servicechain_node(
            service_type="test")['servicechain_node']
        spec_used = self.create_servicechain_spec(
            nodes=[node_used['id']])['servicechain_spec']
        provider = self.create_policy_target_group()['policy_target_group']
        classifier = self.create_policy_classifier()['policy_classifier']
        res = self.create_servicechain_instance(
            provider_ptg_id=provider['id'],
            classifier_id=classifier['id'],
            servicechain_specs=[spec_used['id']],
            expected_res_status=webob.exc.HTTPBadRequest.code)
        self.assertEqual('NoDriverAvailableForAction',
                         res['NeutronError']['type'])
    def test_node_create(self):
        """Chain creation issues exactly one heat stack create call."""
        with mock.patch.object(heatClient.HeatClient,
                               'create') as stack_create:
            stack_create.return_value = {'stack': {
                'id': uuidutils.generate_uuid()}}
            self._create_simple_service_chain()
            expected_stack_name = mock.ANY
            expected_stack_params = mock.ANY
            stack_create.assert_called_once_with(
                expected_stack_name,
                self.DEFAULT_LB_CONFIG_DICT,
                expected_stack_params)
def _get_pool_member_resource_dict(self, port):
member_ip = port['fixed_ips'][0]['ip_address']
member_name = 'mem-' + member_ip
member = {member_name: {
'Type': 'OS::Neutron::LBaaS::PoolMember',
'Properties': {
'subnet': {'get_param': 'Subnet'},
'weight': 1,
'admin_state_up': True,
'address': member_ip,
'protocol_port': {'get_param': 'app_port'},
'pool': {'Ref': 'test_pool'}
}
}
}
return member
    def _create_policy_target_port(self, policy_target_group_id):
        """Create a PT in the group and return (policy_target, its port)."""
        pt = self.create_policy_target(
            policy_target_group_id=policy_target_group_id)['policy_target']
        req = self.new_show_request('ports', pt['port_id'], fmt=self.fmt)
        port = self.deserialize(self.fmt,
                                req.get_response(self.api))['port']
        return (pt, port)
    def _create_external_policy(self, consumed_prs, routes=None):
        """Create an external segment + policy consuming *consumed_prs*."""
        with self.network(router__external=True, shared=True) as net:
            with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
                if not routes:
                    # Default external route when the caller supplies none.
                    routes = [{'destination': '172.0.0.0/22', 'nexthop': None}]
                self.create_external_segment(
                    shared=True,
                    name="default",
                    external_routes=routes,
                    subnet_id=sub['subnet']['id'])
                return self.create_external_policy(
                    consumed_policy_rule_sets={consumed_prs: ''})
    def _test_lb_node_create(self, consumer_external=False):
        """Drive an LB chain creation and verify the heat create payload.

        Returns (expected template, provider PTG, stack id) so callers can
        continue exercising member add/delete and cleanup.
        """
        with mock.patch.object(heatClient.HeatClient,
                               'create') as stack_create:
            stack_create.return_value = {'stack': {
                'id': uuidutils.generate_uuid()}}
            node_id = self._create_profiled_servicechain_node(
                service_type=constants.LOADBALANCERV2)[
                    'servicechain_node']['id']
            spec = self.create_servicechain_spec(
                nodes=[node_id],
                expected_res_status=201)['servicechain_spec']
            prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
            provider = self.create_policy_target_group()['policy_target_group']
            _, port1 = self._create_policy_target_port(provider['id'])
            _, port2 = self._create_policy_target_port(provider['id'])
            if consumer_external:
                self._create_external_policy(prs['id'])
            else:
                self.create_policy_target_group(
                    consumed_policy_rule_sets={prs['id']: ''})
            self.update_policy_target_group(
                provider['id'], provided_policy_rule_sets={prs['id']: ''})
            created_stacks_map = self._get_node_instance_stacks(node_id)
            self.assertEqual(1, len(created_stacks_map))
            pool_member1 = self._get_pool_member_resource_dict(port1)
            pool_member2 = self._get_pool_member_resource_dict(port2)
            # Instantiating the chain invokes stack create
            expected_stack_template = copy.deepcopy(
                self.DEFAULT_LB_CONFIG_DICT)
            expected_stack_template['Resources'].update(pool_member1)
            expected_stack_template['Resources'].update(pool_member2)
            expected_stack_name = mock.ANY
            # TODO(Magesh): Verify expected_stack_params with IP address from
            # Network Service Policy
            expected_stack_params = {}
            stack_create.assert_called_once_with(
                expected_stack_name,
                expected_stack_template,
                expected_stack_params)
            return (expected_stack_template, provider,
                    created_stacks_map[0].stack_id)
    def _test_lb_dynamic_pool_member_add(self, expected_stack_template,
                                         provider, stack_id):
        """Add a PT and verify the stack is updated with a new pool member."""
        with mock.patch.object(heatClient.HeatClient,
                               'update') as stack_update:
            stack_update.return_value = {'stack': {
                'id': stack_id}}
            # Creating PT will update the node, thereby adding the PT as an
            # LB Pool Member using heat stack
            pt, port = self._create_policy_target_port(provider['id'])
            pool_member = self._get_pool_member_resource_dict(port)
            expected_stack_template['Resources'].update(pool_member)
            expected_stack_id = stack_id
            expected_stack_params = {}
            stack_update.assert_called_once_with(
                expected_stack_id,
                expected_stack_template,
                expected_stack_params)
            return (pt, pool_member)
    def _test_dynamic_lb_pool_member_delete(self, pt, pool_member,
                                            expected_stack_template,
                                            stack_id):
        """Delete the PT and verify its pool member leaves the template."""
        # Deleting PT will update the node, thereby removing the Pool
        # Member from heat stack
        with mock.patch.object(heatClient.HeatClient,
                               'update') as stack_update:
            self.delete_policy_target(pt['id'])
            template_on_delete_pt = copy.deepcopy(expected_stack_template)
            template_on_delete_pt['Resources'].pop(list(pool_member.keys())[0])
            expected_stack_id = stack_id
            expected_stack_params = {}
            stack_update.assert_called_once_with(
                expected_stack_id,
                template_on_delete_pt,
                expected_stack_params)
    def _test_node_cleanup(self, ptg, stack_id):
        """Tear down the PTG and verify the heat stack gets deleted."""
        with mock.patch.object(heatClient.HeatClient,
                               'delete') as stack_delete:
            self.update_policy_target_group(
                ptg['id'], consumed_policy_rule_sets={},
                expected_res_status=200)
            self.delete_policy_target_group(ptg['id'], expected_res_status=204)
            stack_delete.assert_called_once_with(stack_id)
    def test_lb_node_operations(self):
        """Full LB lifecycle: create, member add/delete, cleanup."""
        expected_stack_template, provider, stack_id = (
            self._test_lb_node_create())
        pt, pool_member = self._test_lb_dynamic_pool_member_add(
            expected_stack_template, provider, stack_id)
        self._test_dynamic_lb_pool_member_delete(
            pt, pool_member, expected_stack_template, stack_id)
        self._test_node_cleanup(provider, stack_id)
    def test_lb_redirect_from_external(self):
        """Same LB lifecycle, but consumed by an external policy."""
        expected_stack_template, provider, stack_id = (
            self._test_lb_node_create(consumer_external=True))
        pt, pool_member = self._test_lb_dynamic_pool_member_add(
            expected_stack_template, provider, stack_id)
        self._test_dynamic_lb_pool_member_delete(
            pt, pool_member, expected_stack_template, stack_id)
        self._test_node_cleanup(provider, stack_id)
    def _create_fwredirect_ruleset(self, classifier_port, classifier_protocol):
        """Build a REDIRECT rule set toward a firewall chain spec.

        Returns (policy_rule_set, firewall node id).
        """
        node_id = self._create_profiled_servicechain_node(
            service_type=constants.FIREWALL)['servicechain_node']['id']
        spec = self.create_servicechain_spec(
            nodes=[node_id],
            expected_res_status=201)['servicechain_spec']
        action = self.create_policy_action(action_type='REDIRECT',
                                           action_value=spec['id'])
        classifier = self.create_policy_classifier(
            port_range=classifier_port, protocol=classifier_protocol,
            direction='bi')
        rule = self.create_policy_rule(
            policy_actions=[action['policy_action']['id']],
            policy_classifier_id=classifier['policy_classifier']['id'])
        rule = rule['policy_rule']
        prs = self.create_policy_rule_set(policy_rules=[rule['id']])
        return (prs['policy_rule_set'], node_id)
    def _get_ptg_cidr(self, ptg):
        """Return the CIDR of the PTG's first subnet (via the subnets API)."""
        req = self.new_show_request(
            'subnets', ptg['subnets'][0], fmt=self.fmt)
        ptg_subnet = self.deserialize(
            self.fmt, req.get_response(self.api))['subnet']
        return ptg_subnet['cidr']
def _get_firewall_rule_dict(self, rule_name, protocol, port, provider_cidr,
consumer_cidr):
if provider_cidr and consumer_cidr:
fw_rule = {rule_name: {'type': "OS::Neutron::FirewallRule",
'properties': {
"protocol": protocol,
"enabled": True,
"destination_port": port,
"action": "allow",
"destination_ip_address": provider_cidr,
"source_ip_address": consumer_cidr
}
}
}
return fw_rule
return {}
    def test_fw_node_east_west(self):
        """East-west FW chain: stack is created with the expected rule set."""
        classifier_port = '66'
        classifier_protocol = 'udp'
        with mock.patch.object(heatClient.HeatClient,
                               'create') as stack_create:
            stack_create.return_value = {'stack': {
                'id': uuidutils.generate_uuid()}}
            prs, node_id = self._create_fwredirect_ruleset(
                classifier_port, classifier_protocol)
            provider = self.create_policy_target_group(
                provided_policy_rule_sets={prs['id']: ''})[
                    'policy_target_group']
            self.create_policy_target_group(
                consumed_policy_rule_sets={prs['id']: ''})
            created_stacks_map = self._get_node_instance_stacks(node_id)
            self.assertEqual(1, len(created_stacks_map))
            stack_id = created_stacks_map[0].stack_id
            provider_cidr = self._get_ptg_cidr(provider)
            # TODO(ivar): This has to be removed once support to consumer list
            # is implemented
            # consumer_cidr = self._get_ptg_cidr(consumer)
            consumer_cidr = []
            # With an empty consumer CIDR the helper returns no rule at all.
            fw_rule = self._get_firewall_rule_dict(
                'Rule_1', classifier_protocol, classifier_port,
                provider_cidr, consumer_cidr)
            expected_stack_template = copy.deepcopy(
                self.DEFAULT_FW_CONFIG_DICT)
            expected_stack_template['resources'][
                'test_fw_policy']['properties']['firewall_rules'] = []
            expected_stack_template['resources'].update(fw_rule)
            expected_stack_name = mock.ANY
            expected_stack_params = {}
            stack_create.assert_called_once_with(
                expected_stack_name,
                expected_stack_template,
                expected_stack_params)
            self._test_node_cleanup(provider, stack_id)
    def _test_fw_node_north_south(self, consumer_cidrs):
        """North-south FW chain via external routes for *consumer_cidrs*."""
        classifier_port = '66'
        classifier_protocol = 'udp'
        with mock.patch.object(heatClient.HeatClient,
                               'create') as stack_create:
            stack_create.return_value = {'stack': {
                'id': uuidutils.generate_uuid()}}
            prs, node_id = self._create_fwredirect_ruleset(
                classifier_port, classifier_protocol)
            provider = self.create_policy_target_group(
                provided_policy_rule_sets={prs['id']: ''})[
                    'policy_target_group']
            routes = []
            for consumer_cidr in consumer_cidrs:
                routes.append({'destination': consumer_cidr, 'nexthop': None})
            self._create_external_policy(prs['id'], routes=routes)
            # TODO(ivar): This has to be removed once support to consumer list
            # is implemented
            consumer_cidrs = []
            created_stacks_map = self._get_node_instance_stacks(node_id)
            self.assertEqual(1, len(created_stacks_map))
            stack_id = created_stacks_map[0].stack_id
            expected_stack_template = copy.deepcopy(
                self.DEFAULT_FW_CONFIG_DICT)
            expected_stack_template['resources']['test_fw_policy'][
                'properties']['firewall_rules'] = []
            provider_cidr = self._get_ptg_cidr(provider)
            # Build one expected rule per consumer CIDR (currently none;
            # see the TODO above).
            rule_num = 1
            for consumer_cidr in consumer_cidrs:
                rule_name = 'Rule_' + str(rule_num)
                fw_rule = self._get_firewall_rule_dict(
                    rule_name, classifier_protocol, classifier_port,
                    provider_cidr, consumer_cidr)
                rule_num = rule_num + 1
                expected_stack_template['resources'].update(fw_rule)
                expected_stack_template['resources']['test_fw_policy'][
                    'properties']['firewall_rules'].append(
                        {'get_resource': rule_name})
            expected_stack_name = mock.ANY
            expected_stack_params = {}
            stack_create.assert_called_once_with(
                expected_stack_name,
                expected_stack_template,
                expected_stack_params)
            self._test_node_cleanup(provider, stack_id)
def test_fw_node_north_south_single_external_cidr(self):
    # North-south firewall chain with a single external (consumer) CIDR.
    self._test_fw_node_north_south(['172.0.0.0/22'])
def test_fw_node_north_south_multiple_external_cidr(self):
    # North-south firewall chain with multiple external (consumer) CIDRs.
    self._test_fw_node_north_south(['172.0.0.0/22', '20.0.0.0/16'])
def test_node_update(self):
    """A name-only node update still results in a Heat stack update."""
    with mock.patch.object(heatClient.HeatClient,
                           'create') as stack_create:
        stack_create.return_value = {'stack': {
            'id': uuidutils.generate_uuid()}}
        prof = self.create_service_profile(
            service_type=constants.LOADBALANCERV2,
            vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
        node = self.create_servicechain_node(
            service_profile_id=prof['id'],
            config=self.DEFAULT_LB_CONFIG,
            expected_res_status=201)['servicechain_node']
        self._create_chain_with_nodes(node_ids=[node['id']])
        with mock.patch.object(heatClient.HeatClient,
                               'update') as stack_update:
            self.update_servicechain_node(
                node['id'],
                name='newname',
                expected_res_status=200)
            # Name update should not update stack ??
            # NOTE(review): the assertion pins the current behavior --
            # any node update triggers one stack update, even a cosmetic
            # rename. Confirm whether that is intended.
            stack_update.assert_called_once_with(
                mock.ANY, mock.ANY, mock.ANY)
def test_node_delete(self):
    """Provider PTG becomes deletable once the provided PRS is removed."""
    create_patch = mock.patch.object(heatClient.HeatClient, 'create')
    with create_patch as mocked_create:
        mocked_create.return_value = {
            'stack': {'id': uuidutils.generate_uuid()}}
        provider = self._create_simple_service_chain()[0]
        with mock.patch.object(heatClient.HeatClient, 'delete'):
            # Dropping the provided PRS tears the chain down, after
            # which the PTG delete must succeed.
            self.update_policy_target_group(
                provider['id'],
                provided_policy_rule_sets={},
                expected_res_status=200)
            self.delete_policy_target_group(
                provider['id'], expected_res_status=204)
def test_wait_stack_delete_for_instance_delete(self):
    """Instance delete polls Heat until the stack reaches DELETE_COMPLETE."""
    with mock.patch.object(heatClient.HeatClient,
                           'create') as stack_create:
        stack_create.return_value = {'stack': {
            'id': uuidutils.generate_uuid()}}
        provider, _, _ = self._create_simple_service_chain()
        # Verify that as part of delete service chain instance we call
        # get method for heat stack 5 times before giving up if the state
        # does not become DELETE_COMPLETE
        with mock.patch.object(heatClient.HeatClient,
                               'delete') as stack_delete:
            with mock.patch.object(heatClient.HeatClient,
                                   'get') as stack_get:
                # Stack never leaves DELETE_IN_PROGRESS in this phase.
                stack_get.return_value = MockStackObject(
                    'DELETE_IN_PROGRESS')
                # Removing the PRSs will make the PTG deletable again
                self.update_policy_target_group(
                    provider['id'],
                    provided_policy_rule_sets={},
                    expected_res_status=200)
                self.delete_policy_target_group(provider['id'],
                                                expected_res_status=204)
                stack_delete.assert_called_once_with(mock.ANY)
        # Create and delete another service chain instance and verify that
        # we call get method for heat stack only once if the stack state
        # is DELETE_COMPLETE
        provider, _, _ = self._create_simple_service_chain()
        with mock.patch.object(heatClient.HeatClient,
                               'delete') as stack_delete:
            with mock.patch.object(heatClient.HeatClient,
                                   'get') as stack_get:
                stack_get.return_value = MockStackObject(
                    'DELETE_COMPLETE')
                # Removing the PRSs will make the PTG deletable again
                self.update_policy_target_group(
                    provider['id'],
                    provided_policy_rule_sets={},
                    expected_res_status=200)
                self.delete_policy_target_group(provider['id'],
                                                expected_res_status=204)
                stack_delete.assert_called_once_with(mock.ANY)
def test_stack_not_found_ignored(self):
    """Chain teardown tolerates Heat reporting the stack as not found."""
    target = heatclient.__name__ + ".client.Client"
    mock.patch(target, new=MockHeatClientDeleteNotFound).start()
    provider = self._create_simple_service_chain()[0]
    # Removing the PRSs will make the PTG deletable again
    self.update_policy_target_group(
        provider['id'],
        provided_policy_rule_sets={},
        expected_res_status=200)
    self.delete_policy_target_group(
        provider['id'], expected_res_status=204)

View File

@ -1,899 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import webob.exc
from neutron.common import config
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import constants as pconst
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_serialization import jsonutils
from gbpservice.neutron.services.grouppolicy import config as gpconfig # noqa
from gbpservice.neutron.services.servicechain.plugins.ncp import (
context as ncp_context)
from gbpservice.neutron.services.servicechain.plugins.ncp import (
exceptions as exc)
from gbpservice.neutron.services.servicechain.plugins.ncp import (
plugin as ncp_plugin)
import gbpservice.neutron.services.servicechain.plugins.ncp.config # noqa
from gbpservice.neutron.services.servicechain.plugins.ncp.node_drivers import (
dummy_driver as dummy_driver)
from gbpservice.neutron.tests.unit.db.grouppolicy import test_group_policy_db
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_resource_mapping as test_gp_driver)
from gbpservice.neutron.tests.unit.services.servicechain import (
base_test_servicechain_plugin as test_base)
class ServiceChainNCPTestPlugin(ncp_plugin.NodeCompositionPlugin):
    """NCP plugin subclass advertising the extensions the tests require."""

    # 'servicechain' plus extensions the GBP DB test base expects to be
    # declared even though this plugin does not implement them.
    supported_extension_aliases = ['servicechain'] + (
        test_group_policy_db.UNSUPPORTED_REQUIRED_EXTS)
    path_prefix = "/servicechain"
# Dotted path used to load the test servicechain plugin above.
SC_PLUGIN_KLASS = (ServiceChainNCPTestPlugin.__module__ + '.' +
                   ServiceChainNCPTestPlugin.__name__)
# Core and group-policy plugins the test cases are wired with.
CORE_PLUGIN = test_gp_driver.CORE_PLUGIN
GP_PLUGIN_KLASS = (
    "gbpservice.neutron.services.grouppolicy.plugin.GroupPolicyPlugin"
)
# Tenant id under which chain-owned resources are created.
CHAIN_TENANT_ID = 'sci_owner'
class NodeCompositionPluginTestMixin(object):
    """Shared helpers for building simple service chains in tests."""

    # Minimal (empty JSON) node config for loadbalancer nodes.
    DEFAULT_LB_CONFIG = '{}'
    SERVICE_PROFILE_VENDOR = 'dummy'

    @property
    def sc_plugin(self):
        # Servicechain service plugin as registered in the plugin directory.
        return directory.get_plugin(pconst.SERVICECHAIN)

    def _create_service_profile(self, **kwargs):
        """Create service profile wrapper that can be used by drivers."""
        return self.create_service_profile(**kwargs)

    def _create_redirect_rule(self, spec_id):
        # Policy rule redirecting bidirectional tcp/80 to the given spec.
        action = self.create_policy_action(action_type='REDIRECT',
                                           action_value=spec_id)
        classifier = self.create_policy_classifier(
            port_range=80, protocol='tcp', direction='bi')
        rule = self.create_policy_rule(
            policy_actions=[action['policy_action']['id']],
            policy_classifier_id=classifier['policy_classifier']['id'])
        return rule

    def _create_redirect_prs(self, spec_id):
        # Policy rule set wrapping a single redirect rule to the spec.
        rule = self._create_redirect_rule(spec_id)['policy_rule']
        prs = self.create_policy_rule_set(policy_rules=[rule['id']])
        return prs

    def _create_simple_service_chain(self, number_of_nodes=1,
                                     service_type='LOADBALANCERV2'):
        # Chain of N identical nodes sharing one service profile.
        # Returns (provider, consumer, prs) via _create_chain_with_nodes.
        prof = self.create_service_profile(
            service_type=service_type,
            vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
        node_ids = []
        for x in range(number_of_nodes):
            node_ids.append(self.create_servicechain_node(
                service_profile_id=prof['id'],
                config=self.DEFAULT_LB_CONFIG,
                expected_res_status=201)['servicechain_node']['id'])
        return self._create_chain_with_nodes(node_ids)

    def _create_chain_with_nodes(self, node_ids=None):
        # Instantiate a chain by providing/consuming a redirect PRS built
        # around a spec containing the given nodes.
        node_ids = node_ids or []
        spec = self.create_servicechain_spec(
            nodes=node_ids,
            expected_res_status=201)['servicechain_spec']
        prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
        provider = self.create_policy_target_group(
            provided_policy_rule_sets={prs['id']: ''})['policy_target_group']
        consumer = self.create_policy_target_group(
            consumed_policy_rule_sets={prs['id']: ''})['policy_target_group']
        return provider, consumer, prs

    def _add_node_driver(self, name):
        # Register an extra Noop node driver with the driver manager,
        # wrapped in a Mock extension object as stevedore would provide.
        inst = dummy_driver.NoopNodeDriver()
        inst.initialize(name)
        ext = mock.Mock()
        ext.obj = inst
        self.sc_plugin.driver_manager.ordered_drivers.append(ext)
        self.sc_plugin.driver_manager.drivers[name] = ext
class NodeCompositionPluginTestCase(
        test_base.BaseTestGroupPolicyPluginGroupResources,
        NodeCompositionPluginTestMixin):
    """NCP plugin behavior tests using the dummy node driver and plumber."""

    def setUp(self, core_plugin=None, gp_plugin=None, node_drivers=None,
              node_plumber=None):
        # Configure the proxy_group extension and, unless the caller
        # overrides them, the dummy node driver/plumber so no real
        # backend is exercised.
        cfg.CONF.set_override(
            'extension_drivers', ['proxy_group'], group='group_policy')
        if node_drivers:
            cfg.CONF.set_override('node_drivers', node_drivers,
                                  group='node_composition_plugin')
        cfg.CONF.set_override('node_plumber', node_plumber or 'dummy_plumber',
                              group='node_composition_plugin')
        config.cfg.CONF.set_override('policy_drivers',
                                     ['implicit_policy', 'resource_mapping',
                                      'chain_mapping'],
                                     group='group_policy')
        super(NodeCompositionPluginTestCase, self).setUp(
            core_plugin=core_plugin or CORE_PLUGIN,
            gp_plugin=gp_plugin or GP_PLUGIN_KLASS,
            sc_plugin=SC_PLUGIN_KLASS)
        # First configured node driver; individual tests stub its hooks.
        self.driver = self.sc_plugin.driver_manager.ordered_drivers[0].obj
def _create_simple_chain(self):
    """Build a one-node LB chain; return (provider, consumer, node)."""
    node = self._create_profiled_servicechain_node(
        service_type="LOADBALANCERV2",
        config=self.DEFAULT_LB_CONFIG)['servicechain_node']
    spec = self.create_servicechain_spec(
        nodes=[node['id']])['servicechain_spec']
    # Redirect bidirectional tcp/80 traffic into the spec.
    redirect = self.create_policy_action(
        action_type='REDIRECT', action_value=spec['id'])['policy_action']
    match = self.create_policy_classifier(
        direction='bi', port_range=80, protocol='tcp')['policy_classifier']
    rule = self.create_policy_rule(
        policy_classifier_id=match['id'],
        policy_actions=[redirect['id']])['policy_rule']
    ruleset = self.create_policy_rule_set(
        policy_rules=[rule['id']])['policy_rule_set']
    # Providing and consuming the PRS instantiates the chain.
    provider = self.create_policy_target_group(
        provided_policy_rule_sets={ruleset['id']: ''})[
            'policy_target_group']
    consumer = self.create_policy_target_group(
        consumed_policy_rule_sets={ruleset['id']: ''})[
            'policy_target_group']
    return provider, consumer, node
def test_spec_ordering_list_servicechain_instances(self):
    # Deliberately a no-op: overrides the inherited test to skip it for
    # NCP. NOTE(review): presumably the base-class scenario does not
    # apply here -- confirm against the base test class.
    pass
def test_context_attributes(self):
    # Verify Context attributes for simple config
    plugin_context = n_context.get_admin_context()
    profile = self._create_service_profile(
        service_type="LOADBALANCERV2",
        vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
    node = self.create_servicechain_node(
        service_profile_id=profile['id'],
        config=self.DEFAULT_LB_CONFIG)['servicechain_node']
    spec = self.create_servicechain_spec(
        nodes=[node['id']])['servicechain_spec']
    provider = self.create_policy_target_group()['policy_target_group']
    self.create_policy_target_group()
    # Admin-owned service management PTG the context should expose.
    management = self.create_policy_target_group(
        service_management=True,
        is_admin_context=True)['policy_target_group']
    classifier = self.create_policy_classifier()['policy_classifier']
    instance = self.create_servicechain_instance(
        provider_ptg_id=provider['id'], consumer_ptg_id='N/A',
        servicechain_specs=[spec['id']], classifier_id=classifier['id'])[
            'servicechain_instance']
    # Verify created without errors
    ctx = ncp_context.get_node_driver_context(
        self.plugin, plugin_context, instance, node)
    self.assertIsNotNone(ctx.gbp_plugin)
    self.assertIsNotNone(ctx.sc_plugin)
    self.assertIsNotNone(ctx.core_plugin)
    self.assertIsNotNone(ctx.plugin_context)
    self.assertIsNotNone(ctx.plugin_session)
    self.assertIsNotNone(ctx.session)
    self.assertIsNotNone(ctx.admin_context)
    self.assertIsNotNone(ctx.admin_session)
    # Drop the nodes list added by the context before comparing ids.
    del ctx.current_profile['nodes']
    self.assertEqual(ctx.current_profile['id'], profile['id'])
    self.assertEqual(instance['id'], ctx.instance['id'])
    self.assertEqual(provider['id'], ctx.provider['id'])
    self.assertIsNone(ctx.consumer)
    self.assertEqual(management['id'], ctx.management['id'])
    self.assertEqual([spec['id']], [x['id'] for x in ctx.relevant_specs])
    self.assertIsNone(ctx.original_node)
    self.assertEqual(0, len(ctx.get_service_targets()))
    # With a dangling provider id the context resolves neither side.
    instance['provider_ptg_id'] = 'dummy-id'
    ctx = ncp_context.get_node_driver_context(
        self.plugin, plugin_context, instance, node)
    self.assertIsNone(ctx.provider)
    self.assertIsNone(ctx.consumer)
def test_context_relevant_specs(self):
    """relevant_specs exposes exactly the spec attached to the instance."""
    admin_ctx = n_context.get_admin_context()
    node_used = self._create_profiled_servicechain_node(
        service_type="LOADBALANCERV2",
        config=self.DEFAULT_LB_CONFIG)['servicechain_node']
    spec_used = self.create_servicechain_spec(
        nodes=[node_used['id']])['servicechain_spec']
    provider = self.create_policy_target_group()['policy_target_group']
    classifier = self.create_policy_classifier()['policy_classifier']
    instance = self.create_servicechain_instance(
        provider_ptg_id=provider['id'],
        classifier_id=classifier['id'],
        servicechain_specs=[spec_used['id']])['servicechain_instance']
    ctx = ncp_context.get_node_driver_context(
        self.plugin, admin_ctx, instance, node_used)
    relevant_ids = [spec['id'] for spec in ctx.relevant_specs]
    self.assertEqual([spec_used['id']], relevant_ids)
def test_manager_initialized(self):
    """Driver manager loads the Noop driver and initializes every driver."""
    manager = self.plugin.driver_manager
    first_driver = manager.ordered_drivers[0].obj
    self.assertIsInstance(first_driver, dummy_driver.NoopNodeDriver)
    for loaded in manager.ordered_drivers:
        self.assertTrue(loaded.obj.initialized)
def test_spec_parameters(self):
    """Test that config_param_names is empty when using NCP.

    In NCP the config attribute of a node may be something different than
    a HEAT template, therefore config_param_names is not used.
    """
    params_node_1 = ['p1', 'p2', 'p3']
    params_node_2 = ['p4', 'p5', 'p6']
    params_node_3 = ['p7', 'p8', 'p9']

    def params_dict(params):
        # HEAT-style config body declaring the given parameter names.
        return jsonutils.dumps({'Parameters':
                                dict((x, {}) for x in params)})

    prof = self._create_service_profile(
        service_type='LOADBALANCERV2', shared=True,
        vendor=self.SERVICE_PROFILE_VENDOR,
        tenant_id='admin')['service_profile']
    # Create 2 nodes with different parameters
    node1 = self.create_servicechain_node(
        service_profile_id=prof['id'], shared=True,
        config=params_dict(params_node_1),
        expected_res_status=201)['servicechain_node']
    node2 = self.create_servicechain_node(
        service_profile_id=prof['id'], shared=True,
        config=params_dict(params_node_2),
        expected_res_status=201)['servicechain_node']
    # Create SC spec with the nodes assigned
    spec = self.create_servicechain_spec(
        nodes=[node1['id'], node2['id']], shared=True,
        expected_res_status=201)['servicechain_spec']
    # Verify param names is empty
    self.assertIsNone(spec['config_param_names'])
    # Update the spec removing one node
    self.update_servicechain_spec(spec['id'], nodes=[node1['id']],
                                  expected_res_status=200)
    spec = self.show_servicechain_spec(spec['id'])['servicechain_spec']
    # Verify param names is empty
    self.assertIsNone(spec['config_param_names'])
    # Update a node with new config params
    self.update_servicechain_node(node1['id'],
                                  config=params_dict(params_node_3),
                                  expected_res_status=200)
    spec = self.show_servicechain_spec(spec['id'])['servicechain_spec']
    # Verify param names is empty
    self.assertIsNone(spec['config_param_names'])
def test_create_service_chain(self):
    """Driver create runs once per node; delete runs on chain teardown."""
    deploy = self.driver.create = mock.Mock()
    destroy = self.driver.delete = mock.Mock()
    self._create_simple_service_chain(1)
    self.assertEqual(1, deploy.call_count)
    self.assertEqual(0, destroy.call_count)
    deploy.reset_mock()
    provider, _, _ = self._create_simple_service_chain(3)
    self.assertEqual(3, deploy.call_count)
    self.assertEqual(0, destroy.call_count)
    # Unproviding the PRS destroys the chain: one delete per node.
    self.update_policy_target_group(provider['id'],
                                    provided_policy_rule_sets={})
    self.assertEqual(3, deploy.call_count)
    self.assertEqual(3, destroy.call_count)
def test_update_service_chain(self):
    """Updating the chain instance invokes the driver's update hook."""
    deploy = self.driver.create = mock.Mock()
    update = self.driver.update = mock.Mock()
    destroy = self.driver.delete = mock.Mock()
    provider, _, prs = self._create_simple_service_chain(1)
    self.assertEqual(1, deploy.call_count)
    self.assertEqual(0, destroy.call_count)
    # REVISIT(Magesh): When bug #1446587 is fixed, we should test by
    # performing a classifier or rule update instead of SC instance update
    instances = self._list('servicechain_instances')[
        'servicechain_instances']
    self.assertEqual(1, len(instances))
    self.update_servicechain_instance(
        instances[0]['id'],
        expected_res_status=200)
    self.assertEqual(1, update.call_count)
    self.assertEqual(0, destroy.call_count)
def test_create_service_chain_fails(self):
    """If the first node deploy fails, every node of the chain is destroyed."""
    deploy = self.driver.create = mock.Mock()
    destroy = self.driver.delete = mock.Mock()
    deploy.side_effect = Exception
    # The create is expected to raise; only the rollback counts below
    # matter, so the exception is deliberately swallowed.
    try:
        self._create_simple_service_chain(3)
    except Exception:
        pass
    self.assertEqual(1, deploy.call_count)
    self.assertEqual(3, destroy.call_count)
def test_update_node_fails(self):
    """A driver veto in validate_update surfaces as a 400 API error."""
    validate_update = self.driver.validate_update = mock.Mock()
    prof = self._create_service_profile(
        service_type='LOADBALANCERV2',
        vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
    node_id = self.create_servicechain_node(
        service_profile_id=prof['id'],
        config=self.DEFAULT_LB_CONFIG,
        expected_res_status=201)['servicechain_node']['id']
    spec = self.create_servicechain_spec(
        nodes=[node_id],
        expected_res_status=201)['servicechain_spec']
    prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
    # Instantiate a chain so the node update actually reaches the driver.
    self.create_policy_target_group(
        provided_policy_rule_sets={prs['id']: ''})
    self.create_policy_target_group(
        consumed_policy_rule_sets={prs['id']: ''})
    validate_update.side_effect = exc.NodeCompositionPluginBadRequest(
        resource='node', msg='reason')
    res = self.update_servicechain_node(node_id,
                                        description='somethingelse',
                                        expected_res_status=400)
    self.assertEqual('NodeCompositionPluginBadRequest',
                     res['NeutronError']['type'])
def test_update_instantiated_profile_fails(self):
    """A profile backing a live chain instance rejects updates."""
    profile = self._create_service_profile(
        service_type='LOADBALANCERV2',
        vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
    node_id = self.create_servicechain_node(
        service_profile_id=profile['id'],
        config=self.DEFAULT_LB_CONFIG,
        expected_res_status=201)['servicechain_node']['id']
    spec = self.create_servicechain_spec(
        nodes=[node_id], expected_res_status=201)['servicechain_spec']
    ruleset = self._create_redirect_prs(spec['id'])['policy_rule_set']
    # Providing and consuming the PRS instantiates a chain on the node.
    self.create_policy_target_group(
        provided_policy_rule_sets={ruleset['id']: ''})
    self.create_policy_target_group(
        consumed_policy_rule_sets={ruleset['id']: ''})
    result = self.update_service_profile(profile['id'],
                                         vendor='somethingelse',
                                         expected_res_status=400)
    self.assertEqual('ServiceProfileInUseByAnInstance',
                     result['NeutronError']['type'])
def test_second_driver_scheduled_if_first_fails(self):
    """If the first driver rejects the node, the next one is scheduled."""
    self._add_node_driver('test')
    drivers = [x.obj for x in
               self.sc_plugin.driver_manager.ordered_drivers]
    # Make only the first driver's validation fail.
    create_1 = drivers[0].validate_create = mock.Mock()
    create_1.side_effect = n_exc.NeutronException()
    # This happens without error
    profile = self._create_service_profile(
        service_type="TYPE",
        vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
    node = self.create_servicechain_node(
        service_profile_id=profile['id'],
        config=self.DEFAULT_LB_CONFIG)['servicechain_node']
    spec = self.create_servicechain_spec(
        nodes=[node['id']])['servicechain_spec']
    provider = self.create_policy_target_group()['policy_target_group']
    classifier = self.create_policy_classifier()['policy_classifier']
    # Instance creation succeeds because the second driver accepts.
    self.create_servicechain_instance(
        provider_ptg_id=provider['id'], consumer_ptg_id='N/A',
        servicechain_specs=[spec['id']], classifier_id=classifier['id'],
        expected_res_status=201)
def test_chain_fails_if_no_drivers_available(self):
    """Instance creation is a 400 when every driver rejects the node."""
    self._add_node_driver('test')
    drivers = [x.obj for x in
               self.sc_plugin.driver_manager.ordered_drivers]
    # Both drivers' validations fail, leaving no driver to schedule.
    create_1 = drivers[0].validate_create = mock.Mock()
    create_1.side_effect = n_exc.NeutronException()
    create_2 = drivers[1].validate_create = mock.Mock()
    create_2.side_effect = n_exc.NeutronException()
    profile = self._create_service_profile(
        service_type="TYPE",
        vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
    node = self.create_servicechain_node(
        service_profile_id=profile['id'],
        config=self.DEFAULT_LB_CONFIG)['servicechain_node']
    spec = self.create_servicechain_spec(
        nodes=[node['id']])['servicechain_spec']
    provider = self.create_policy_target_group()['policy_target_group']
    classifier = self.create_policy_classifier()['policy_classifier']
    self.create_servicechain_instance(
        provider_ptg_id=provider['id'], consumer_ptg_id='N/A',
        servicechain_specs=[spec['id']], classifier_id=classifier['id'],
        expected_res_status=400)
def test_multiple_nodes_update(self):
    """A node update notifies the driver once per chain using that node."""
    update = self.driver.update = mock.Mock()
    profile = self._create_service_profile(
        service_type='LOADBALANCERV2',
        vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
    node = self.create_servicechain_node(
        service_profile_id=profile['id'],
        config=self.DEFAULT_LB_CONFIG)['servicechain_node']
    # One chain -> one driver update.
    self._create_chain_with_nodes([node['id']])
    self.update_servicechain_node(node['id'], name='somethingelse')
    self.assertEqual(1, update.call_count)
    update.reset_mock()
    # Two more chains on the same node -> three updates in total.
    for _ in range(2):
        self._create_chain_with_nodes([node['id']])
    self.update_servicechain_node(node['id'], name='somethingelse')
    self.assertEqual(3, update.call_count)
def test_inuse_spec_node_update_rejected(self):
    """A spec backing a live chain cannot have its node list changed."""
    prof = self.create_service_profile(
        service_type='LOADBALANCERV2',
        vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
    node1 = self.create_servicechain_node(
        service_profile_id=prof['id'],
        config=self.DEFAULT_LB_CONFIG)['servicechain_node']
    node2 = self.create_servicechain_node(
        service_profile_id=prof['id'],
        config=self.DEFAULT_LB_CONFIG)['servicechain_node']
    spec = self.create_servicechain_spec(
        nodes=[node1['id'], node2['id']],
        expected_res_status=201)['servicechain_spec']
    prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
    # Providing and consuming the PRS puts the spec in use.
    self.create_policy_target_group(
        provided_policy_rule_sets={prs['id']: ''})
    self.create_policy_target_group(
        consumed_policy_rule_sets={prs['id']: ''})
    res = self.update_servicechain_spec(spec['id'],
                                        nodes=[node1['id']],
                                        expected_res_status=400)
    self.assertEqual('InuseSpecNodeUpdateNotAllowed',
                     res['NeutronError']['type'])
def test_instance_update(self):
    """A chain instance can be repointed at a different spec."""
    prof = self.create_service_profile(
        service_type='LOADBALANCERV2',
        vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
    node1 = self.create_servicechain_node(
        service_profile_id=prof['id'],
        config=self.DEFAULT_LB_CONFIG)['servicechain_node']
    node2 = self.create_servicechain_node(
        service_profile_id=prof['id'],
        config=self.DEFAULT_LB_CONFIG)['servicechain_node']
    spec = self.create_servicechain_spec(
        nodes=[node1['id'], node2['id']],
        expected_res_status=201)['servicechain_spec']
    prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
    # Instantiate the chain by providing/consuming the PRS.
    self.create_policy_target_group(
        provided_policy_rule_sets={prs['id']: ''})
    self.create_policy_target_group(
        consumed_policy_rule_sets={prs['id']: ''})
    instances = self._list('servicechain_instances')[
        'servicechain_instances']
    self.assertEqual(1, len(instances))
    # Switch the instance to a new single-node spec.
    spec2 = self.create_servicechain_spec(
        nodes=[node1['id']],
        expected_res_status=201)['servicechain_spec']
    res = self.update_servicechain_instance(
        instances[0]['id'], servicechain_specs=[spec2['id']],
        expected_res_status=200)
    self.assertEqual([spec2['id']],
                     res['servicechain_instance']['servicechain_specs'])
def test_relevant_ptg_update(self):
    """PT add/remove in the chain's provider PTG notifies the node driver."""
    add = self.driver.update_policy_target_added = mock.Mock()
    rem = self.driver.update_policy_target_removed = mock.Mock()
    prof = self._create_service_profile(
        service_type='LOADBALANCERV2',
        vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
    node = self.create_servicechain_node(
        service_profile_id=prof['id'],
        config=self.DEFAULT_LB_CONFIG,
        expected_res_status=201)['servicechain_node']
    spec = self.create_servicechain_spec(
        nodes=[node['id']],
        expected_res_status=201)['servicechain_spec']
    prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
    provider = self.create_policy_target_group(
        provided_policy_rule_sets={prs['id']: ''})['policy_target_group']
    self.create_policy_target_group(
        consumed_policy_rule_sets={prs['id']: ''})
    # Verify notification issued for created PT in the provider
    pt = self.create_policy_target(
        policy_target_group_id=provider['id'])['policy_target']
    # The add-notification payload carries empty port_attributes, so the
    # local dict is adjusted to match it for the assertion.
    pt['port_attributes'] = {}
    self.assertEqual(1, add.call_count)
    add.assert_called_with(mock.ANY, pt)
    del pt['port_attributes']
    # Verify notification issued for deleted PT in the provider
    self.delete_policy_target(pt['id'])
    self.assertEqual(1, rem.call_count)
    rem.assert_called_with(mock.ANY, pt)
def test_irrelevant_ptg_update(self):
    """PT events in an unrelated PTG must not notify the node driver."""
    added = self.driver.update_policy_target_added = mock.Mock()
    removed = self.driver.update_policy_target_removed = mock.Mock()
    profile = self._create_service_profile(
        service_type='LOADBALANCERV2',
        vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
    node = self.create_servicechain_node(
        service_profile_id=profile['id'],
        config=self.DEFAULT_LB_CONFIG,
        expected_res_status=201)['servicechain_node']
    spec = self.create_servicechain_spec(
        nodes=[node['id']], expected_res_status=201)['servicechain_spec']
    ruleset = self._create_redirect_prs(spec['id'])['policy_rule_set']
    # A chain exists between these two PTGs...
    self.create_policy_target_group(
        provided_policy_rule_sets={ruleset['id']: ''})
    self.create_policy_target_group(
        consumed_policy_rule_sets={ruleset['id']: ''})
    # ...but the PT lives in a third, unrelated group.
    unrelated = self.create_policy_target_group()['policy_target_group']
    pt = self.create_policy_target(
        policy_target_group_id=unrelated['id'])['policy_target']
    self.assertFalse(added.called)
    self.delete_policy_target(pt['id'])
    self.assertFalse(removed.called)
def test_notify_chain_update_hook(self):
    """A classifier update fires notify_chain_parameters_updated."""
    update_hook = self.driver.notify_chain_parameters_updated = mock.Mock()
    prof = self.create_service_profile(
        service_type='LOADBALANCERV2',
        vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
    node = self.create_servicechain_node(
        service_profile_id=prof['id'],
        config=self.DEFAULT_LB_CONFIG,
        expected_res_status=201)['servicechain_node']
    spec = self.create_servicechain_spec(
        nodes=[node['id']],
        expected_res_status=201)['servicechain_spec']
    # PRS is built inline (not via _create_redirect_prs) so the test
    # keeps a handle on the classifier to update later.
    action = self.create_policy_action(action_type='REDIRECT',
                                       action_value=spec['id'])
    classifier = self.create_policy_classifier(
        port_range=80, protocol='tcp', direction='bi')['policy_classifier']
    rule = self.create_policy_rule(
        policy_actions=[action['policy_action']['id']],
        policy_classifier_id=classifier['id'])['policy_rule']
    prs = self.create_policy_rule_set(
        policy_rules=[rule['id']])['policy_rule_set']
    self.create_policy_target_group(
        provided_policy_rule_sets={prs['id']: ''})
    self.create_policy_target_group(
        consumed_policy_rule_sets={prs['id']: ''})
    instances = self._list('servicechain_instances')[
        'servicechain_instances']
    self.assertEqual(1, len(instances))
    # Changing the classifier's port must notify the driver.
    self.update_policy_classifier(classifier['id'], port_range=22)
    update_hook.assert_called_with(mock.ANY)
def test_context_no_management(self):
    # Verify Context attributes for simple config
    # Non-admin context: admin-only management PTGs should be invisible
    # unless shared; a tenant-private one always wins.
    plugin_context = n_context.get_admin_context()
    plugin_context.is_admin = False
    plugin_context.is_advsvc = False
    plugin_context.tenant_id = 'test-tenant'
    node = self._create_profiled_servicechain_node()['servicechain_node']
    spec = self.create_servicechain_spec(
        nodes=[node['id']])['servicechain_spec']
    provider = self.create_policy_target_group()['policy_target_group']
    # Verify admin created SM is None
    management = self.create_policy_target_group(
        service_management=True, tenant_id='admin',
        is_admin_context=True)['policy_target_group']
    pc = self.create_policy_classifier()['policy_classifier']
    instance = self.create_servicechain_instance(
        provider_ptg_id=provider['id'], consumer_ptg_id='N/A',
        servicechain_specs=[spec['id']],
        classifier_id=pc['id'])['servicechain_instance']
    ctx = ncp_context.get_node_driver_context(
        self.plugin, plugin_context, instance, node)
    self.assertIsNone(ctx.management)
    self.delete_policy_target_group(management['id'],
                                    is_admin_context=True)
    # A shared admin-owned management PTG is visible to the tenant.
    shared_management = self.create_policy_target_group(
        service_management=True, tenant_id='admin',
        is_admin_context=True, shared=True)['policy_target_group']
    instance = self.create_servicechain_instance(
        provider_ptg_id=provider['id'], consumer_ptg_id='N/A',
        servicechain_specs=[spec['id']],
        classifier_id=pc['id'])['servicechain_instance']
    # Now admin Service Management PTG is visible
    ctx = ncp_context.get_node_driver_context(
        self.plugin, plugin_context, instance, node)
    self.assertEqual(shared_management['id'], ctx.management['id'])
    # Private management overrides shared one
    private_management = self.create_policy_target_group(
        service_management=True,
        is_admin_context=True)['policy_target_group']
    instance = self.create_servicechain_instance(
        provider_ptg_id=provider['id'], consumer_ptg_id='N/A',
        servicechain_specs=[spec['id']],
        classifier_id=pc['id'])['servicechain_instance']
    ctx = ncp_context.get_node_driver_context(
        self.plugin, plugin_context, instance, node)
    self.assertEqual(private_management['id'], ctx.management['id'])
def test_node_drivers_notified_consumer_event(self):
    """Consumer PTG add/remove events are propagated to node drivers."""
    add = self.driver.update_node_consumer_ptg_added = mock.Mock()
    rem = self.driver.update_node_consumer_ptg_removed = mock.Mock()
    prof = self._create_service_profile(
        service_type='LOADBALANCERV2',
        vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
    node = self.create_servicechain_node(
        service_profile_id=prof['id'],
        config=self.DEFAULT_LB_CONFIG,
        expected_res_status=201)['servicechain_node']
    spec = self.create_servicechain_spec(
        nodes=[node['id']],
        expected_res_status=201)['servicechain_spec']
    prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
    self.create_policy_target_group(
        provided_policy_rule_sets={prs['id']: ''})
    consumer = self.create_policy_target_group(
        consumed_policy_rule_sets={prs['id']: ''})['policy_target_group']
    # Verify notification issued for PTG consuming
    add.assert_called_with(mock.ANY, consumer)
    # Verify notification issued for PTG unconsuming
    consumer = self.update_policy_target_group(
        consumer['id'],
        consumed_policy_rule_sets={})['policy_target_group']
    rem.assert_called_with(mock.ANY, consumer)
    # Deleting a consumer PTG must also notify via
    # update_chains_consumer_removed, with its consumed PRSs cleared.
    provider, consumer, prs = self._create_simple_service_chain(3)
    with mock.patch.object(ncp_plugin.NodeCompositionPlugin,
                           "update_chains_consumer_removed") as ptg_removed:
        plugin_context = n_context.get_admin_context()
        self._gbp_plugin.delete_policy_target_group(
            plugin_context, consumer['id'])
        self.assertEqual(ptg_removed.call_count, 1)
        consumer['consumed_policy_rule_sets'] = []
        ptg_removed.assert_called_once_with(
            mock.ANY, consumer, mock.ANY)
    add.reset_mock()
    rem.reset_mock()
def test_no_unrelated_chains_notified(self):
    """Consumer events on a chainless PRS do not notify node drivers."""
    add = self.driver.update_node_consumer_ptg_added = mock.Mock()
    rem = self.driver.update_node_consumer_ptg_removed = mock.Mock()
    prof = self._create_service_profile(
        service_type='LOADBALANCERV2',
        vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
    node = self.create_servicechain_node(
        service_profile_id=prof['id'],
        config=self.DEFAULT_LB_CONFIG,
        expected_res_status=201)['servicechain_node']
    spec = self.create_servicechain_spec(
        nodes=[node['id']],
        expected_res_status=201)['servicechain_spec']
    prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
    # This creates a chain
    self.create_policy_target_group(
        provided_policy_rule_sets={prs['id']: ''})
    # Create a PRS and assign a consumer with no provider (hence, no chain)
    prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
    ptg = self.create_policy_target_group(
        consumed_policy_rule_sets={prs['id']: ''})['policy_target_group']
    # No notification should be issued
    self.assertFalse(add.called)
    self.assertFalse(rem.called)
    # Remove the consumer
    self.update_policy_target_group(ptg['id'],
                                    consumed_policy_rule_sets={},
                                    expected_res_status=200)
    # No notification should be issued
    self.assertFalse(add.called)
    self.assertFalse(rem.called)
def test_node_drivers_notified_provider_updated(self):
    """Provider PTG create/update fires policy_target_group_updated."""
    upd = self.driver.policy_target_group_updated = mock.Mock()
    prof = self._create_service_profile(
        service_type='LOADBALANCERV2',
        vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
    node = self.create_servicechain_node(
        service_profile_id=prof['id'],
        config=self.DEFAULT_LB_CONFIG,
        expected_res_status=201)['servicechain_node']
    spec = self.create_servicechain_spec(
        nodes=[node['id']],
        expected_res_status=201)['servicechain_spec']
    prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
    provider = self.create_policy_target_group(
        provided_policy_rule_sets={prs['id']: ''})['policy_target_group']
    # TODO(Sumit): Remove the following mocks
    # once Heat node driver supports reporting status
    provider['status'] = mock.ANY
    provider['status_details'] = mock.ANY
    # Verify notification issued for PTG consuming
    # (on create the "old" PTG argument is None)
    upd.assert_called_with(mock.ANY, None, provider)
    upd.reset_mock()
    # Verify notification issued for PTG consuming
    # (on update both old and new PTG dicts are passed)
    new_provider = self.update_policy_target_group(
        provider['id'],
        consumed_policy_rule_sets={prs['id']: ''})['policy_target_group']
    upd.assert_called_with(mock.ANY, provider, new_provider)
    upd.reset_mock()
class TestQuotasForServiceChain(test_base.ServiceChainPluginTestCase):
    """Quota enforcement for service chain resources.

    setUp caps every service chain quota (node, spec, instance, service
    profile) at 1, so the second create of each resource kind must be
    rejected with an HTTP client error.
    """

    @property
    def sc_plugin(self):
        # Resolve the service chain plugin lazily from the plugin directory.
        return directory.get_plugin(pconst.SERVICECHAIN)

    def setUp(self, core_plugin=None, gp_plugin=None, node_drivers=None,
              node_plumber=None):
        """Load NCP with the given (or dummy) plumber and set quotas to 1."""
        if node_drivers:
            cfg.CONF.set_override('node_drivers', node_drivers,
                                  group='node_composition_plugin')
        cfg.CONF.set_override('node_plumber', node_plumber or 'dummy_plumber',
                              group='node_composition_plugin')
        config.cfg.CONF.set_override('policy_drivers',
                                     ['implicit_policy', 'resource_mapping',
                                      'chain_mapping'],
                                     group='group_policy')
        super(TestQuotasForServiceChain, self).setUp(
            core_plugin=core_plugin or CORE_PLUGIN,
            gp_plugin=gp_plugin or GP_PLUGIN_KLASS,
            sc_plugin=SC_PLUGIN_KLASS)
        self.driver = self.sc_plugin.driver_manager.ordered_drivers[0].obj
        # Cap each service chain resource at one per tenant so the second
        # create in every test below trips the quota.
        cfg.CONF.set_override('quota_servicechain_node', 1,
                              group='QUOTAS')
        cfg.CONF.set_override('quota_servicechain_spec', 1,
                              group='QUOTAS')
        cfg.CONF.set_override('quota_servicechain_instance', 1,
                              group='QUOTAS')
        cfg.CONF.set_override('quota_service_profile', 1,
                              group='QUOTAS')

    def tearDown(self):
        # Restore unlimited (-1) quotas so other test cases are unaffected.
        cfg.CONF.set_override('quota_servicechain_node', -1,
                              group='QUOTAS')
        cfg.CONF.set_override('quota_servicechain_spec', -1,
                              group='QUOTAS')
        cfg.CONF.set_override('quota_servicechain_instance', -1,
                              group='QUOTAS')
        cfg.CONF.set_override('quota_service_profile', -1,
                              group='QUOTAS')
        super(TestQuotasForServiceChain, self).tearDown()

    def test_servicechain_node_quota(self):
        # First node fits the quota; second must be rejected.
        self.create_servicechain_node()
        self.assertRaises(webob.exc.HTTPClientError,
                          self.create_servicechain_node)

    def test_servicechain_spec_quota(self):
        # First spec fits the quota; second must be rejected.
        self.create_servicechain_spec()
        self.assertRaises(webob.exc.HTTPClientError,
                          self.create_servicechain_spec)

    def test_servicechain_instance_quota(self):
        # First instance fits the quota; second must be rejected.
        self.create_servicechain_instance()
        self.assertRaises(webob.exc.HTTPClientError,
                          self.create_servicechain_instance)

    def test_service_profile(self):
        # First profile fits the quota; second must be rejected.
        self.create_service_profile(service_type=pconst.FIREWALL)
        self.assertRaises(webob.exc.HTTPClientError,
                          self.create_service_profile,
                          service_type=pconst.FIREWALL)

    def test_quota_implicit_service_instance(self):
        """Implicit chain instantiation also counts against the quota."""
        prof = self.create_service_profile(
            service_type='LOADBALANCERV2',
            vendor="vendor")['service_profile']
        node1_id = self.create_servicechain_node(
            service_profile_id=prof['id'], config="{}",
            expected_res_status=201)['servicechain_node']['id']
        spec = self.create_servicechain_spec(
            nodes=[node1_id],
            expected_res_status=201)['servicechain_spec']
        action = self.create_policy_action(action_type='REDIRECT',
                                           action_value=spec['id'])
        classifier = self.create_policy_classifier(
            port_range=80, protocol='tcp', direction='bi')
        rule = self.create_policy_rule(
            policy_actions=[action['policy_action']['id']],
            policy_classifier_id=classifier['policy_classifier']['id'])
        prs = self.create_policy_rule_set(
            policy_rules=[rule['policy_rule']['id']])['policy_rule_set']
        # Provider + consumer pair implicitly creates one chain instance.
        self.create_policy_target_group(
            provided_policy_rule_sets={prs['id']: ''})
        self.create_policy_target_group(
            consumed_policy_rule_sets={prs['id']: ''})
        # Second service instance creation should fail now
        # since service instance quota is 1, resulting in PTG
        # creation error
        self.assertRaises(webob.exc.HTTPClientError,
                          self.create_policy_target_group,
                          provided_policy_rule_sets={prs['id']: ''})

View File

@ -1,859 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from neutron_lib.plugins import constants
from oslo_serialization import jsonutils
import webob
from gbpservice.neutron.services.servicechain.plugins.ncp import (
plugin as ncp_plugin)
from gbpservice.neutron.services.servicechain.plugins.ncp import config # noqa
from gbpservice.neutron.services.servicechain.plugins.ncp.node_drivers import (
nfp_node_driver as nfp_node_driver)
from gbpservice.neutron.tests.unit.db.grouppolicy import test_group_policy_db
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_resource_mapping as test_gp_driver)
from gbpservice.neutron.tests.unit.services.servicechain import (
base_test_servicechain_plugin as test_base)
from gbpservice.neutron.tests.unit.services.servicechain.ncp import (
test_ncp_plugin as test_ncp_plugin)
from gbpservice.nfp.orchestrator.db import nfp_db as nfp_db
# Seconds used to override the nfp_node_driver 'service_delete_timeout'
# option in these tests (applied in NFPNodeDriverTestCase.setUp).
SERVICE_DELETE_TIMEOUT = 15
# NOTE(review): not referenced within this chunk; presumably names the
# service management PTG the NFP driver looks up -- confirm before removing.
SVC_MANAGEMENT_PTG = 'foo'
class ServiceChainNCPTestPlugin(ncp_plugin.NodeCompositionPlugin):
    """Node composition plugin variant used as the test service chain plugin.

    Advertises the 'servicechain' extension plus the extensions the group
    policy DB tests list as required-but-unsupported, and anchors its API
    under the /servicechain path prefix.
    """
    supported_extension_aliases = ['servicechain'] + (
        test_group_policy_db.UNSUPPORTED_REQUIRED_EXTS)
    path_prefix = "/servicechain"
# Fully qualified class path used to load the test service chain plugin.
SC_PLUGIN_KLASS = (ServiceChainNCPTestPlugin.__module__ + '.' +
                   ServiceChainNCPTestPlugin.__name__)
# Core plugin shared with the resource-mapping driver tests.
CORE_PLUGIN = test_gp_driver.CORE_PLUGIN
# Group policy service plugin loaded alongside the service chain plugin.
GP_PLUGIN_KLASS = (
    "gbpservice.neutron.services.grouppolicy.plugin.GroupPolicyPlugin"
)
class NFPNodeDriverTestCase(
        test_base.BaseTestGroupPolicyPluginGroupResources,
        test_ncp_plugin.NodeCompositionPluginTestMixin):
    """Base test case wiring the NFP node driver into the NCP plugin.

    Supplies canned VPN/LB/FW node configs (serialized Heat/CFN
    templates) and helpers to build service profiles, chain nodes and
    complete provider/consumer chains.
    """

    # Heat template: SSL VPN connection layered on a VPN service.
    DEFAULT_VPN_CONFIG_DICT = {
        "heat_template_version": "2013-05-23",
        "description": "Creates new vpn service",
        "parameters": {
            "RouterId": {
                "type": "string", "description": "Router ID"
            },
            "Subnet": {
                "type": "string", "description": "Subnet id"
            },
            "ClientAddressPoolCidr": {
                "type": "string", "description": "Pool"
            },
        },
        "resources": {
            "SSLVPNConnection": {
                "type": "OS::Neutron::SSLVPNConnection",
                "properties": {
                    "credential_id": "",
                    "client_address_pool_cidr": {
                        "get_param": "ClientAddressPoolCidr"
                    },
                    "name": "vtun0",
                    "vpnservice_id": {
                        "get_resource": "VPNService"
                    },
                    "admin_state_up": 'true'
                }
            },
            "VPNService": {
                "type": "OS::Neutron::VPNService",
                "properties": {
                    "router_id": {
                        "get_param": "RouterId"
                    },
                    "subnet_id": {
                        "get_param": "Subnet"
                    },
                    "admin_state_up": 'true',
                    "name": "VPNService"
                }
            }
        }
    }
    DEFAULT_VPN_CONFIG = jsonutils.dumps(DEFAULT_VPN_CONFIG_DICT)

    # CFN-style template: haproxy pool with a VIP plus a load balancer.
    DEFAULT_LB_CONFIG_DICT = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {
            "test_pool": {
                "Type": "OS::Neutron::Pool",
                "Properties": {
                    "admin_state_up": True,
                    "description": "Haproxy pool from teplate",
                    "lb_method": "ROUND_ROBIN",
                    "monitors": [{"Ref": "HttpHM"}],
                    "name": "Haproxy pool",
                    "protocol": "HTTP",
                    "subnet_id": {"Ref": "Subnet"},
                    "vip": {
                        "subnet": {"Ref": "192.168.100.0"},
                        "address": {"Ref": "192.168.100.2"},
                        "name": "Haproxy vip",
                        "protocol_port": 80,
                        "connection_limit": -1,
                        "admin_state_up": True,
                        "description": "Haproxy vip from template"
                    }
                }
            },
            "test_lb": {
                "Type": "OS::Neutron::LoadBalancer",
                "Properties": {
                    "pool_id": {"Ref": "HaproxyPool"},
                    "protocol_port": 80
                }
            }
        }
    }
    DEFAULT_LB_CONFIG = jsonutils.dumps(DEFAULT_LB_CONFIG_DICT)

    # Heat template: firewall plus its firewall policy.
    DEFAULT_FW_CONFIG_DICT = {
        "heat_template_version": "2013-05-23",
        "resources": {
            'test_fw': {
                "type": "OS::Neutron::Firewall",
                "properties": {
                    "admin_state_up": True,
                    "firewall_policy_id": {
                        "get_resource": "Firewall_policy"},
                    "name": "testFirewall",
                    "description": "test Firewall"
                }
            },
            'test_fw_policy': {
                "type": "OS::Neutron::FirewallPolicy",
                "properties": {
                    "shared": False,
                    "description": "test firewall policy",
                    "name": "testFWPolicy",
                    "firewall_rules": [{
                        "get_resource": "Rule_1"}],
                    "audited": True
                }
            }
        }
    }
    DEFAULT_FW_CONFIG = jsonutils.dumps(DEFAULT_FW_CONFIG_DICT)

    # Vendor string on the service profiles handled by the NFP driver.
    SERVICE_PROFILE_VENDOR = 'NFP'

    def _create_service_profile(self, **kwargs):
        """Create a service profile with NFP-specific defaults filled in.

        Defaults insertion_mode to 'l3' and service_flavor to 'haproxy'
        for LOADBALANCERV2 profiles or 'vyos' otherwise, when the caller
        did not supply them.
        """
        if not kwargs.get('insertion_mode'):
            kwargs['insertion_mode'] = 'l3'
        if not kwargs.get('service_flavor'):
            if kwargs['service_type'] == 'LOADBALANCERV2':
                kwargs['service_flavor'] = 'haproxy'
            else:
                kwargs['service_flavor'] = 'vyos'
        return super(NFPNodeDriverTestCase, self)._create_service_profile(
            **kwargs)

    def setUp(self):
        # Select the NFP node driver with the stitching plumber and the
        # standard group policy driver chain before plugin initialization.
        config.cfg.CONF.set_override('service_delete_timeout',
                                     SERVICE_DELETE_TIMEOUT,
                                     group='nfp_node_driver')
        config.cfg.CONF.set_override(
            'extension_drivers', ['proxy_group'], group='group_policy')
        config.cfg.CONF.set_override('node_drivers', ['nfp_node_driver'],
                                     group='node_composition_plugin')
        config.cfg.CONF.set_override('node_plumber', 'stitching_plumber',
                                     group='node_composition_plugin')
        config.cfg.CONF.set_override('policy_drivers',
                                     ['implicit_policy', 'resource_mapping',
                                      'chain_mapping'],
                                     group='group_policy')
        super(NFPNodeDriverTestCase, self).setUp(
            core_plugin=CORE_PLUGIN,
            gp_plugin=GP_PLUGIN_KLASS,
            sc_plugin=SC_PLUGIN_KLASS)

    def test_manager_initialized(self):
        # The first ordered driver must be the NFP node driver, and every
        # loaded driver must report successful initialization.
        mgr = self.plugin.driver_manager
        self.assertIsInstance(mgr.ordered_drivers[0].obj,
                              nfp_node_driver.NFPNodeDriver)
        for driver in mgr.ordered_drivers:
            self.assertTrue(driver.obj.initialized)

    def _nfp_create_profiled_servicechain_node(
            self, service_type=constants.LOADBALANCERV2, shared_profile=False,
            profile_tenant_id=None, profile_id=None,
            service_flavor=None, **kwargs):
        """Create a chain node, building or reusing a service profile."""
        if not profile_id:
            prof = self.create_service_profile(
                service_type=service_type,
                shared=shared_profile,
                vendor=self.SERVICE_PROFILE_VENDOR,
                insertion_mode='l3', service_flavor='haproxy',
                tenant_id=profile_tenant_id or self._tenant_id)[
                    'service_profile']
        else:
            prof = self.get_service_profile(profile_id)
        service_config = kwargs.get('config')
        # Default the node config to a template matching the service type.
        if not service_config or service_config == '{}':
            if service_type == constants.FIREWALL:
                kwargs['config'] = self.DEFAULT_FW_CONFIG
            else:
                kwargs['config'] = self.DEFAULT_LB_CONFIG
        return self.create_servicechain_node(
            service_profile_id=prof['id'], **kwargs)

    def _create_simple_fw_service_chain(self, number_of_nodes=1,
                                        service_type='FIREWALL'):
        """Build a chain of firewall nodes sharing a single vyos profile."""
        prof = self.create_service_profile(
            service_type=service_type,
            vendor=self.SERVICE_PROFILE_VENDOR,
            insertion_mode='l3', service_flavor='vyos')['service_profile']
        node_ids = []
        for x in range(number_of_nodes):
            node_ids.append(self.create_servicechain_node(
                service_profile_id=prof['id'],
                config=self.DEFAULT_FW_CONFIG,
                expected_res_status=201)['servicechain_node']['id'])
        return self._nfp_create_chain_with_nodes(node_ids)

    def _nfp_create_chain_with_nodes(self, node_ids=None):
        """Create spec, redirect PRS and provider/consumer PTG pair.

        Creating the consumer PTG triggers the chain; the consumer-added
        notification toward NFP is asserted here.
        """
        node_ids = node_ids or []
        spec = self.create_servicechain_spec(
            nodes=node_ids,
            expected_res_status=201)['servicechain_spec']
        prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
        provider = self.create_policy_target_group(
            provided_policy_rule_sets={prs['id']: ''})['policy_target_group']
        with mock.patch.object(nfp_node_driver.NFPClientApi,
                               "consumer_ptg_added_notification") as ptg_added:
            consumer = self.create_policy_target_group(
                consumed_policy_rule_sets={prs['id']: ''})[
                    'policy_target_group']
            ptg_added.assert_called_once_with(mock.ANY,
                                              mock.ANY, mock.ANY)
        return provider, consumer, prs

    def test_spec_parameters(self):
        # Inherited test not applicable to the NFP driver; overridden as no-op.
        pass

    def test_spec_ordering_list_servicechain_instances(self):
        # Inherited test not applicable to the NFP driver; overridden as no-op.
        pass
class DummyMap(object):
    """Minimal stand-in for the node-instance -> network function DB map.

    Returned by the mocked get_node_instance_network_function_map so the
    driver sees an existing, healthy network function.
    """
    # Fake network function id handed back to the driver.
    network_function_id = '12'
    status = 'UP'
class TestServiceChainInstance(NFPNodeDriverTestCase):
    """NFP node driver behavior across the service chain lifecycle.

    Each test mocks the NFPClientApi RPC surface and asserts that chain
    operations (create/update/delete, PT and consumer-PTG churn) issue
    the expected calls toward the NFP orchestrator.
    """

    @mock.patch.object(nfp_node_driver.NFPClientApi, 'get_plumbing_info')
    def test_node_create(self, plumbing_info):
        # Instantiating a chain creates exactly one network function.
        with mock.patch.object(nfp_node_driver.NFPClientApi,
                               "create_network_function") as create_nf:
            with mock.patch.object(nfp_node_driver.NFPClientApi,
                                   "get_network_function") as get_nf:
                create_nf.return_value = {
                    'id': '126231632163'
                }
                get_nf.return_value = {
                    'id': '126231632163',
                    'status': 'ACTIVE'
                }
                plumbing_info.return_value = {
                    'management': [],
                    'provider': [{}],
                    'consumer': [{}],
                    'plumbing_type': 'gateway'
                }
                self._create_simple_fw_service_chain()
                create_nf.assert_called_once_with(mock.ANY, mock.ANY)

    def _test_node_update(self):
        # Updating a chain node pushes a new service config to NFP.
        with mock.patch.object(nfp_node_driver.NFPClientApi,
                               "create_network_function") as create_nf:
            with mock.patch.object(nfp_node_driver.NFPClientApi,
                                   "get_network_function") as get_nf:
                with mock.patch.object(
                        nfp_node_driver.NFPClientApi,
                        "update_service_config") as update_svc_config:
                    create_nf.return_value = {
                        'id': '126231632163'
                    }
                    get_nf.return_value = {
                        'id': '126231632163',
                        'status': 'ACTIVE'
                    }
                    prof = self.create_service_profile(
                        service_type=constants.FIREWALL,
                        vendor=self.SERVICE_PROFILE_VENDOR,
                        insertion_mode='l3',
                        service_flavor='vyos')['service_profile']
                    self.create_policy_target_group(
                        name='foo')['policy_target_group']
                    node = self.create_servicechain_node(
                        service_profile_id=prof['id'],
                        config=self.DEFAULT_FW_CONFIG,
                        expected_res_status=201)['servicechain_node']
                    self._nfp_create_chain_with_nodes(node_ids=[node['id']])
                    self.update_servicechain_node(
                        node['id'],
                        name='newname',
                        expected_res_status=200)
                    create_nf.assert_called_once_with(mock.ANY, mock.ANY)
                    update_svc_config.assert_called_once_with()

    @mock.patch.object(nfp_node_driver.NFPClientApi, 'get_plumbing_info')
    def test_node_delete(self, plumbing_info):
        # First build a chain (network function created once)...
        with mock.patch.object(nfp_node_driver.NFPClientApi,
                               "create_network_function") as create_nf:
            with mock.patch.object(nfp_node_driver.NFPClientApi,
                                   'get_network_function') as get_nf:
                get_nf.return_value = {
                    'id': '126231632163',
                    'status': 'ACTIVE'
                }
                create_nf.return_value = {
                    'id': '126231632163'
                }
                plumbing_info.return_value = {
                    'management': [],
                    'provider': [{}],
                    'consumer': [{}],
                    'plumbing_type': 'gateway'
                }
                prof = self.create_service_profile(
                    service_type=constants.FIREWALL,
                    vendor=self.SERVICE_PROFILE_VENDOR,
                    insertion_mode='l3',
                    service_flavor='vyos')['service_profile']
                node_id = self.create_servicechain_node(
                    service_profile_id=prof['id'],
                    config=self.DEFAULT_FW_CONFIG,
                    expected_res_status=201)['servicechain_node'][
                        'id']
                spec = self.create_servicechain_spec(
                    nodes=[node_id],
                    expected_res_status=201)['servicechain_spec']
                prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
                provider = self.create_policy_target_group(
                    provided_policy_rule_sets={prs['id']: ''})[
                        'policy_target_group']
                create_nf.assert_called_once_with(mock.ANY, mock.ANY)
        # ...then delete the provider PTG and expect one NF delete.
        with mock.patch.object(nfp_node_driver.NFPClientApi,
                               "get_network_function") as get_nf:
            with mock.patch.object(nfp_node_driver.NFPClientApi,
                                   "delete_network_function") as delete_nf,\
                    mock.patch.object(
                        nfp_db.NFPDbBase,
                        "get_node_instance_network_function_map") as get_map,\
                    mock.patch.object(
                        nfp_db.NFPDbBase,
                        "update_node_instance_network_function_map") as \
                    update_map:
                get_map.return_value = DummyMap()
                update_map.return_value = mock.ANY
                # None signals the NF is already gone, unblocking delete.
                get_nf.return_value = None
                self.delete_policy_target_group(
                    provider['id'], expected_res_status=204)
                delete_nf.assert_called_once_with(mock.ANY, mock.ANY,
                                                  mock.ANY)

    @mock.patch.object(nfp_node_driver.NFPClientApi, 'get_plumbing_info')
    def test_wait_for_network_function_delete_completion(self, plumbing_info):
        # Build a chain first (network function created once)...
        with mock.patch.object(nfp_node_driver.NFPClientApi,
                               "create_network_function") as create_nf:
            with mock.patch.object(nfp_node_driver.NFPClientApi,
                                   'get_network_function') as get_nf:
                get_nf.return_value = {
                    'id': '126231632163',
                    'status': 'ACTIVE'
                }
                create_nf.return_value = {
                    'id': '126231632163'
                }
                plumbing_info.return_value = {
                    'management': [],
                    'provider': [{}],
                    'consumer': [{}],
                    'plumbing_type': 'gateway'
                }
                prof = self.create_service_profile(
                    service_type=constants.FIREWALL,
                    vendor=self.SERVICE_PROFILE_VENDOR,
                    insertion_mode='l3',
                    service_flavor='vyos')['service_profile']
                node_id = self.create_servicechain_node(
                    service_profile_id=prof['id'],
                    config=self.DEFAULT_FW_CONFIG,
                    expected_res_status=201)['servicechain_node'][
                        'id']
                spec = self.create_servicechain_spec(
                    nodes=[node_id],
                    expected_res_status=201)['servicechain_spec']
                prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
                provider = self.create_policy_target_group(
                    provided_policy_rule_sets={prs['id']: ''})[
                        'policy_target_group']
                create_nf.assert_called_once_with(mock.ANY, mock.ANY)
        # ...then drive deletion and wait for NF delete completion.
        with mock.patch.object(nfp_node_driver.NFPClientApi,
                               'delete_network_function') as delete_nf:
            with mock.patch.object(nfp_node_driver.NFPClientApi,
                                   'get_network_function') as get_nf,\
                    mock.patch.object(
                        nfp_db.NFPDbBase,
                        "get_node_instance_network_function_map") as get_map,\
                    mock.patch.object(
                        nfp_db.NFPDbBase,
                        "update_node_instance_network_function_map") as \
                    update_map:
                get_map.return_value = DummyMap()
                update_map.return_value = mock.ANY
                delete_nf.return_value = None
                get_nf.return_value = None
                # Removing the PRSs will make the PTG deletable again
                self.update_policy_target_group(
                    provider['id'],
                    provided_policy_rule_sets={},
                    expected_res_status=200)
                self.delete_policy_target_group(provider['id'],
                                                expected_res_status=204)
                delete_nf.assert_called_once_with(mock.ANY, mock.ANY,
                                                  mock.ANY)

    def _create_policy_target_port(self, policy_target_group_id):
        """Create a PT in the given PTG and fetch its Neutron port."""
        pt = self.create_policy_target(
            policy_target_group_id=policy_target_group_id)['policy_target']
        req = self.new_show_request('ports', pt['port_id'], fmt=self.fmt)
        port = self.deserialize(self.fmt,
                                req.get_response(self.api))['port']
        return (pt, port)

    @mock.patch.object(nfp_node_driver.NFPClientApi, 'get_plumbing_info')
    def test_lb_node_create(self, plumbing_info, consumer_external=False):
        # LB chain creation with endpoint plumbing; also exercises the
        # PT-added notification for a provider-side PT.
        with mock.patch.object(nfp_node_driver.NFPClientApi,
                               "create_network_function") as create_nf:
            with mock.patch.object(nfp_node_driver.NFPClientApi,
                                   'get_network_function') as get_nf:
                get_nf.return_value = {
                    'id': '126231632163',
                    'status': 'ACTIVE'
                }
                create_nf.return_value = {
                    'id': '126231632163'
                }
                plumbing_info.return_value = {
                    'management': [],
                    'provider': [{}],
                    'consumer': [{}],
                    'plumbing_type': 'endpoint'
                }
                node_id = self._nfp_create_profiled_servicechain_node(
                    service_type=constants.LOADBALANCERV2)[
                        'servicechain_node']['id']
                spec = self.create_servicechain_spec(
                    nodes=[node_id],
                    expected_res_status=201)['servicechain_spec']
                prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
                # NSP requesting a single VIP IP from the provider subnet.
                params = [{'type': 'ip_single', 'name': 'vip_ip',
                           'value': 'self_subnet'}]
                nsp = self.create_network_service_policy(
                    network_service_params=params)
                network_service_policy_id = nsp['network_service_policy']['id']
                provider = self.create_policy_target_group(
                    network_service_policy_id=network_service_policy_id,
                    provided_policy_rule_sets={prs['id']: ''})[
                        'policy_target_group']
                with mock.patch.object(
                        nfp_node_driver.NFPClientApi,
                        "policy_target_added_notification") as pt_added:
                    # Verify notification issued for created PT in the provider
                    _, port = self._create_policy_target_port(provider['id'])
                    pt_added.assert_called_once_with(mock.ANY, mock.ANY,
                                                     mock.ANY)
                # The chain may be consumed by an EP or a regular PTG.
                if consumer_external:
                    self._create_external_policy(prs['id'])
                else:
                    self.create_policy_target_group(
                        consumed_policy_rule_sets={prs['id']: ''})
                create_nf.assert_called_once_with(mock.ANY, mock.ANY)

    def test_invalid_service_type_rejected(self):
        # An unknown service type leaves no driver able to handle the chain.
        node_used = self._nfp_create_profiled_servicechain_node(
            service_type="test")['servicechain_node']
        spec_used = self.create_servicechain_spec(
            nodes=[node_used['id']])['servicechain_spec']
        provider = self.create_policy_target_group()['policy_target_group']
        classifier = self.create_policy_classifier()['policy_classifier']
        res = self.create_servicechain_instance(
            provider_ptg_id=provider['id'],
            classifier_id=classifier['id'],
            servicechain_specs=[spec_used['id']],
            expected_res_status=webob.exc.HTTPBadRequest.code)
        self.assertEqual('NoDriverAvailableForAction',
                         res['NeutronError']['type'])

    def test_is_node_order_in_spec_supported(self):
        # LB before VPN is an unsupported node ordering; instance creation
        # must fail with NoDriverAvailableForAction.
        lb_prof = self.create_service_profile(
            service_type=constants.LOADBALANCERV2,
            vendor=self.SERVICE_PROFILE_VENDOR,
            insertion_mode='l3',
            service_flavor='haproxy')['service_profile']
        vpn_prof = self.create_service_profile(
            service_type=constants.VPN,
            vendor=self.SERVICE_PROFILE_VENDOR,
            insertion_mode='l3',
            service_flavor='vyos')['service_profile']
        vpn_node = self.create_servicechain_node(
            service_profile_id=vpn_prof['id'],
            config=self.DEFAULT_VPN_CONFIG,
            expected_res_status=201)['servicechain_node']
        lb_node = self.create_servicechain_node(
            service_profile_id=lb_prof['id'],
            config=self.DEFAULT_LB_CONFIG,
            expected_res_status=201)['servicechain_node']
        node_ids = [lb_node['id'], vpn_node['id']]
        spec = self.create_servicechain_spec(
            nodes=node_ids,
            expected_res_status=201)['servicechain_spec']
        provider = self.create_policy_target_group()['policy_target_group']
        classifier = self.create_policy_classifier()['policy_classifier']
        res = self.create_servicechain_instance(
            provider_ptg_id=provider['id'],
            classifier_id=classifier['id'],
            servicechain_specs=[spec['id']],
            expected_res_status=webob.exc.HTTPBadRequest.code)
        self.assertEqual('NoDriverAvailableForAction',
                         res['NeutronError']['type'])

    @mock.patch.object(nfp_node_driver.NFPClientApi, 'get_plumbing_info')
    def test_validate_update(self, plumbing_info):
        # Updating an instance to a spec whose node type no driver
        # supports must fail and tear down the old network function.
        with mock.patch.object(nfp_node_driver.NFPClientApi,
                               "create_network_function") as create_nf:
            with mock.patch.object(nfp_node_driver.NFPClientApi,
                                   "get_network_function") as get_nf:
                create_nf.return_value = {
                    'id': '126231632163'
                }
                get_nf.return_value = {
                    'id': '126231632163',
                    'status': 'ACTIVE'
                }
                plumbing_info.return_value = {
                    'management': [],
                    'provider': [{}],
                    'consumer': [{}],
                    'plumbing_type': 'gateway'
                }
                fw_prof = self.create_service_profile(
                    service_type=constants.FIREWALL,
                    vendor=self.SERVICE_PROFILE_VENDOR,
                    insertion_mode='l3',
                    service_flavor='vyos')['service_profile']
                fw_node = self.create_servicechain_node(
                    service_profile_id=fw_prof['id'],
                    config=self.DEFAULT_FW_CONFIG,
                    expected_res_status=201)['servicechain_node']
                node_ids = [fw_node['id']]
                spec = self.create_servicechain_spec(
                    nodes=node_ids,
                    expected_res_status=201)['servicechain_spec']
                provider = self.create_policy_target_group()[
                    'policy_target_group']
                classifier = self.create_policy_classifier()[
                    'policy_classifier']
                servicechain_instance = self.create_servicechain_instance(
                    provider_ptg_id=provider['id'],
                    classifier_id=classifier['id'],
                    servicechain_specs=[spec['id']])[
                        'servicechain_instance']
                # Second spec uses an unsupported ('test') service type.
                fw_prof = self.create_service_profile(
                    service_type='test',
                    vendor=self.SERVICE_PROFILE_VENDOR,
                    insertion_mode='l3',
                    service_flavor='vyos')['service_profile']
                fw_node = self.create_servicechain_node(
                    service_profile_id=fw_prof['id'],
                    config=self.DEFAULT_FW_CONFIG,
                    expected_res_status=201)['servicechain_node']
                node_ids = [fw_node['id']]
                spec = self.create_servicechain_spec(
                    nodes=node_ids,
                    expected_res_status=201)['servicechain_spec']
                create_nf.assert_called_once_with(mock.ANY, mock.ANY)
        with mock.patch.object(nfp_node_driver.NFPClientApi,
                               "get_network_function") as get_nf:
            with mock.patch.object(nfp_node_driver.NFPClientApi,
                                   "delete_network_function") as delete_nf,\
                    mock.patch.object(
                        nfp_db.NFPDbBase,
                        "get_node_instance_network_function_map") as get_map,\
                    mock.patch.object(
                        nfp_db.NFPDbBase,
                        "update_node_instance_network_function_map") as \
                    update_map:
                get_map.return_value = DummyMap()
                update_map.return_value = mock.ANY
                get_nf.return_value = None
                res = self.update_servicechain_instance(
                    servicechain_instance['id'],
                    servicechain_specs=[spec['id']],
                    expected_res_status=webob.exc.HTTPBadRequest.code)
                delete_nf.assert_called_once_with(mock.ANY,
                                                  mock.ANY, mock.ANY)
                self.assertEqual('NoDriverAvailableForAction',
                                 res['NeutronError']['type'])

    @mock.patch.object(nfp_node_driver.NFPClientApi, 'get_plumbing_info')
    def test_update_node_consumer_ptg_added(self, plumbing_info):
        # Adding a consumer PTG to an existing chain issues the
        # consumer-added notification toward NFP.
        with mock.patch.object(nfp_node_driver.NFPClientApi,
                               "create_network_function") as create_nf:
            with mock.patch.object(nfp_node_driver.NFPClientApi,
                                   'get_network_function') as get_nf:
                get_nf.return_value = {
                    'id': '126231632163',
                    'status': 'ACTIVE'
                }
                create_nf.return_value = {
                    'id': '126231632163'
                }
                plumbing_info.return_value = {
                    'management': [],
                    'provider': [{}],
                    'consumer': [{}],
                    'plumbing_type': 'gateway'
                }
                prof = self.create_service_profile(
                    service_type=constants.FIREWALL,
                    vendor=self.SERVICE_PROFILE_VENDOR,
                    insertion_mode='l3',
                    service_flavor='vyos')['service_profile']
                node_id = self.create_servicechain_node(
                    service_profile_id=prof['id'],
                    config=self.DEFAULT_FW_CONFIG,
                    expected_res_status=201)['servicechain_node'][
                        'id']
                spec = self.create_servicechain_spec(
                    nodes=[node_id],
                    expected_res_status=201)['servicechain_spec']
                prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
                self.create_policy_target_group(
                    provided_policy_rule_sets={prs['id']: ''})[
                        'policy_target_group']
                create_nf.assert_called_once_with(mock.ANY, mock.ANY)
                with mock.patch.object(
                        nfp_node_driver.NFPClientApi,
                        "consumer_ptg_added_notification") as ptg_added:
                    self.create_policy_target_group(
                        consumed_policy_rule_sets={prs['id']: ''})[
                            'policy_target_group']
                    ptg_added.assert_called_once_with(mock.ANY,
                                                      mock.ANY, mock.ANY)

    def _test_update_node_consumer_ptg_removed(self):
        # Deleting the consumer PTG issues the consumer-removed
        # notification toward NFP.
        with mock.patch.object(nfp_node_driver.NFPClientApi,
                               "create_network_function") as create_nf:
            with mock.patch.object(nfp_node_driver.NFPClientApi,
                                   'get_network_function') as get_nf:
                get_nf.return_value = {
                    'id': '126231632163',
                    'status': 'ACTIVE'
                }
                create_nf.return_value = {
                    'id': '126231632163'
                }
                prof = self.create_service_profile(
                    service_type=constants.FIREWALL,
                    vendor=self.SERVICE_PROFILE_VENDOR,
                    insertion_mode='l3',
                    service_flavor='vyos')['service_profile']
                node_id = self.create_servicechain_node(
                    service_profile_id=prof['id'],
                    config=self.DEFAULT_FW_CONFIG,
                    expected_res_status=201)['servicechain_node'][
                        'id']
                spec = self.create_servicechain_spec(
                    nodes=[node_id],
                    expected_res_status=201)['servicechain_spec']
                prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
                self.create_policy_target_group(
                    provided_policy_rule_sets={prs['id']: ''})[
                        'policy_target_group']
                with mock.patch.object(
                        nfp_node_driver.NFPClientApi,
                        "consumer_ptg_added_notification") as ptg_added:
                    consumer = self.create_policy_target_group(
                        consumed_policy_rule_sets={prs['id']: ''})[
                            'policy_target_group']
                    ptg_added.assert_called_once_with(mock.ANY, mock.ANY,
                                                      mock.ANY)
                create_nf.assert_called_once_with(mock.ANY, mock.ANY)
                with mock.patch.object(
                        nfp_node_driver.NFPClientApi,
                        "consumer_ptg_removed_notification") as ptg_removed:
                    self.delete_policy_target_group(
                        consumer['id'], expected_res_status=204)
                    ptg_removed.assert_called_once_with(mock.ANY, mock.ANY,
                                                        mock.ANY)

    @mock.patch.object(nfp_node_driver.NFPClientApi, 'get_plumbing_info')
    def test_policy_target_add_remove(self, plumbing_info):
        # PT create/delete in the provider triggers matching add/remove
        # notifications toward NFP.
        prof = self._create_service_profile(
            service_type='LOADBALANCERV2',
            vendor=self.SERVICE_PROFILE_VENDOR,
            insertion_mode='l3', service_flavor='haproxy')['service_profile']
        node = self.create_servicechain_node(
            service_profile_id=prof['id'],
            config=self.DEFAULT_LB_CONFIG,
            expected_res_status=201)['servicechain_node']
        spec = self.create_servicechain_spec(
            nodes=[node['id']],
            expected_res_status=201)['servicechain_spec']
        prs = self._create_redirect_prs(spec['id'])['policy_rule_set']
        with mock.patch.object(nfp_node_driver.NFPClientApi,
                               "create_network_function") as create_nf:
            with mock.patch.object(nfp_node_driver.NFPClientApi,
                                   'get_network_function') as get_nf:
                get_nf.return_value = {
                    'id': '126231632163',
                    'status': 'ACTIVE'
                }
                create_nf.return_value = {
                    'id': '126231632163'
                }
                plumbing_info.return_value = {
                    'management': [],
                    'provider': [{}],
                    'consumer': [{}],
                    'plumbing_type': 'endpoint'
                }
                params = [{'type': 'ip_single', 'name': 'vip_ip',
                           'value': 'self_subnet'}]
                nsp = self.create_network_service_policy(
                    network_service_params=params)
                network_service_policy_id = nsp['network_service_policy'][
                    'id']
                provider = self.create_policy_target_group(
                    network_service_policy_id=network_service_policy_id,
                    provided_policy_rule_sets={prs['id']: ''})[
                        'policy_target_group']
                self.create_policy_target_group(
                    consumed_policy_rule_sets={prs['id']: ''})
                with mock.patch.object(
                        nfp_node_driver.NFPClientApi,
                        "policy_target_added_notification") as pt_added:
                    # Verify notification issued for created PT in the provider
                    pt = self.create_policy_target(
                        policy_target_group_id=provider['id'])[
                            'policy_target']
                    create_nf.assert_called_once_with(mock.ANY, mock.ANY)
                    pt_added.assert_called_once_with(mock.ANY, mock.ANY,
                                                     mock.ANY)
        # Verify notification issued for deleted PT in the provider
        with mock.patch.object(
                nfp_node_driver.NFPClientApi,
                "policy_target_removed_notification") as pt_removed:
            with mock.patch.object(nfp_node_driver.NFPClientApi,
                                   'get_network_function') as get_nf:
                get_nf.return_value = {
                    'id': '126231632163',
                    'status': 'ACTIVE'
                }
                self.delete_policy_target(pt['id'])
                pt_removed.assert_called_once_with(mock.ANY, mock.ANY,
                                                   mock.ANY)

    @mock.patch.object(nfp_node_driver.NFPClientApi, 'get_plumbing_info')
    def test_policy_target_group_updated(self, plumbing_info):
        # Updating the chained PTG's provided PRSs must keep the
        # reference PTG's rule sets in sync.
        prof = self._create_service_profile(
            service_type='FIREWALL',
            vendor=self.SERVICE_PROFILE_VENDOR,
            insertion_mode='l3', service_flavor='vyos')['service_profile']
        node = self.create_servicechain_node(
            service_profile_id=prof['id'],
            config=self.DEFAULT_FW_CONFIG,
            expected_res_status=201)['servicechain_node']
        spec = self.create_servicechain_spec(
            nodes=[node['id']])['servicechain_spec']
        action = self.create_policy_action(
            action_type='REDIRECT', action_value=spec['id'])[
                'policy_action']
        classifier = self.create_policy_classifier(
            direction='bi', protocol='icmp')[
                'policy_classifier']
        rule = self.create_policy_rule(
            policy_classifier_id=classifier['id'],
            policy_actions=[action['id']])['policy_rule']
        prs = self.create_policy_rule_set(
            policy_rules=[rule['id']])['policy_rule_set']
        # allow
        allow_action = self.create_policy_action(action_type='ALLOW')[
            'policy_action']
        allow_rule = self.create_policy_rule(
            policy_classifier_id=classifier['id'],
            policy_actions=[allow_action['id']])['policy_rule']
        allow_prs = self.create_policy_rule_set(
            policy_rules=[allow_rule['id']])['policy_rule_set']
        # ref ptg
        ref_ptg = self.create_policy_target_group()['policy_target_group']
        ref_pt = self.create_policy_target(
            policy_target_group_id=ref_ptg['id'])['policy_target']
        with mock.patch.object(nfp_node_driver.NFPClientApi,
                               "create_network_function") as create_nf:
            with mock.patch.object(nfp_node_driver.NFPClientApi,
                                   'get_network_function') as get_nf:
                get_nf.return_value = {
                    'id': '126231632163',
                    'status': 'ACTIVE'
                }
                create_nf.return_value = {
                    'id': '126231632163'
                }
                plumbing_info.return_value = {
                    'management': [],
                    'provider': [{}],
                    'consumer': [{}],
                    'plumbing_type': 'gateway'
                }
                orig_ptg = self.create_policy_target_group(
                    description="opflex_eoc:%s" % ref_pt['port_id'],
                    provided_policy_rule_sets={prs['id']: ''})[
                        'policy_target_group']
                current_ptg = self.update_policy_target_group(
                    orig_ptg['id'],
                    provided_policy_rule_sets={
                        prs['id']: '', allow_prs['id']: ''})[
                            'policy_target_group']
                ref_ptg = self.show_policy_target_group(ref_ptg['id'])[
                    'policy_target_group']
                self.assertSetEqual(set(ref_ptg['provided_policy_rule_sets']),
                                    set(current_ptg[
                                        'provided_policy_rule_sets']))

View File

@ -1,168 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron.common import config # noqa
from neutron_lib import context as n_context
from neutron_lib.plugins import constants as pconst
from oslo_config import cfg
from gbpservice.neutron.services.servicechain.plugins.ncp import model
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_resource_mapping as test_gp_driver)
from gbpservice.neutron.tests.unit.services.servicechain.ncp import (
test_ncp_plugin as base)
class TrafficStitchingPlumberTestCase(base.NodeCompositionPluginTestCase):
def setUp(self):
    """Load NCP with the dummy node driver and the stitching plumber."""
    cfg.CONF.set_override('policy_drivers', ['implicit_policy',
                                             'resource_mapping'],
                          group='group_policy')
    cfg.CONF.set_override('allow_overlapping_ips', True)
    cfg.CONF.set_override(
        'extension_drivers', ['proxy_group'], group='group_policy')
    super(TrafficStitchingPlumberTestCase, self).setUp(
        node_drivers=['node_dummy'], node_plumber='stitching_plumber',
        core_plugin=test_gp_driver.CORE_PLUGIN)
    self.driver = self.sc_plugin.driver_manager.ordered_drivers[0].obj
    # Default to no plumbing requirements; individual tests override
    # the return value to request provider/consumer service targets.
    self.driver.get_plumbing_info = mock.Mock()
    self.driver.get_plumbing_info.return_value = {}
def test_one_gateway_pt_prov_cons(self):
context = n_context.get_admin_context()
self.driver.get_plumbing_info.return_value = {
'provider': [{}], 'consumer': [{}], 'plumbing_type': 'gateway'}
provider, consumer, node = self._create_simple_chain()
provider = self.show_policy_target_group(
provider['id'])['policy_target_group']
# Verify Service PT created and correctly placed
targets = model.get_service_targets(context.session)
self.assertEqual(2, len(targets))
old_relationship = None
for target in targets:
self.assertEqual(node['id'], target.servicechain_node_id)
pt = self.show_policy_target(
target.policy_target_id)['policy_target']
if target.relationship == 'provider':
self.assertEqual(provider['id'],
pt['policy_target_group_id'])
self.assertTrue(pt['group_default_gateway'])
self.assertFalse(pt['proxy_gateway'])
else:
# Consumer side a proxy group exists
self.assertEqual(provider['proxy_group_id'],
pt['policy_target_group_id'])
self.assertFalse(pt['group_default_gateway'])
self.assertTrue(pt['proxy_gateway'])
self.assertNotEqual(old_relationship, target.relationship)
old_relationship = target.relationship
port = self._get_object('ports', pt['port_id'], self.api)['port']
self.assertTrue(port['name'].startswith('pt_service_target_'),
"Port name doesn't start with 'pt_service_target_"
"'.\nport:\n%s\n" % port)
self.update_policy_target_group(
provider['id'], provided_policy_rule_sets={})
# With chain deletion, also the Service PTs are deleted
new_targets = model.get_service_targets(context.session)
self.assertEqual(0, len(new_targets))
for target in targets:
self.show_policy_target(
target.policy_target_id, expected_res_status=404)
provider = self.show_policy_target_group(
provider['id'])['policy_target_group']
self.assertIsNone(provider['proxy_group_id'])
def test_multiple_endpoint_pt_provider(self):
context = n_context.get_admin_context()
self.driver.get_plumbing_info.return_value = {
'provider': [{}, {}], 'consumer': [], 'plumbing_type': 'endpoint'}
provider, consumer, node = self._create_simple_chain()
provider = self.show_policy_target_group(
provider['id'])['policy_target_group']
# Verify Service PT created and contains proper name, description
targets = model.get_service_targets(context.session)
self.assertEqual(2, len(targets))
for target in targets:
pt = self.show_policy_target(
target.policy_target_id)['policy_target']
self.assertEqual(provider['id'],
pt['policy_target_group_id'])
self.assertTrue(pt['name'].startswith('tscp_endpoint_service'),
"Policy Target name doesn't start with "
"'tscp_endpoint_service'.\npt:\n%s\n" % pt)
self.assertTrue(node['id'] in pt['description'],
"Policy Target description doesn't contains "
" node id.\nnode:\n%s\n" % node)
port = self._get_object('ports', pt['port_id'], self.api)['port']
self.assertTrue(port['name'].startswith(
'pt_tscp_endpoint_service'),
"Port name doesn't start with "
"'pt_tscp_endpoint_service'.\nport:\n%s\n" % port)
self.update_policy_target_group(
provider['id'], provided_policy_rule_sets={})
# With chain deletion, also the Service PTs are deleted
new_targets = model.get_service_targets(context.session)
self.assertEqual(0, len(new_targets))
for target in targets:
self.show_policy_target(
target.policy_target_id, expected_res_status=404)
provider = self.show_policy_target_group(
provider['id'])['policy_target_group']
self.assertIsNone(provider['proxy_group_id'])
def get_plumbing_info_base(self, context):
service_type = context.current_profile['service_type']
plumbing_request = {'management': [], 'provider': [{}],
'consumer': [{}]}
if service_type in [pconst.FIREWALL]:
plumbing_request['plumbing_type'] = 'gateway'
else:
plumbing_request = {}
return plumbing_request
def test_get_service_targets_in_chain(self):
context = n_context.get_admin_context()
self.driver.get_plumbing_info = self.get_plumbing_info_base
lb_prof = self._create_service_profile(
service_type='LOADBALANCERV2',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
lb_node = self.create_servicechain_node(
service_profile_id=lb_prof['id'],
config=self.DEFAULT_LB_CONFIG)['servicechain_node']
fw_prof = self._create_service_profile(
service_type='FIREWALL',
vendor=self.SERVICE_PROFILE_VENDOR)['service_profile']
fw_node = self.create_servicechain_node(
service_profile_id=fw_prof['id'],
config='{}')['servicechain_node']
self._create_chain_with_nodes([fw_node['id'], lb_node['id']])
targets = model.get_service_targets(context.session)
self.assertEqual(2, len(targets))
def test_ptg_delete(self):
self.driver.get_plumbing_info.return_value = {
'provider': [{}], 'consumer': [{}],
'plumbing_type': 'transparent'}
provider, _, _ = self._create_simple_service_chain()
# Deleting a PTG will fail because of existing PTs
self.delete_policy_target_group(provider['id'],
expected_res_status=204)

View File

@ -1,203 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron.common import config # noqa
from neutron_lib import context as n_context
from neutron_lib.plugins import directory
from oslo_config import cfg
from gbpservice.neutron.services.grouppolicy import (
policy_driver_manager as pdm)
from gbpservice.neutron.services.servicechain.plugins.ncp import model
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_resource_mapping as test_gp_driver)
from gbpservice.neutron.tests.unit.services.servicechain.ncp import (
test_ncp_plugin as base)
# Plumbing-type identifiers understood by the stitching plumber.
GATEWAY = 'gateway'
GATEWAY_HA = 'gateway_ha'
TRANSPARENT = 'transparent'
ENDPOINT = 'endpoint'
# Canonical plumbing requests per plumbing type: each entry lists the
# provider/consumer port placeholders a service of that type needs.
info_mapping = {
    GATEWAY: {'plumbing_type': GATEWAY, 'provider': [{}], 'consumer': [{}]},
    GATEWAY_HA: {'plumbing_type': GATEWAY, 'provider': [{}, {}, {}],
                 'consumer': [{}, {}, {}]},
    TRANSPARENT: {'plumbing_type': TRANSPARENT, 'provider': [{}],
                  'consumer': [{}]},
    ENDPOINT: {'plumbing_type': ENDPOINT, 'provider': [{}]},
}
# Aliases keyed by service type; these share (not copy) the dicts above.
info_mapping['FIREWALL'] = info_mapping[GATEWAY]
info_mapping['FIREWALL_HA'] = info_mapping[GATEWAY_HA]
info_mapping['TRANSPARENT_FIREWALL'] = info_mapping[TRANSPARENT]
info_mapping['LOADBALANCERV2'] = info_mapping[ENDPOINT]
class ResourceMappingStitchingPlumberGBPTestCase(
        test_gp_driver.ResourceMappingTestCase):
    """Resource-mapping test base re-run with the stitching plumber.

    Reuses the standard resource-mapping test setup but wires in the
    stitching plumber and a plumbing-info callback driven by the
    module-level info_mapping table.
    """
    def setUp(self):
        cfg.CONF.set_override(
            'extension_drivers', ['proxy_group'], group='group_policy')
        cfg.CONF.set_override('node_plumber', 'stitching_plumber',
                              group='node_composition_plugin')
        ml2_opts = {'mechanism_drivers': ['stitching_gbp'],
                    'extension_drivers': ['qos']}
        # Stub out agent discovery so port binding sees our fake agent.
        host_agents = mock.patch('neutron.plugins.ml2.driver_context.'
                                 'PortContext.host_agents').start()
        host_agents.return_value = [self.agent_conf]
        qos_plugin = 'qos'
        super(ResourceMappingStitchingPlumberGBPTestCase, self).setUp(
            sc_plugin=base.SC_PLUGIN_KLASS, ml2_options=ml2_opts,
            qos_plugin=qos_plugin)
        # Plumbing info is looked up from info_mapping by service type.
        def get_plumbing_info(context):
            return info_mapping.get(context.current_profile['service_type'])
        self.node_driver = self.sc_plugin.driver_manager.ordered_drivers[0].obj
        self.node_driver.get_plumbing_info = get_plumbing_info
        # NOTE(review): MagicMock({}) passes {} as the mock's spec —
        # presumably a plain MagicMock() was intended; confirm.
        pdm.PolicyDriverManager.get_policy_target_group_status = (
            mock.MagicMock({}))
    @property
    def sc_plugin(self):
        # Resolve the servicechain plugin lazily from the plugin directory.
        return directory.get_plugin('SERVICECHAIN')
class TestPolicyRuleSet(ResourceMappingStitchingPlumberGBPTestCase,
                        test_gp_driver.TestPolicyRuleSet):
    """Run the stock policy-rule-set tests under the stitching plumber."""
    pass
class TestServiceChain(ResourceMappingStitchingPlumberGBPTestCase,
                       test_gp_driver.TestServiceChain):
    """Service-chain tests under the stitching plumber.

    Tests relying on features NCP does not support (multiple SPECs per
    instance) are overridden as no-ops to skip them.
    """
    def test_parent_ruleset_update_for_redirect(self):
        # NCP doesn't support multiple SPECs per instance
        pass
    def test_enforce_parent_redirect_after_ptg_create(self):
        # NCP doesn't support multiple SPECs per instance
        pass
    def test_hierarchical_redirect(self):
        # NCP doesn't support multiple SPECs per instance
        pass
    def test_redirect_multiple_ptgs_single_prs(self):
        # REVISIT(ivar): This test is doing a mock patching that breaks the
        # workflow
        pass
    def test_action_spec_value_update(self):
        # NCP doesn't support multiple SPECs per instance
        pass
    def test_rule_update_hierarchial_prs(self):
        # NCP doesn't support multiple SPECs per instance
        pass
    def test_rule_update_updates_chain(self):
        # NCP doesn't support multiple SPECs per instance
        pass
class TestServiceChainAdminOwner(ResourceMappingStitchingPlumberGBPTestCase,
                                 test_gp_driver.TestServiceChainAdminOwner):
    """Admin-owned service-chain tests under the stitching plumber.

    Same skip list as TestServiceChain: features unsupported by NCP are
    overridden as no-ops.
    """
    def test_parent_ruleset_update_for_redirect(self):
        # NCP doesn't support multiple SPECs per instance
        pass
    def test_enforce_parent_redirect_after_ptg_create(self):
        # NCP doesn't support multiple SPECs per instance
        pass
    def test_hierarchical_redirect(self):
        # NCP doesn't support multiple SPECs per instance
        pass
    def test_redirect_multiple_ptgs_single_prs(self):
        # REVISIT(ivar): This test is doing a mock patching that breaks the
        # workflow
        pass
    def test_action_spec_value_update(self):
        # NCP doesn't support multiple SPECs per instance
        pass
    def test_rule_update_hierarchial_prs(self):
        # NCP doesn't support multiple SPECs per instance
        pass
    def test_rule_update_updates_chain(self):
        # NCP doesn't support multiple SPECs per instance
        pass
class TestPolicyAction(ResourceMappingStitchingPlumberGBPTestCase,
                       test_gp_driver.TestPolicyAction):
    """Run the stock policy-action tests under the stitching plumber."""
    pass
class TestPolicyRule(ResourceMappingStitchingPlumberGBPTestCase,
                     test_gp_driver.TestPolicyRule):
    """Run the stock policy-rule tests under the stitching plumber."""
    pass
class TestExternalSegment(ResourceMappingStitchingPlumberGBPTestCase,
                          test_gp_driver.TestExternalSegment):
    """External-segment tests with proxy IP pools that avoid overlap."""
    def test_update(self):
        # Supply distinct proxy pools so the proxy_group extension does
        # not collide with the base test's address ranges.
        super(TestExternalSegment, self).test_update(
            proxy_ip_pool1='182.169.0.0/16',
            proxy_ip_pool2='172.169.0.0/16')
class TestExternalPolicy(ResourceMappingStitchingPlumberGBPTestCase,
                         test_gp_driver.TestExternalPolicy):
    """Run the stock external-policy tests under the stitching plumber."""
    pass
class TestImplicitServiceChains(ResourceMappingStitchingPlumberGBPTestCase,
                                base.NodeCompositionPluginTestMixin):
    """Checks VIF binding details of plumber-created service targets."""
    def test_service_targets_vif_details(self):
        context = n_context.get_admin_context()
        self._create_simple_service_chain(service_type='TRANSPARENT_FIREWALL')
        targets = model.get_service_targets(context.session)
        self.assertGreater(len(targets), 0)
        for target in targets:
            pt = self.show_policy_target(
                target.policy_target_id)['policy_target']
            # Being service targets, port filter and hybrid plug will be false
            port = self._bind_port_to_host(pt['port_id'], 'host')['port']
            self.assertFalse(port['binding:vif_details']['port_filter'])
            self.assertFalse(port['binding:vif_details']['ovs_hybrid_plug'])
    def test_endpoint_target_vif_details(self):
        # Endpoint (LB) targets keep normal port filtering, unlike the
        # transparent service targets above.
        context = n_context.get_admin_context()
        self._create_simple_service_chain(service_type='LOADBALANCERV2')
        targets = model.get_service_targets(context.session)
        self.assertGreater(len(targets), 0)
        for target in targets:
            pt = self.show_policy_target(
                target.policy_target_id)['policy_target']
            port = self._bind_port_to_host(pt['port_id'], 'host')['port']
            self.assertTrue(port['binding:vif_details']['port_filter'])
            # This change sets hybrid VIF plugging to True by default again
            # https://github.com/openstack/neutron/commit/
            # eca893be5b770c41cfc570dc016a41c30c2cdf23
            self.assertTrue(port['binding:vif_details']['ovs_hybrid_plug'])

View File

@ -1,58 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from gbpservice.nfp.base_configurator.controllers import controller
class ControllerResolver(object):
    """Routes /v1/nfp/<action> HTTP requests to Controller instances.

    Each class attribute is a pecan sub-controller named after the NFP
    action it serves; pecan's object-dispatch maps the URL path segment
    to the attribute, and the Controller forwards the request to the
    configurator.
    """
    create_network_function_device_config = controller.Controller(
        "create_network_function_device_config")
    delete_network_function_device_config = controller.Controller(
        "delete_network_function_device_config")
    update_network_function_device_config = controller.Controller(
        "update_network_function_device_config")
    create_network_function_config = controller.Controller(
        "create_network_function_config")
    delete_network_function_config = controller.Controller(
        "delete_network_function_config")
    update_network_function_config = controller.Controller(
        "update_network_function_config")
    get_notifications = controller.Controller("get_notifications")
class V1Controller(object):
    """Root controller for HTTP requests starting with /v1.

    Requests under /v1/nfp are dispatched to ControllerResolver; a bare
    GET on /v1 returns the API version document.
    """
    # Sub-controller handling the /v1/nfp subtree.
    nfp = ControllerResolver()
    @pecan.expose()
    def get(self):
        # Version discovery endpoint for /v1.
        return {'versions': [{'status': 'CURRENT',
                              'updated': '2014-12-11T00:00:00Z',
                              'id': 'v1'}]}

View File

@ -1,219 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
from subprocess import CalledProcessError
import time
from oslo_log import log as logging
import oslo_serialization.jsonutils as jsonutils
import pecan
import requests
from gbpservice._i18n import _
from gbpservice.nfp.pecan import base_controller
LOG = logging.getLogger(__name__)
# RPC topic used to talk to the configurator.
TOPIC = 'configurator'
# Resources handled locally (marked 'unhandled') rather than sent to a VM.
NFP_SERVICE_LIST = ['heat', 'ansible']
# Result strings treated as success when building notifications.
SUCCESS_RESULTS = ['unhandled', 'success']
FAILURE = 'failure'
# Module-level state shared by all Controller instances: queued
# notification payloads, and service-VM IPs with pending notifications.
notifications = []
cache_ips = set()
class Controller(base_controller.BaseController):
    """Implements all the APIs invoked by HTTP requests.

    Implements the following HTTP methods:
        - get: drain queued notifications (locally or from service VMs)
        - post: forward config requests to a service VM or queue a
          notification locally
    """
    def __init__(self, method_name):
        try:
            # Name of the NFP action this controller instance serves; it is
            # also the URL suffix used when forwarding to the service VM.
            self.method_name = method_name
            super(Controller, self).__init__()
        except Exception as err:
            msg = (
                "Failed to initialize Controller class %s." %
                str(err).capitalize())
            LOG.error(msg)
        # REST port exposed by the service VM agent.
        self.vm_port = '8080'
        # Reachability probe attempts (5s apart) before giving up.
        self.max_retries = 60
    def _push_notification(self, context, result, config_data, service_type):
        """Queue a notification for the given result on the module list."""
        global notifications
        resource = config_data['resource']
        # 'unhandled'/'success' map to a plain status; anything else is
        # reported as a failure with the result text as the error message.
        if result.lower() in SUCCESS_RESULTS:
            data = {'status_code': result}
        else:
            data = {'status_code': FAILURE,
                    'error_msg': result}
        response = {'info': {'service_type': service_type,
                    'context': context},
                    'notification': [{
                        'resource': resource,
                        'data': data}]
                    }
        notifications.append(response)
    def _verify_vm_reachability(self, vm_ip, vm_port):
        """Poll until the VM answers ping and the port accepts connections.

        Retries up to self.max_retries times, sleeping 5s between
        attempts. Returns True as soon as both probes succeed.
        """
        reachable = False
        # 'nc -z' checks the TCP port without sending data.
        command = 'nc ' + vm_ip + ' ' + vm_port + ' -z'
        ping_command = 'ping -c1 ' + vm_ip
        for x in range(self.max_retries):
            try:
                subprocess.check_output(ping_command, stderr=subprocess.STDOUT,
                                        shell=True)
                subprocess.check_output(command, stderr=subprocess.STDOUT,
                                        shell=True)
                reachable = True
                break
            except CalledProcessError as err:
                msg = ("Exception: %s " % err)
                LOG.error(msg)
                time.sleep(5)
            except Exception:
                time.sleep(5)
        return reachable
    @pecan.expose(method='GET', content_type='application/json')
    def get(self):
        """Method of REST server to handle request get_notifications.

        Returns queued notifications to the config-agent. If any service
        VM IPs are cached, notifications are first pulled from one VM per
        call; otherwise the local queue is drained.

        Returns: JSON string that contains notification data
        """
        global cache_ips
        global notifications
        try:
            if not cache_ips:
                notification_data = jsonutils.dumps(notifications)
                msg = ("Notification sent. Notification Data: %s"
                       % notification_data)
                LOG.info(msg)
                # Queue is drained once returned.
                notifications = []
                return notification_data
            else:
                for ip in cache_ips:
                    notification_response = requests.get(
                        'http://' + str(ip) + ':' + self.vm_port +
                        '/v1/nfp/get_notifications')
                    notification = jsonutils.loads(notification_response.text)
                    notifications.extend(notification)
                    # NOTE(review): the ip is removed and the loop breaks
                    # immediately, so only ONE cached VM is polled per GET;
                    # remaining IPs wait for subsequent calls. The break
                    # also avoids mutating the set while iterating.
                    cache_ips.remove(ip)
                    if ip not in cache_ips:
                        break
                notification_data = jsonutils.dumps(notifications)
                msg = ("Notification sent. Notification Data: %s"
                       % notification_data)
                LOG.info(msg)
                notifications = []
                return notification_data
        except Exception as err:
            pecan.response.status = 400
            msg = ("Failed to get notification_data %s."
                   % str(err).capitalize())
            LOG.error(msg)
            error_data = self._format_description(msg)
            return jsonutils.dumps(error_data)
    @pecan.expose(method='POST', content_type='application/json')
    def post(self, **body):
        """Method of REST server to handle all the post requests.

        If the request context carries a 'device_ip', the body is forwarded
        to that service VM (after a reachability check) and the IP is
        cached for later notification polling; otherwise a notification is
        queued locally.

        :param body: This method expects a dictionary in the HTTP request
            body; note the keyword arguments are ignored and the body is
            re-read from pecan.request.

        Returns: None on success, or a JSON error document on failure
        """
        try:
            global cache_ips
            global notifications
            body = None
            if pecan.request.is_body_readable:
                body = pecan.request.json_body
            # Assuming config list will have only one element
            config_data = body['config'][0]
            info_data = body['info']
            context = info_data['context']
            service_type = info_data['service_type']
            resource = config_data['resource']
            operation = context['operation']
            msg1 = ("Request recieved :: %s" % body)
            LOG.info(msg1)
            if 'device_ip' in context:
                msg3 = ("POSTING DATA TO VM :: %s" % body)
                LOG.info(msg3)
                device_ip = context['device_ip']
                ip = str(device_ip)
                # Deletes are not forwarded to the VM.
                if operation == 'delete':
                    return
                msg5 = ("Verifying vm reachability on ip: %s, port: %s" % (
                    ip, self.vm_port))
                LOG.info(msg5)
                is_vm_reachable = self._verify_vm_reachability(ip,
                                                               self.vm_port)
                if is_vm_reachable:
                    requests.post(
                        'http://' + ip + ':' + self.vm_port + '/v1/nfp/' +
                        self.method_name, data=jsonutils.dumps(body))
                    msg4 = ("requests successfull for data: %s" % body)
                    LOG.info(msg4)
                else:
                    raise Exception(_('VM is not reachable'))
                # Remember the VM so get() can poll it for notifications.
                cache_ips.add(device_ip)
            else:
                if (resource in NFP_SERVICE_LIST):
                    result = "unhandled"
                    self._push_notification(context,
                                            result, config_data, service_type)
                else:
                    result = "Unsupported resource type"
                    self._push_notification(context,
                                            result, config_data, service_type)
        except Exception as err:
            pecan.response.status = 400
            msg = ("Failed to serve HTTP post request %s %s."
                   % (self.method_name, str(err).capitalize()))
            LOG.error(msg)
            error_data = self._format_description(msg)
            return jsonutils.dumps(error_data)
    def _format_description(self, msg):
        """This method formats an error description.

        :param msg: An error message that is to be formatted

        Returns: error_data dictionary
        """
        error_data = {'failure_desc': {'msg': msg}}
        return error_data

View File

@ -1,154 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from gbpservice.nfp.common import constants as const
''' The generic data format that is common for device and
    service configuration.
'''
# Template deep-copied by get_network_function_info(); every leaf value
# is filled in from device_data before the structure is returned.
NFP_DATA_FORMAT = {
    'config': [{
        'resource': '',
        'resource_data': {
            'tenant_id': '',
            'nfds': [{
                'role': 'master',
                'svc_mgmt_fixed_ip': '',
                'networks': [{
                    'type': '',
                    'cidr': '',
                    'gw_ip': '',
                    'ports': [{
                        'fixed_ip': '',
                        'floating_ip': '',
                        'mac': ''}]  # ports
                }]  # networks
            }]  # nfds
        }  # resource_data
    }]  # config
}  # NFP_DATA_FORMAT
def _fill_service_specific_info(nfd, device_data, **kwargs):
    """Apply service-type specific details to a partially built nfd.

    :param nfd: nested dict (one 'nfds' entry of NFP_DATA_FORMAT),
        mutated in place
    :param device_data: device data dictionary
    :param kwargs: 'resource_type' plus, for VPN, a 'network_schema'
        template used to build the management network entry

    Returns: the same nfd dict
    """
    schema = kwargs.get('network_schema')
    svc_type = kwargs.get('resource_type')
    prov_net = nfd['networks'][0]
    prov_port = prov_net['ports'][0]
    if svc_type == const.FIREWALL:
        nfd['svc_mgmt_fixed_ip'] = device_data.get('vm_management_ip')
        prov_port['mac'] = device_data.get('provider_ptg_info')[0]
    elif svc_type == const.VPN:
        nfd['svc_mgmt_fixed_ip'] = device_data.get('fip')
        prov_net['cidr'] = device_data.get('tunnel_local_cidr')
        stitch_net = nfd['networks'][1]
        stitch_net['cidr'] = device_data.get('stitching_cidr')
        stitch_net['gw_ip'] = device_data.get('stitching_gateway')
        stitch_port = stitch_net['ports'][0]
        stitch_port['fixed_ip'] = device_data.get('fixed_ip')
        stitch_port['floating_ip'] = device_data.get('user_access_ip')
        # Only VPN services get an extra management network entry.
        mgmt_net = copy.deepcopy(schema)
        mgmt_net['type'] = const.MANAGEMENT
        mgmt_net['gw_ip'] = device_data.get('mgmt_gw_ip')
        nfd['networks'].append(mgmt_net)
    elif svc_type == const.LOADBALANCERV2:
        nfd['svc_mgmt_fixed_ip'] = device_data.get('floating_ip')
        prov_port['mac'] = device_data.get('provider_interface_mac')
    return nfd
def get_network_function_info(device_data, resource_type):
    ''' Returns a generic configuration format for both device
    and service configuration.

    :param device_data: Data to be formatted. Type: dict
    :param resource_type: (healthmonitor/device_config/firewall/
    vpn/loadbalancer/loadbalancerv2)

    Return: dictionary
    '''
    SERVICE_TYPES = [const.FIREWALL, const.VPN,
                     const.LOADBALANCERV2]
    # Work on a fresh deep copy of the shared template.
    config = copy.deepcopy(NFP_DATA_FORMAT)
    mgmt_ip = device_data.get('mgmt_ip_address')
    tenant_id = device_data.get('tenant_id')
    provider_ip = device_data.get('provider_ip')
    provider_mac = device_data.get('provider_mac')
    provider_cidr = device_data.get('provider_cidr')
    stitching_ip = device_data.get('consumer_ip')
    stitching_mac = device_data.get('consumer_mac')
    stitching_cidr = device_data.get('consumer_cidr')
    stitching_gateway_ip = device_data.get('consumer_gateway_ip')
    resource_data = config['config'][0]['resource_data']
    resource_data['tenant_id'] = tenant_id
    nfd = resource_data['nfds'][0]
    nfd['role'] = 'master'
    nfd['svc_mgmt_fixed_ip'] = mgmt_ip
    if resource_type == const.HEALTHMONITOR_RESOURCE:
        # Health-monitor config needs only management reachability info;
        # return early without building network entries.
        nfd['periodicity'] = device_data.get('periodicity')
        nfd['periodic_polling_reason'] = const.DEVICE_TO_BECOME_DOWN
        nfd['vmid'] = device_data['id']
        config['config'][0]['resource'] = const.HEALTHMONITOR_RESOURCE
        return config
    provider_network = nfd['networks'][0]
    # Empty schema captured before the provider entry is filled in; used
    # as the template for the stitching (and VPN management) networks.
    network_schema = copy.deepcopy(provider_network)
    provider_network['type'] = const.PROVIDER
    provider_network['cidr'] = provider_cidr
    provider_network['gw_ip'] = ''
    stitching_network = copy.deepcopy(network_schema)
    stitching_network['type'] = const.STITCHING
    stitching_network['cidr'] = stitching_cidr
    stitching_network['gw_ip'] = stitching_gateway_ip
    nfd['networks'].append(stitching_network)
    provider_port = provider_network['ports'][0]
    provider_port['fixed_ip'] = provider_ip
    provider_port['floating_ip'] = ''
    provider_port['mac'] = provider_mac
    stitching_port = stitching_network['ports'][0]
    stitching_port['fixed_ip'] = stitching_ip
    stitching_port['floating_ip'] = ''
    stitching_port['mac'] = stitching_mac
    if resource_type in SERVICE_TYPES:
        # Service configs get service-specific details and are returned
        # as bare resource_data with 'nfds' renamed to 'nfs'.
        nfd = _fill_service_specific_info(nfd, device_data,
                                          network_schema=network_schema,
                                          resource_type=resource_type)
        resource_data['nfs'] = resource_data.pop('nfds')
        return config['config'][0]['resource_data']
    # Device config: one interfaces entry plus one routes entry.
    config['config'][0]['resource'] = const.INTERFACE_RESOURCE
    # NOTE(review): .copy() is shallow — both config entries share the
    # same 'resource_data' dict; only the 'resource' keys differ.
    config['config'].append(config['config'][0].copy())
    config['config'][1]['resource'] = const.ROUTES_RESOURCE
    return config

View File

@ -1,137 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_config import cfg
from oslo_log import log as logging
import six
from gbpservice._i18n import _
LOG = logging.getLogger(__name__)
# When True, a bad printf-style substitution in an exception message
# re-raises instead of falling back to the raw message template.
exc_log_opts = [
    cfg.BoolOpt('fatal_exception_format_errors',
                default=False,
                help='Make exception message format errors fatal.'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class NFPException(Exception):
    """Base NFP Exception

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    # Default message template; subclasses override with %(kw)s holes.
    message = _("An unknown exception occurred.")
    # Default HTTP-style status code; subclasses may override.
    code = 500
    headers = {}
    # Whether the message is safe to expose to end users.
    safe = False
    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs
        self.kwargs['message'] = message
        if 'code' not in self.kwargs:
            try:
                self.kwargs['code'] = self.code
            except AttributeError:
                pass
        # Stringify exception values so they can be %-substituted safely.
        for k, v in list(self.kwargs.items()):
            if isinstance(v, Exception):
                self.kwargs[k] = six.text_type(v)
        if self._should_format():
            try:
                # NOTE: formats with the caller's kwargs, not self.kwargs,
                # so the injected 'message'/'code' keys are not required
                # by the template.
                message = self.message % kwargs
            except Exception:
                exc_info = sys.exc_info()
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception('Exception in string format operation')
                for name, value in list(kwargs.items()):
                    LOG.error("%(name)s: %(value)s",
                              {'name': name, 'value': value})
                if CONF.fatal_exception_format_errors:
                    six.reraise(*exc_info)
                # at least get the core message out if something happened
                message = self.message
        elif isinstance(message, Exception):
            message = six.text_type(message)
        self.msg = message
        super(NFPException, self).__init__(message)
    def _should_format(self):
        # Format when no explicit message was passed, or when the template
        # itself wants the message substituted in.
        return self.kwargs['message'] is None or '%(message)' in self.message
    def __unicode__(self):
        # Python 2 unicode protocol support (module still imports six).
        return six.text_type(self.msg)
class NotFound(NFPException):
    """Base class for missing-resource errors (HTTP 404 semantics)."""
    message = _("Resource could not be found.")
    code = 404
    safe = True
class NetworkFunctionNotFound(NotFound):
    """Raised when the referenced network function does not exist."""
    message = _("NetworkFunction %(network_function_id)s could not be found")
class NetworkFunctionInstanceNotFound(NotFound):
    """Raised when the referenced network function instance is missing."""
    message = _("NetworkFunctionInstance %(network_function_instance_id)s "
                "could not be found")
class NetworkFunctionDeviceNotFound(NotFound):
    """Raised when the referenced network function device is missing."""
    message = _("NetworkFunctionDevice %(network_function_device_id)s could "
                "not be found")
class NetworkFunctionDeviceInterfaceNotFound(NotFound):
    """Raised when the referenced device interface is missing."""
    message = _("NetworkFunctionDeviceInterface "
                "%(network_function_device_interface_id)s could "
                "not be found")
class NFPPortNotFound(NotFound):
    """Raised when the referenced NFP port does not exist."""
    message = _("NFP Port %(port_id)s could not be found")
class RequiredDataNotProvided(NFPException):
    """Raised when a request omits data required to process it."""
    message = _("The required data %(required_data)s is missing in "
                "%(request)s")
class IncompleteData(NFPException):
    """Raised when supplied data is present but incomplete."""
    message = _("Data passed is incomplete")
class NotSupported(NFPException):
    """Base class for unsupported-feature errors."""
    message = _("Feature is not supported")
class ComputePolicyNotSupported(NotSupported):
    """Raised for a compute policy NFP cannot honor."""
    message = _("Compute policy %(compute_policy)s is not supported")
class HotplugNotSupported(NotSupported):
    """Raised when a vendor image lacks interface hotplug support."""
    message = _("Vendor %(vendor)s doesn't support hotplug feature")

View File

@ -1,17 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# RPC topics for the NFP orchestrator components. Naming convention:
# "<producer>-<consumer>" direction is encoded in the constant name.
NFP_NSO_TOPIC = "nfp-service-orchestrator"
NFP_NODE_DRIVER_CALLBACK_TOPIC = "nfp-node-driver-callbacks"
NFP_NDO_CONFIGURATOR_TOPIC = "nfp-ndo-configurator"
NFP_CONFIGURATOR_NDO_TOPIC = "nfp-ndo-notification-topic"
NFP_NSO_CONFIGURATOR_TOPIC = "nfp-nso-notification-topic"

View File

@ -1,122 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import six
import yaml
from gbpservice.nfp.common import constants as nfp_constants
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
# Default path of the Neutron ML2 plugin configuration file.
NEUTRON_ML2_CONF = "/etc/neutron/plugins/ml2/ml2_conf.ini"
def _parse_service_flavor_string(service_flavor_str):
service_details = {}
if ',' not in service_flavor_str:
service_details['device_type'] = 'nova'
service_details['service_vendor'] = service_flavor_str
else:
service_flavor_dict = dict(item.split('=') for item
in service_flavor_str.split(','))
service_details = {key.strip(): value.strip() for key, value
in six.iteritems(service_flavor_dict)}
return service_details
def _get_dict_desc_from_string(vpn_svc):
svc_desc = vpn_svc.split(";")
desc = {}
for ele in svc_desc:
s_ele = ele.split("=")
desc.update({s_ele[0]: s_ele[1]})
return desc
def get_vpn_description_from_nf(network_function):
    """Extract the VPN service description of a network function.

    The second line of the network function's 'description' field carries
    a ';'-separated "key=value" string; it is returned both parsed and raw.

    :param network_function: dict with a multi-line 'description' entry
    :returns: tuple of (parsed description dict, raw description string)
    """
    raw_desc = network_function['description'].split('\n')[1]
    return _get_dict_desc_from_string(raw_desc), raw_desc
def is_vpn_in_service_chain(sc_specs):
    """Tell whether any node of the given chain specs is a VPN service.

    :param sc_specs: iterable of service chain spec dicts, each holding
        an 'sc_nodes' list of node dicts
    :returns: True if at least one node's service profile type is VPN
    """
    return any(
        node['sc_service_profile']['service_type'].lower() ==
        nfp_constants.VPN
        for spec in sc_specs
        for node in spec['sc_nodes'])
def get_config_file(service_vendor):
    """Return the day0 config file name for a service vendor.

    Day0 files are named '<service_vendor>.day0' by convention.

    :param service_vendor: vendor name, e.g. 'vyos'
    :returns: day0 file name string
    """
    return ''.join((service_vendor, '.day0'))
def get_service_vm_context(service_vendor, tenant_name=None):
    """Load the day0 (initial) config file for a service VM.

    :param service_vendor: service vendor name
    :param tenant_name: optional tenant name for tenant-specific day0

    - Day0 file name must start with service vendor name followed by
      string '.day0', e.g. Vyos day0 file name must be vyos.day0
    - File format can be of any type like text file, json file etc
    - service vendor specific default day0 config file:
      /etc/nfp/<service_vendor>/<day0_file>, e.g. /etc/nfp/vyos/vyos.day0
    - tenant specific vendor day0 config file:
      /etc/nfp/<service_vendor>/<tenant_name>/<day0_file>,
      e.g. /etc/nfp/vyos/services/vyos.day0

    Returns: parsed day0 config, or None on any failure (logged)
    """
    try:
        default_config_dir = nfp_constants.CONFIG_DIR
        vendor_day0_dir = default_config_dir + service_vendor + '/'
        # Pre-seed with a non-existent path so a missing day0 directory
        # fails below with a clear IOError from open() instead of an
        # UnboundLocalError (the except still returns None either way).
        day0_config_file = '/fake_file_path'
        if tenant_name:
            tenant_day0_dir = vendor_day0_dir + tenant_name + '/'
            if os.path.isdir(tenant_day0_dir):
                day0_config_file = (tenant_day0_dir +
                                    get_config_file(service_vendor))
        else:
            if os.path.isdir(vendor_day0_dir):
                day0_config_file = (vendor_day0_dir +
                                    get_config_file(service_vendor))
        with open(day0_config_file) as _file:
            try:
                # safe_load: day0 files are plain data; yaml.load without
                # an explicit Loader can construct arbitrary Python
                # objects from untrusted file content.
                svm_context = yaml.safe_load(_file)
            except Exception as e:
                msg = ("Failed yaml load file %s. Reason: %s"
                       % (day0_config_file, e))
                raise Exception(msg)
        msg = ("Loaded day0 config file %s for service_vendor %s,"
               "tenant_name %s" % (day0_config_file, service_vendor,
                                   tenant_name))
        LOG.info(msg)
        return svm_context
    except Exception as ex:
        msg = ("Failed to read day0 config file, ERROR: %s" % ex)
        LOG.error(msg)
        return None

View File

@ -1,54 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg as oslo_config
CONF = oslo_config.CONF
# Options registered under the per-module section handed to init().
NFP_OPTS = [
    oslo_config.IntOpt(
        'workers',
        default=1,
        help='Number of event worker process to be created.'
    ),
    oslo_config.ListOpt(
        'nfp_modules_path',
        default='gbpservice.nfp.core.test',
        help='Path for NFP modules.'
        'All modules from this path are autoloaded by framework'
    ),
    oslo_config.StrOpt(
        'backend',
        default='rpc',
        help='Backend Support for communicationg with configurator.'
    )
]
# Options registered in the default section of every nfp process.
EXTRA_OPTS = [
    oslo_config.StrOpt(
        'logger_class',
        default='gbpservice.nfp.core.log.WrappedLogger',
        help='logger class path to handle logging seperately.'
    ),
]
def init(module, args, **kwargs):
    """Initialize the configuration.

    Registers the common and per-module options, parses *args*
    and returns the populated global CONF object.
    """
    conf = oslo_config.CONF
    conf.register_opts(EXTRA_OPTS)
    conf.register_opts(NFP_OPTS, module)
    conf(args=args, project='nfp',
         version='%%(prog)s %s' % ('version'),
         **kwargs)
    return conf

View File

@ -1,68 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg as oslo_cfg
from oslo_log import log as oslo_logging
oslo_logging.register_options(oslo_cfg.CONF)
class Object(object):
    """Generic empty type usable as an ad-hoc attribute container."""
    pass
def init():
    """Initialize logging. """
    # Configure oslo logging under the 'nfp' product name.
    oslo_logging.setup(oslo_cfg.CONF, "nfp")
def _is_class(obj):
return 'class' in str(type(obj))
def _name(obj):
"""Helper method to construct name of an object.
'module.class' if object is of type 'class'
'module.class.method' if object is of type 'method'
"""
# If it is callable, then it is a method
if callable(obj):
return "{0}.{1}.{2}".format(
type(obj.__self__).__module__,
type(obj.__self__).__name__,
obj.__name__)
# If obj is of type class
elif _is_class(obj):
return "{0}.{1}".format(
type(obj).__module__,
type(obj).__name__)
else:
return obj.__name__
def identify(obj):
    """Helper method to display identity an object.

    Useful for logging. Decodes based on the type of obj.
    Supports 'class' & 'method' types for now.

    :param obj: Object (Class/Method supported.)
    Returns: String. Identification of the object.
    """
    prefix = getattr(obj, '_NAME_', '')
    try:
        return "([%s] %s)" % (prefix, _name(obj))
    except Exception:
        # Some unknown type, returning empty
        return ""

View File

@ -1,96 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
class LogContext(object):
    """Wraps per-request logging metadata for serialization."""

    def __init__(self, data):
        self.data = data

    def purge(self):
        """Return a trimmed dict of log fields; ids default to '-'.

        A falsy data attribute is returned unchanged.
        """
        data = self.data
        if not data:
            return data
        return {
            'meta_id': data.get('meta_id', '-'),
            'nfi_id': data.get('nfi_id', '-'),
            'nfd_id': data.get('nfd_id', '-'),
            'path': data.get('path'),
            'auth_token': data.get('auth_token'),
            'namespace': data.get('namespace'),
        }
class CoreContext(object):
    """Couples the purged log context with the event descriptor."""

    def __init__(self, data):
        self.data = data

    def purge(self):
        log_ctx = LogContext(self.data.get('log_context')).purge()
        return {'log_context': log_ctx,
                'event_desc': self.data.get('event_desc')}
class NfpContext(object):
    """Top-level per-thread context; purge() returns a serializable
    snapshot built by CoreContext."""
    def __init__(self, data):
        self.data = data
    def purge(self):
        # Delegate trimming of log/event fields to CoreContext.
        return CoreContext(self.data).purge()
# Per-thread storage; each thread carries its own NfpContext instance.
Context = threading.local()
def init_log_context():
    """Return a fresh log-context dict with placeholder values."""
    log_context = dict.fromkeys(('meta_id', 'nfi_id', 'nfd_id', 'path'), '-')
    log_context.update(auth_token=None, namespace=None)
    return log_context
def init(data=None):
    """(Re)initialize the thread-local NFP context.

    :param data: optional seed dict; missing 'log_context' and
        'event_desc' entries are filled with defaults.
    Returns the context data dict now bound to this thread.
    """
    if not data:
        data = {}
    # Membership test directly on the dict; materializing the key
    # list first was an unnecessary copy.
    if 'log_context' not in data:
        data['log_context'] = init_log_context()
    if 'event_desc' not in data:
        data['event_desc'] = {}
    Context.context = NfpContext(data)
    context = getattr(Context, 'context')
    return context.data
def get():
    """Return the current thread's context data, initializing if absent."""
    try:
        return Context.context.data
    except AttributeError:
        return init()
def purge():
    """Return a purged snapshot of this thread's context.

    Initializes a fresh context first when none exists yet.
    """
    if not hasattr(Context, 'context'):
        init()
    return Context.context.purge()

View File

@ -1,726 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import collections
import multiprocessing
import operator
import os
import pickle
import sys
import time
import zlib
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg as oslo_config
from oslo_service import service as oslo_service
import six
from gbpservice.nfp.core import cfg as nfp_cfg
from gbpservice.nfp.core import common as nfp_common
from gbpservice.nfp.core import context
from gbpservice.nfp.core import event as nfp_event
from gbpservice.nfp.core import launcher as nfp_launcher
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.core import manager as nfp_manager
from gbpservice.nfp.core import rpc as nfp_rpc
from gbpservice.nfp.core import worker as nfp_worker
# REVISIT (mak): Unused, but needed for orchestrator,
# remove from here and add in orchestrator
from neutron.common import config
LOG = nfp_logging.getLogger(__name__)
# Short aliases for the multiprocessing primitives used to fork workers.
PIPE = multiprocessing.Pipe
PROCESS = multiprocessing.Process
identify = nfp_common.identify
# Double-ended queue; used to stash events that failed to send over a pipe.
deque = collections.deque
# REVISIT (mak): fix to pass compliance check
config = config
"""Implements NFP service.
Base class for nfp modules, modules can invoke methods
of this class to interact with core.
"""
class NfpService(object):
    """Base service API exposed to nfp modules (see module docstring)."""
    def __init__(self, conf):
        self._conf = conf
        # Registered event handlers, keyed by event id.
        self._event_handlers = nfp_event.NfpEventHandlers()
        # {topic: {'agents': [...]}} of registered rpc agents.
        self._rpc_agents = {}
    def _make_new_event(self, event):
        """Make a new event from the object passed. """
        # Rebuild the event from its __dict__, then restore the original
        # descriptor fields on top of the freshly generated descriptor.
        desc = event.desc
        event_dict = event.__dict__
        event = self.create_event(**event_dict)
        event.desc.from_desc(desc)
        return event
    def get_event_handlers(self):
        # Accessor for the event handler table.
        return self._event_handlers
    def register_events(self, event_descs, priority=0):
        """Register event handlers with core. """
        nfp_context = context.get()
        # Namespace of the calling module scopes its handlers.
        module = nfp_context['log_context']['namespace']
        # REVISIT (mak): change name to register_event_handlers() ?
        for event_desc in event_descs:
            self._event_handlers.register(
                event_desc.id, event_desc.handler,
                module, priority=priority)
    def register_rpc_agents(self, agents):
        """Register rpc handlers with core. """
        for agent in agents:
            topic = agent.topic
            # EAFP: append when the topic entry exists, create otherwise.
            try:
                self._rpc_agents[topic]['agents'].append(agent)
            except KeyError:
                self._rpc_agents[topic] = {}
                self._rpc_agents[topic]['agents'] = [agent]
    def new_event(self, **kwargs):
        """Define and return a new event. """
        return self.create_event(**kwargs)
    def create_event(self, **kwargs):
        """To create a new event.

        Returns the Event, or None when construction fails its
        internal assertions.
        """
        event = None
        try:
            event = nfp_event.Event(**kwargs)
        except AssertionError as aerr:
            message = "%s" % (aerr)
            LOG.exception(message)
        return event
    def post_graph(self, graph_nodes, root_node):
        # Post all child events first, the root event last.
        for node in graph_nodes:
            self.post_event(node)
        self.post_event(root_node)
    def post_event(self, event, target=None):
        """Post an event.
        As a base class, it only does the descriptor preparation.
        NfpController class implements the required functionality.
        """
        handler, module = (
            self._event_handlers.get_event_handler(event.id, module=target))
        assert handler, "No handler registered for event %s" % (event.id)
        event.desc.type = nfp_event.SCHEDULE_EVENT
        event.desc.flag = nfp_event.EVENT_NEW
        event.desc.pid = os.getpid()
        event.desc.target = module
        if event.lifetime == -1:
            event.lifetime = nfp_event.EVENT_DEFAULT_LIFETIME
        if not event.context:
            # Log nfp_context for event handling code
            event.context = context.purge()
        event.desc.path_type = event.context['event_desc'].get('path_type')
        event.desc.path_key = event.context['event_desc'].get('path_key')
        return event
    # REVISIT (mak): spacing=0, caller must explicitly specify
    def poll_event(self, event, spacing=2, max_times=sys.maxsize):
        """To poll for an event.
        As a base class, it only does the polling
        descriptor preparation.
        NfpController class implements the required functionality.
        """
        nfp_context = context.get()
        module = nfp_context['log_context']['namespace']
        handler, ev_spacing = (
            self._event_handlers.get_poll_handler(event.id, module=module))
        assert handler, "No poll handler found for event %s" % (event.id)
        assert spacing or ev_spacing, "No spacing specified for polling"
        # Decorator-declared spacing wins over the argument.
        if ev_spacing:
            spacing = ev_spacing
        if event.desc.type != nfp_event.POLL_EVENT:
            event = self._make_new_event(event)
            event.desc.uuid = event.desc.uuid + ":" + "POLL_EVENT"
        event.desc.type = nfp_event.POLL_EVENT
        event.desc.target = module
        event.desc.flag = None
        kwargs = {'spacing': spacing,
                  'max_times': max_times}
        poll_desc = nfp_event.PollDesc(**kwargs)
        setattr(event.desc, 'poll_desc', poll_desc)
        if not event.context:
            # Log nfp_context for event handling code
            event.context = context.purge()
        event.desc.path_type = event.context['event_desc'].get('path_type')
        event.desc.path_key = event.context['event_desc'].get('path_key')
        return event
    def event_complete(self, event, result=None):
        """To declare and event complete. """
        try:
            # Ensure the result can cross the process pipe.
            pickle.dumps(result)
            uuid = event.desc.uuid
            event = self._make_new_event(event)
            event.desc.uuid = uuid
            event.sequence = False
            event.desc.flag = nfp_event.EVENT_COMPLETE
            event.result = result
            event.context = {}
            event.data = {}
            return event
        except Exception as e:
            raise e
    def create_work(self, work):
        """Create a work, collection of events. """
        pass
"""NFP Controller class mixin other nfp classes.
Nfp modules get the instance of this class when
they are initialized.
Nfp modules interact with core using the methods
of 'Service' class, whose methods are implemented
in this class.
Also, it mixes the other nfp core classes to complete
a nfp module request.
"""
class NfpController(nfp_launcher.NfpLauncher, NfpService):
    """Singleton controller mixing launcher and service behavior
    (see module docstring above the class)."""
    def __new__(cls, *args, **kwargs):
        # Singleton implementation: unless singleton=False is passed,
        # every instantiation yields the same object; once constructed,
        # __init__ is swapped for the no-op __inited__.
        singleton = kwargs.get('singleton', True)
        if singleton is False:
            return object.__new__(cls, *args, **kwargs)
        if not hasattr(cls, '_instance'):
            cls._instance = object.__new__(cls, *args, **kwargs)
        else:
            cls.__init__ = cls.__inited__
        return cls._instance
    def __inited__(self, conf):
        # Replacement __init__ applied after the singleton exists.
        pass
    def __init__(self, conf, singleton=True):
        # Init the super classes.
        nfp_launcher.NfpLauncher.__init__(self, conf)
        NfpService.__init__(self, conf)
        # For book keeping
        self._worker_process = {}
        self._conf = conf
        self._pipe = None
        # Queue to stash events.
        self._stashq = deque()
        self._manager = nfp_manager.NfpResourceManager(conf, self)
        self._worker = nfp_worker.NfpWorker(conf)
        # ID of process handling this controller obj
        self.PROCESS_TYPE = "distributor"
    def compress(self, event):
        """Zip event context + data in place before pipe transfer."""
        # REVISIT (mak) : zip only if length is > than threshold (1k maybe)
        if not event.zipped:
            event.zipped = True
            data = {'context': event.context}
            event.context = {}
            if event.data:
                data['data'] = event.data
            event.data = zlib.compress(str(data))
    def decompress(self, event):
        """Inverse of compress(); restores event context and data."""
        if event.zipped:
            try:
                data = ast.literal_eval(
                    zlib.decompress(event.data))
                event.data = data.get('data')
                event.context = data['context']
                event.zipped = False
            except Exception as e:
                message = "Failed to decompress event data, Reason: %r" % (
                    e)
                LOG.error(message)
                raise e
    def is_picklable(self, event):
        """To check event is picklable or not.
        For sending event through pipe it must be picklable
        """
        try:
            pickle.dumps(event)
        except Exception as e:
            message = "(event - %s) is not picklable, Reason: %s" % (
                event.identify(), e)
            assert False, message
    def pipe_recv(self, pipe):
        """Receive one event from a pipe; returns None on failure."""
        event = None
        try:
            event = pipe.recv()
        except Exception as exc:
            LOG.debug("Failed to receive event from pipe "
                      "with exception - %r - will retry..", (exc))
            eventlet.greenthread.sleep(1.0)
        if event:
            self.decompress(event)
        return event
    def pipe_send(self, pipe, event, resending=False):
        """Send an event over a pipe.

        Returns True when sent; on failure the event is stashed for the
        resender task (unless this call is itself a resend) and False
        is returned.
        """
        self.is_picklable(event)
        try:
            # If there is no reader yet
            if not pipe.poll():
                self.compress(event)
                pipe.send(event)
                return True
        except Exception as e:
            message = ("Failed to send event - %s via pipe"
                       "- exception - %r - will resend" % (
                           event.identify(), e))
            LOG.debug(message)
        # If the event is being sent by resending task
        # then dont append here, task will put back the
        # event at right location
        if not resending:
            # If couldnt send event.. stash it so that
            # resender task will send event again
            self._stashq.append(event)
        return False
    def _fork(self, args):
        # Fork a daemonized child process running self.child().
        proc = PROCESS(target=self.child, args=args)
        proc.daemon = True
        proc.start()
        return proc
    def _resending_task(self):
        """Daemon task: drain the stash queue, retrying failed sends."""
        while(True):
            try:
                event = self._stashq.popleft()
                if self.PROCESS_TYPE != "worker":
                    evm = self._manager._get_event_manager(event.desc.worker)
                    LOG.debug("Resending event - %s", (event.identify()))
                    sent = self.pipe_send(evm._pipe, event, resending=True)
                else:
                    sent = self.pipe_send(self._pipe, event, resending=True)
                # Put back in front
                if not sent:
                    self._stashq.appendleft(event)
            except IndexError:
                # Stash queue empty - nothing to resend.
                pass
            except Exception as e:
                message = ("Unexpected exception - %r - while"
                           "sending event - %s" % (e, event.identify()))
                LOG.error(message)
            eventlet.greenthread.sleep(0.1)
    def _manager_task(self):
        while True:
            # Run 'Manager' here to monitor for workers and
            # events.
            self._manager.manager_run()
            eventlet.greenthread.sleep(0.1)
    def _update_manager(self):
        # Register every forked child and its pipe with the manager.
        childs = self.get_childrens()
        for pid, wrapper in six.iteritems(childs):
            pipe = wrapper.child_pipe_map[pid]
            # Inform 'Manager' class about the new_child.
            self._manager.new_child(pid, pipe)
    def _process_event(self, event):
        self._manager.process_events([event])
    def get_childrens(self):
        # oslo_process.ProcessLauncher has this dictionary,
        # 'NfpLauncher' derives oslo_service.ProcessLauncher
        return self.children
    def fork_child(self, wrap):
        """Forks a child.
        Creates a full duplex pipe for child & parent
        to communicate.
        Returns: Multiprocess object.
        """
        parent_pipe, child_pipe = PIPE(duplex=True)
        # Registered event handlers of nfp module.
        # Workers need copy of this data to dispatch an
        # event to module.
        proc = self._fork(args=(wrap.service, parent_pipe, child_pipe, self))
        message = ("Forked a new child: %d"
                   "Parent Pipe: % s, Child Pipe: % s") % (
            proc.pid, str(parent_pipe), str(child_pipe))
        LOG.info(message)
        try:
            wrap.child_pipe_map[proc.pid] = parent_pipe
        except AttributeError:
            # First child for this wrapper: create the map lazily.
            setattr(wrap, 'child_pipe_map', {})
            wrap.child_pipe_map[proc.pid] = parent_pipe
        self._worker_process[proc.pid] = proc
        return proc.pid
    def launch(self, workers):
        """Launch the controller.
        Uses Oslo Service to launch with configured #of workers.
        Spawns a manager task to manager nfp events & workers.
        :param workers: #of workers to be launched
        Returns: None
        """
        super(NfpController, self).launch_service(
            self._worker, workers=workers)
    def post_launch(self):
        """Post processing after workers launch.
        Tasks which needs to run only on distributor
        process and any other resources which are not
        expected to be forked are initialized here.
        """
        self._update_manager()
        # create and launch rpc service agent for each topic
        for key, value in six.iteritems(self._rpc_agents):
            agents = value['agents']
            # Register NFP RPC managers in priority order,
            # so that on rpc, oslo invokes them in the given order,
            # This is required for NFP where multiple managers of
            # different priority register for same rpc.
            sorted_agents = sorted(
                agents, key=operator.attrgetter('priority'), reverse=True)
            rpc_managers = [agent.manager for agent in sorted_agents]
            service = nfp_rpc.RpcService(topic=key, managers=rpc_managers)
            # Launch rpc_service_agent
            # Use threads for launching service
            launcher = oslo_service.launch(
                self._conf, service, workers=None)
            self._rpc_agents[key]['service'] = service
            self._rpc_agents[key]['launcher'] = launcher
        # One task to manage the resources - workers & events.
        eventlet.spawn_n(self._manager_task)
        eventlet.spawn_n(self._resending_task)
        # Oslo periodic task for state reporting
        nfp_rpc.ReportStateTask(self._conf, self)
    def report_state(self):
        """Invoked by report_task to report states of all agents. """
        for value in list(self._rpc_agents.values()):
            for agent in value['agents']:
                agent.report_state()
    def _verify_graph(self, graph):
        """Checks for sanity of a graph definition.
        Checks if the same node is root node for
        two subgraphs.
        Unwinds graph and return two values -
        graph signature and graph elements.
        """
        graph_sig = {}
        graph_nodes = []
        for parent, childs in six.iteritems(graph):
            puuid = parent.desc.uuid
            assert puuid not in list(graph_sig.keys()), (
                "Event - %s is already root of subgraph - %s" % (
                    puuid, str(graph_sig[puuid])))
            graph_sig[puuid] = []
            for child in childs:
                graph_sig[puuid].append(child.desc.uuid)
                graph_nodes.append(child)
        return graph_sig, graph_nodes
    def post_graph(self, graph, root, graph_str=''):
        """Post a new graph into the system.
        Graph is definition of events to be
        dispatched in a particular pattern.
        """
        graph_sig, graph_nodes = self._verify_graph(graph)
        graph_data = {
            'id': root.desc.uuid + "_" + graph_str,
            'root': root.desc.uuid,
            'data': graph_sig}
        # Tag every node (and the root) with the graph membership data.
        for graph_node in graph_nodes:
            graph_node.desc.graph = graph_data
        root.desc.graph = graph_data
        super(NfpController, self).post_graph(graph_nodes, root)
    def post_event(self, event, target=None):
        """Post a new event into the system.
        If distributor(main) process posts an event, it
        is delivered to the worker.
        If worker posts an event, it is deliverd to
        distributor for processing, where it can decide
        to loadbalance & sequence events.
        :param event: Object of 'Event' class.
        Returns: None
        """
        event = super(NfpController, self).post_event(event, target=target)
        message = "(event - %s) - New event" % (event.identify())
        LOG.debug(message)
        if self.PROCESS_TYPE == "worker":
            # Event posted in worker context, send it to parent process
            message = ("(event - %s) - new event in worker"
                       "posting to distributor process") % (event.identify())
            LOG.debug(message)
            # Send it to the distributor process
            self.pipe_send(self._pipe, event)
        else:
            message = ("(event - %s) - new event in distributor"
                       "processing event") % (event.identify())
            LOG.debug(message)
            self._manager.process_events([event])
    def poll_event(self, event, spacing=2, max_times=sys.maxsize):
        """Post a poll event into the system.
        Core will poll for this event to timeout, after
        timeout registered handler of module is invoked.
        :param event: Object of 'Event' class.
        :param spacing: Spacing at which event should timeout.
        :param max_times: Max #of times the event can timeout,
        after the max_times, event is auto cancelled by
        the core and the registered handler of module
        is invoked.
        Returns: None
        """
        # Poll event can only be posted by worker not by listener process
        if self.PROCESS_TYPE != "worker":
            message = "(event - %s) - poll event in distributor" % (
                event.identify())
            LOG.debug(message)
            # 'Service' class to construct the poll event descriptor
            event = super(NfpController, self).poll_event(
                event, spacing=spacing, max_times=max_times)
            self._manager.process_events([event])
        else:
            '''
            # Only event which is delivered to a worker can be polled for, coz,
            # after event timeouts, it should be delivered to the same worker,
            # hence the check to make sure the correct event is been asked for
            # polling.
            assert event.desc.worker, "No worker for event %s" % (
                event.identify())
            LOG.debug("(event - %s) - poll event in worker" %
                      (event.identify()))
            '''
            # 'Service' class to construct the poll event descriptor
            event = super(NfpController, self).poll_event(
                event, spacing=spacing, max_times=max_times)
            # Send to the distributor process.
            self.pipe_send(self._pipe, event)
    def stop_poll_event(self, key, id):
        """To stop the running poll event
        :param key: key of polling event
        :param id: id of polling event
        """
        key = key + ":" + id + ":" + "POLL_EVENT"
        event = self.new_event(id='STOP_POLL_EVENT', data={'key': key})
        event.desc.type = nfp_event.POLL_EVENT
        event.desc.flag = nfp_event.POLL_EVENT_STOP
        if self.PROCESS_TYPE == "worker":
            self.pipe_send(self._pipe, event)
        else:
            self._manager.process_events([event])
    def path_complete_event(self):
        """Create event for path completion
        """
        nfp_context = context.get()
        event = self.new_event(id='PATH_COMPLETE')
        event.desc.path_type = nfp_context['event_desc'].get('path_type')
        event.desc.path_key = nfp_context['event_desc'].get('path_key')
        if self.PROCESS_TYPE == "worker":
            self.pipe_send(self._pipe, event)
        else:
            self._manager.process_events([event])
    def event_complete(self, event, result=None):
        """To mark an event complete.
        Module can invoke this API to mark an event complete.
        a) Next event in sequence will be scheduled.
        b) Event from cache is removed.
        c) Polling for event is stopped.
        d) If the worker dies before event is complete, the
        event is scheduled to other available workers.
        :param event: Obj of 'Event' class
        Returns: None
        """
        message = "(event - %s) complete" % (event.identify())
        LOG.debug(message)
        event = super(NfpController, self).event_complete(event, result=result)
        if self.PROCESS_TYPE == "distributor":
            self._manager.process_events([event])
        else:
            # Send to the distributor process.
            self.pipe_send(self._pipe, event)
def load_nfp_modules(conf, controller):
    """Load nfp modules from every configured modules path."""
    pymodules = []
    for path in conf.nfp_modules_path:
        pymodules += load_nfp_modules_from_path(conf, controller, path)
    return pymodules
def load_nfp_modules_from_path(conf, controller, path):
    """ Load all nfp modules from configured directory.

    Imports every .py file under *path*, calls its nfp_module_init()
    and returns the list of successfully initialized modules.
    """
    pymodules = []
    nfp_context = context.get()
    try:
        # level=0 (absolute import): the legacy value -1 is only valid
        # on Python 2 and raises ValueError on Python 3.
        base_module = __import__(path,
                                 globals(), locals(), ['modules'], 0)
        modules_dir = base_module.__path__[0]
        try:
            files = os.listdir(modules_dir)
            for pyfile in set([f for f in files if f.endswith(".py")]):
                try:
                    pymodule = __import__(path,
                                          globals(), locals(),
                                          [pyfile[:-3]], 0)
                    # getattr instead of eval(): same attribute lookup
                    # without executing a constructed source string.
                    pymodule = getattr(pymodule, pyfile[:-3])
                    try:
                        namespace = pyfile[:-3].split(".")[-1]
                        nfp_context['log_context']['namespace'] = namespace
                        pymodule.nfp_module_init(controller, conf)
                        pymodules += [pymodule]
                        message = "(module - %s) - Initialized" % (
                            identify(pymodule))
                        LOG.debug(message)
                    except AttributeError:
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        message = "Traceback: %s" % (exc_traceback)
                        LOG.error(message)
                        message = ("(module - %s) - does not implement"
                                   "nfp_module_init()") % (identify(pymodule))
                        LOG.warning(message)
                except ImportError:
                    message = "Failed to import module %s" % (pyfile)
                    LOG.error(message)
        except OSError:
            message = "Failed to read files from %s" % (modules_dir)
            LOG.error(message)
    except ImportError:
        message = "Failed to import module from path %s" % (
            path)
        LOG.error(message)
    return pymodules
def controller_init(conf, nfp_controller):
    """Launch controller workers, wait for them, then run post-launch."""
    nfp_controller.launch(conf.workers)
    # Allow roughly one second per worker (plus one) for workers to come up.
    time.sleep(conf.workers + 1)
    nfp_controller.post_launch()
def nfp_modules_post_init(conf, nfp_modules, nfp_controller):
    """Invoke nfp_module_post_init() on every module, skipping absentees."""
    log_context = context.get()['log_context']
    for mod in nfp_modules:
        try:
            log_context['namespace'] = mod.__name__.split(".")[-1]
            mod.nfp_module_post_init(nfp_controller, conf)
        except AttributeError:
            LOG.debug(("(module - %s) - does not implement"
                       "nfp_module_post_init(), ignoring") % (identify(mod)))
def extract_module(args):
    """Pop '--module <name>' out of cmd args; exit when it is missing.

    Returns (remaining_args, module_name).
    """
    if '--module' not in args:
        print("--module <name> missing from cmd args")
        sys.exit(-1)
    module = args[args.index('--module') + 1]
    args.remove('--module')
    args.remove(module)
    return args, module
def load_module_opts(conf):
    """Promote every option of the <module> section to the default section.

    :param conf: oslo config object whose 'module' attribute names the
        section to promote.
    """
    module = conf.module
    section = getattr(conf, module)
    # getattr / dict access instead of eval(): identical lookups without
    # executing dynamically-built source strings.
    # register each opt from <module> section
    # to default section.
    for module_opt in section.keys():
        module_cfg_opt = section._group._opts[module_opt]['opt']
        module_cfg_opt_value = getattr(section, module_opt)
        conf.register_opt(module_cfg_opt)
        conf.set_override(module_opt, module_cfg_opt_value)
def main():
    """Entry point of an nfp process.

    Parses cmdline args, loads configuration and nfp modules,
    launches the controller with its workers and then blocks
    until every execution context completes.
    """
    context.init()
    args, module = extract_module(sys.argv[1:])
    conf = nfp_cfg.init(module, args)
    conf.module = module
    load_module_opts(conf)
    nfp_logging.init_logger(oslo_config.CONF.logger_class)
    nfp_common.init()
    nfp_controller = NfpController(conf)
    # Load all nfp modules from path configured
    nfp_modules = load_nfp_modules(conf, nfp_controller)
    # Init the controller, launch required contexts
    controller_init(conf, nfp_controller)
    # post_init of each module
    nfp_modules_post_init(conf, nfp_modules, nfp_controller)
    # Wait for every exec context to complete
    nfp_controller.wait()

View File

@ -1,412 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import multiprocessing
import uuid as pyuuid
from gbpservice.nfp.core import common as nfp_common
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.core import module as nfp_api
from gbpservice.nfp.core import sequencer as nfp_seq
LOG = nfp_logging.getLogger(__name__)
identify = nfp_common.identify
"""Event Types """
# Event dispatched to a worker through the scheduler.
SCHEDULE_EVENT = 'schedule_event'
# Event the core polls for timeouts at a given spacing.
POLL_EVENT = 'poll_event'
# Event stashed for deferred handling.
STASH_EVENT = 'stash_event'
"""Event Flag """
EVENT_NEW = 'new_event'
EVENT_COMPLETE = 'event_done'
EVENT_ACK = 'event_ack'
POLL_EVENT_STOP = 'poll_event_stop'
# Default event lifetime in seconds when a module does not specify one.
EVENT_DEFAULT_LIFETIME = 600
"""Sequencer status. """
SequencerEmpty = nfp_seq.SequencerEmpty
SequencerBusy = nfp_seq.SequencerBusy
deque = collections.deque
"""Defines poll descriptor of an event.
Holds all of the polling information of an
event.
"""
class PollDesc(object):
    """Polling attributes of an event: timeout spacing, maximum number
    of polls, and a back-reference (UUID) to the original event."""

    def __init__(self, **kwargs):
        get = kwargs.get
        # Event times out every 'spacing' seconds.
        self.spacing = get('spacing')
        # Auto-cancelled after this many timeouts.
        self.max_times = get('max_times')
        # UUID of the event this poll descriptor refers to.
        self.ref = get('ref')
"""Defines the descriptor of an event.
Holds the metadata for an event. Useful
for event processing. Not exposed to nfp modules.
"""
class EventDesc(object):
    """Internal metadata descriptor of an event; not exposed to modules."""

    def __init__(self, **kwargs):
        # uuid is '<key>:<id>'; key defaults to a fresh uuid4, id to ''.
        key = kwargs.get('key', pyuuid.uuid4())
        self.uuid = "%s:%s" % (key, kwargs.get('id', ''))
        # see 'Event Types'
        self.type = kwargs.get('type')
        # see 'Event Flag'
        self.flag = kwargs.get('flag')
        # PID of worker which is handling this event
        self.worker = kwargs.get('worker')
        # Polling descriptor of event
        self.poll_desc = kwargs.get('poll_desc')
        # Target module to which this event must be delivered
        self.target = None
        # ID of graph of which this event is part of
        self.graph = None
        # Type of path to which this event belongs CREATE/UPDATE/DELETE
        self.path_type = kwargs.get('path_type')
        # Unique key for the path
        self.path_key = kwargs.get('path_key')
        # Marks whether an event was acked or not
        self.acked = False

    def from_desc(self, desc):
        """Copy the mutable scheduling fields from another descriptor."""
        for attr in ('type', 'flag', 'worker', 'poll_desc',
                     'path_type', 'path_key'):
            setattr(self, attr, getattr(desc, attr))

    def to_dict(self):
        """Serialize the descriptor to a plain dict."""
        return dict(uuid=self.uuid,
                    type=self.type,
                    flag=self.flag,
                    worker=self.worker,
                    poll_desc=self.poll_desc,
                    path_type=self.path_type,
                    path_key=self.path_key)
"""Defines the event structure.
Nfp modules need to create object of the class
to create an event.
"""
class Event(object):
    """Event structure that nfp modules instantiate to raise an event."""

    def __init__(self, **kwargs):
        # ID of event as passed by module.
        self.id = kwargs.get('id')
        # Data blob.
        self.data = kwargs.get('data')
        # Whether to sequence this event w.r.t other related events.
        self.sequence = kwargs.get('serialize', False)
        # Unique key to be associated with the event.
        self.key = kwargs.get('key')
        # Binding key to define relation between different events.
        self.binding_key = kwargs.get('binding_key')
        # Handler of the event.
        self.handler = kwargs.get('handler')
        # Lifetime of the event in seconds.
        self.lifetime = kwargs.get('lifetime', -1)
        # Identifies whether event.data is zipped.
        self.zipped = False
        # Log metadata context.
        self.context = kwargs.get('context', {})
        self.desc = self._build_desc(kwargs.get('desc_dict'))
        self.result = None
        # A sequenced event must carry a binding key.
        assert not (self.sequence is True and self.binding_key is None)

    def _build_desc(self, desc_dict):
        # Prepare the base descriptor from an optional descriptor dict.
        if desc_dict:
            desc_dict['key'] = self.key
            desc_dict['id'] = self.id
            return EventDesc(**desc_dict)
        if self.key:
            return EventDesc(**{'key': self.key, 'id': self.id})
        return EventDesc(**{'id': self.id})

    def identify(self):
        if hasattr(self, 'desc'):
            return "uuid=%s,id=%s,type=%s,flag=%s" % (
                self.desc.uuid, self.id, self.desc.type, self.desc.flag)
        return "id=%s" % (self.id)
"""Table of event handler's.
Maintains cache of every module's event handlers.
Also, maintains the polling against event_id
which are provided as decorators.
"""
class NfpEventHandlers(object):
    def __init__(self):
        # {'event.id': [(event_handler, poll_handler, spacing)]
        self._event_desc_table = {}
    def _log_meta(self, event_id, event_handler=None):
        # Build a log prefix identifying event id and handler.
        if event_handler:
            return "(event_id - %s) - (event_handler - %s)" % (
                event_id, identify(event_handler))
        else:
            return "(event_id - %s) - (event_handler - None)" % (event_id)
    def register(self, event_id, event_handler, module, priority=0):
        """Registers a handler for event_id.
        Also fetches the decorated poll handlers if any
        for the event and caches it.
        """
        if not isinstance(event_handler, nfp_api.NfpEventHandler):
            message = "%s - Handler is not instance of NfpEventHandler" % (
                self._log_meta(event_id, event_handler))
            LOG.error(message)
            return
        try:
            poll_desc_table = event_handler.get_poll_desc_table()
            poll_handler = poll_desc_table[event_id]
            spacing = poll_handler._spacing
        except KeyError:
            # Default the poll handler and spacing values
            poll_handler = event_handler.handle_poll_event
            spacing = 0
        try:
            # The inner excepts create a missing module/priority bucket;
            # a completely unknown event_id falls through to the outer
            # except, which creates the full table entry.
            try:
                self._event_desc_table[event_id]['modules'][module].append(
                    (event_handler, poll_handler, spacing, module))
            except KeyError:
                self._event_desc_table[event_id]['modules'][module] = [
                    (event_handler, poll_handler, spacing, module)]
            try:
                self._event_desc_table[event_id]['priority'][priority].append(
                    (event_handler, poll_handler, spacing, module))
            except KeyError:
                self._event_desc_table[event_id]['priority'][priority] = [
                    (event_handler, poll_handler, spacing, module)]
        except KeyError:
            self._event_desc_table[event_id] = {'modules': {}, 'priority': {}}
            self._event_desc_table[event_id]['modules'][module] = [
                (event_handler, poll_handler, spacing, module)]
            self._event_desc_table[event_id]['priority'][priority] = [
                (event_handler, poll_handler, spacing, module)]
        message = "%s - Registered handler" % (
            self._log_meta(event_id, event_handler))
        LOG.debug(message)
    def get_event_handler(self, event_id, module=None):
        """Get the handler for the event_id.

        Looks up by module when given, otherwise picks the handler of
        highest priority. Returns (handler, module).
        """
        eh = None
        rmodule = None
        try:
            if module:
                eh = self._event_desc_table[event_id]['modules'][module][0][0]
                rmodule = (
                    self._event_desc_table[event_id]['modules'][module][0][3])
            else:
                priorities = (
                    list(self._event_desc_table[event_id]['priority'].keys()))
                priority = max(priorities)
                eh = (
                    self._event_desc_table[
                        event_id]['priority'][priority][0][0])
                rmodule = (
                    self._event_desc_table[
                        event_id]['priority'][priority][0][3])
        finally:
            # NOTE(review): the 'return' inside 'finally' swallows any
            # KeyError from the lookups above, yielding (None, None) for
            # an unregistered event_id/module.
            message = "%s - Returning event handler" % (
                self._log_meta(event_id, eh))
            LOG.debug(message)
            return eh, rmodule
    def get_poll_handler(self, event_id, module=None):
        """Get the poll handler for event_id.

        Returns (poll_handler, spacing); (None, None) when unregistered
        (see NOTE in get_event_handler - same return-in-finally pattern).
        """
        ph, spacing = None, None
        try:
            if module:
                ph = self._event_desc_table[event_id]['modules'][module][0][1]
                spacing = self._event_desc_table[
                    event_id]['modules'][module][0][2]
            else:
                priorities = (
                    list(self._event_desc_table[event_id]['priority'].keys()))
                priority = max(priorities)
                ph = (
                    self._event_desc_table[
                        event_id]['priority'][priority][0][1])
                spacing = self._event_desc_table[
                    event_id]['priority'][priority][0][2]
        finally:
            message = "%s - Returning poll handler" % (
                self._log_meta(event_id, ph))
            LOG.debug(message)
            return ph, spacing
    def get_poll_spacing(self, event_id):
        """Return the spacing for event_id. """
        spacing = 0
        try:
            # NOTE(review): this indexing predates the 'modules'/'priority'
            # table layout built by register(); with the current structure
            # the lookup raises and the return in 'finally' yields the
            # default 0.
            spacing = self._event_desc_table[event_id][0][2]
        finally:
            message = "%s - Poll spacing %d" % (
                self._log_meta(event_id), spacing)
            LOG.debug(message)
            return spacing
"""Manages the lifecycle of event of a process.
Each process (worker/distributor) is associated
with a event manager. Event manager pulls events
from the pipe, caches it, sequences & dispatches
the events.
"""
class NfpEventManager(object):
    """Manages events dispatched to a single worker process."""

    def __init__(self, conf, controller, sequencer, pipe=None, pid=-1):
        self._conf = conf
        self._controller = controller
        # PID of process to which this event manager is associated
        self._pid = pid
        # Duplex pipe to read & write events
        self._pipe = pipe
        # Cache of UUIDs of events which are dispatched to
        # the worker which is handled by this em.
        self._cache = deque()
        # Load on this event manager - num of events pending to be completed
        self._load = 0

    def _log_meta(self, event=None):
        """Return a log prefix identifying this manager (and event)."""
        if event:
            return "(event - %s) - (event_manager - %d)" % (
                event.identify(), self._pid)
        else:
            # NOTE: original format string was missing the closing
            # parenthesis, producing unbalanced log prefixes.
            return "(event_manager - %d)" % (self._pid)

    def _wait_for_events(self, pipe, timeout=0.01):
        """Wait & pull event from the pipe.

        Wait till timeout for the first event and then
        pull as many as available.
        Returns: Events[] pulled from pipe.
        """
        events = []
        try:
            ret = pipe.poll(timeout)
            if ret:
                event = self._controller.pipe_recv(pipe)
                if event:
                    events.append(event)
        except multiprocessing.TimeoutError as err:
            message = "%s" % (err)
            LOG.exception(message)
        return events

    def init_from_event_manager(self, em):
        """Initialize from existing event manager.

        Invoked when an event manager has to take over
        existing event manager.
        Whole cache is replaced and events are replayed.
        This is used in case where a worker dies, dead
        workers event manager is assigned to new worker.
        """
        # Replay all the events from cache.
        self._cache = em._cache

    def get_pending_events(self):
        """Return the UUIDs of events still pending with the worker."""
        return list(self._cache)

    def get_load(self):
        """Return current load on the manager."""
        return self._load

    def pop_event(self, event):
        """Pop the passed event from cache.

        Is called when an event is complete/cancelled.
        If the event was sequenced, then sequencer is
        released to schedule next event.
        Removes event from cache.
        """
        message = "%s - pop event" % (self._log_meta(event))
        LOG.debug(message)
        try:
            self._cache.remove(event.desc.uuid)
            self._load -= 1
        except ValueError:
            # Event was never cached (e.g. dispatched with cache=False);
            # best-effort removal, just log it.
            message = "%s - event not in cache" % (
                self._log_meta(event))
            LOG.debug(message)

    def dispatch_event(self, event, event_type=None,
                       inc_load=True, cache=True):
        """Dispatch event to the worker.

        Sends the event to worker through pipe.
        Increments load if event_type is SCHEDULED event,
        poll_event does not contribute to load.
        """
        message = "%s - Dispatching to worker %d" % (
            self._log_meta(event), self._pid)
        LOG.debug(message)
        # Update the worker information in the event.
        event.desc.worker = self._pid
        # Update the event with passed type
        if event_type:
            event.desc.type = event_type
        # Send to the worker
        self._controller.pipe_send(self._pipe, event)
        if inc_load:
            self._load += 1
        # Add to the cache
        if cache:
            self._cache.append(event.desc.uuid)

    def event_watcher(self, timeout=0.01):
        """Watch for events. """
        return self._wait_for_events(self._pipe, timeout=timeout)

View File

@ -1,231 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from argparse import Namespace
import functools

import six

from gbpservice._i18n import _
from gbpservice.nfp.core import context
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.core import threadpool as core_tp
LOG = nfp_logging.getLogger(__name__)
class InUse(Exception):
    """Raised when a TaskExecutor instance is misused.

    Either the same executor instance was fired twice, or jobs were
    added after the executor had already been fired.
    """
def check_in_use(f):
    """Decorator guarding executor methods against use-after-fire.

    Raises InUse when the wrapped method is invoked while the executor
    is already executing its jobs (i.e. self.fired is True).
    """
    # functools.wraps preserves the wrapped method's name/docstring,
    # which the original decorator clobbered.
    @functools.wraps(f)
    def wrapped(self, *args, **kwargs):
        if self.fired:
            raise InUse(_("Executor in use"))
        return f(self, *args, **kwargs)
    return wrapped
class TaskExecutor(object):
    """Executes given jobs in green threads.

    Any number of jobs can be added till executor
    is fired. When fired, executes all jobs in
    parallel in green threads. Waits for threads
    to complete, captures the return values of thread
    function.
    Caller can choose to pass result_store where the
    return value will be updated.
    """

    def __init__(self, jobs=0):
        # Size the pool to the expected number of jobs, or fall back
        # to the pool's default size when not specified.
        if not jobs:
            self.thread_pool = core_tp.ThreadPool()
        else:
            self.thread_pool = core_tp.ThreadPool(thread_pool_size=jobs)
        self.pipe_line = []
        self.fired = False

    @check_in_use
    def add_job(self, id, func, *args, **kwargs):
        """Queue a job; only allowed before the executor is fired."""
        result_store = kwargs.pop('result_store', None)
        job = {
            'id': id, 'method': func,
            'args': args, 'kwargs': kwargs
        }
        if result_store is not None:
            job.update({'result_store': result_store})
        LOG.debug("TaskExecutor - (job - %s) added to pipeline", job)
        self.pipe_line.append(job)

    def _complete(self):
        """Reset internal state so the executor can be reused."""
        LOG.debug("TaskExecutor - complete")
        self.pipe_line = []
        self.fired = False

    def dispatch(self, job):
        # Each green thread initializes a fresh nfp context before
        # invoking the job's method.
        context.init()
        return job['method'](*job['args'], **job['kwargs'])

    @check_in_use
    def fire(self):
        """Run all queued jobs in parallel and wait for completion.

        Returns the list of job dicts, each updated with its 'result'.
        Also writes the result into job['result_store'] when provided.
        """
        self.fired = True
        for job in self.pipe_line:
            LOG.debug("TaskExecutor - (job - %s) dispatched", job)
            job['thread'] = self.thread_pool.dispatch(self.dispatch, job)
        for job in self.pipe_line:
            result = job['thread'].wait()
            LOG.debug("TaskExecutor - (job - %s) complete", job)
            job.pop('thread')
            job['result'] = result
            # membership test on the dict directly; the original built
            # a throwaway list of keys for this check.
            if 'result_store' in job:
                job['result_store']['result'] = result
        done_jobs = self.pipe_line[:]
        self._complete()
        return done_jobs
class EventGraphExecutor(object):
    """Executor which executes a graph of events.

    An event graph can consist of events defined
    in any combination of parallel and sequence
    events. Executor will execute them in the
    order and manner specified.
    Eg., E1 -> (E2, E3)
        [E1 should execute after E2, E3 completes,
        while E2 & E3 can happen in parallel]
        E2 -> (E4, E5)
        [E2 should execute after E4, E5 completes,
        while E4 & E5 should happen in sequence]
        E3 -> (None)
        [No child events for E3]
    Executor will run the above graph and execute events
    in the exact specific order mentioned.
    At each level, parent event holds the result of child
    events, caller can use parent event complete notification
    to get the child events execution status.
    """

    def __init__(self, manager):
        self.manager = manager
        # Graphs currently executing, keyed by graph id.
        self.running = {}

    def add(self, graph):
        """Start executing graph; schedules its first runnable events."""
        assert graph['id'] not in self.running, (
            "Graph - %s is already running" % (graph['id']))
        # Per-parent-node accumulation of child results.
        graph['results'] = dict.fromkeys(graph['data'])
        self.running[graph['id']] = graph
        self.run(graph['id'], graph['root'])

    def run(self, graph_id, node):
        """Execute node: schedule it if it is a leaf, else run children."""
        graph = self.running[graph_id]
        leafs = self._leafs(graph['data'], node)
        if leafs == []:
            results = self._results(graph, node)
            self._schedule(node, results=results)
        else:
            self._dispatch(graph, leafs)

    def _results(self, graph, node):
        """Return accumulated child results for node ([] if none)."""
        # NOTE: the original read self.running['results'][node], which
        # can never exist (self.running is keyed by graph id) and so
        # always returned []. Read the graph's own result store instead.
        try:
            return graph['results'][node] or []
        except KeyError:
            return []

    def _dispatch(self, graph, nodes):
        """Run each child node; sequenced events are scheduled directly."""
        for node in nodes:
            event = self.manager.get_event(node)
            if event.sequence:
                self._schedule(node)
            else:
                self.run(graph['id'], node)

    def _leafs(self, tree, root):
        """Return child nodes of root ([] when root has none)."""
        leafs = []
        try:
            leafs = tree[root]
        finally:
            # return in finally deliberately swallows KeyError for
            # nodes absent from the tree.
            return leafs

    def _root(self, graph, of):
        """Return the parent node of 'of' in graph, or None for a root."""
        tree = graph['data']
        for root, nodes in tree.items():
            if of in nodes:
                return root
        return None

    def _schedule(self, node, results=None):
        """Hand node's event to the manager with its children's results."""
        results = results or []
        event = self.manager.get_event(node)
        event.result = results
        self.manager._scheduled_new_event(event)

    def _graph(self, node):
        """Return the running graph containing node, or None."""
        for graph in list(self.running.values()):
            # NOTE: also match the graph's root node itself; the
            # original only searched child lists, so a completing root
            # never found its graph and finished graphs leaked in
            # self.running.
            if node == graph['root'] or self._root(graph, node):
                return graph
        return None

    def _prepare_result(self, node, result):
        """Wrap a node's result into a Namespace(key, id, result)."""
        result_obj = Namespace()
        key, id = node.split(':')
        result_obj.id = id
        result_obj.key = key
        result_obj.result = result
        return result_obj

    def _update_result(self, graph, root, result):
        """Append result under root and return the accumulated list."""
        if not graph['results'][root]:
            graph['results'][root] = []
        graph['results'][root].append(result)
        return graph['results'][root]

    def conntinue(self, completed_node, result):
        """Advance the graph after completed_node finishes.

        (Method name kept with its historical typo for API
        compatibility.)
        """
        graph = self._graph(completed_node)
        if graph:
            if completed_node == graph['root']:
                # Graph is complete here, remove from running_instances
                self.running.pop(graph['id'])
            else:
                root = self._root(graph, completed_node)
                graph['data'][root].remove(completed_node)
                result = self._prepare_result(completed_node, result)
                results = self._update_result(graph, root, result)
                if graph['data'][root] == []:
                    # All children done; parent can now be scheduled.
                    self._schedule(root, results=results)

Some files were not shown because too many files have changed in this diff Show More