Aligning with Liberty dependencies

Change-Id: Ia1800b633e3172bdcabfefed6bead1d460dd590e
Sumit Naiksatam 2015-11-02 10:05:21 -08:00
parent 2dce1c2829
commit c806a88f3f
51 changed files with 368 additions and 1791 deletions

View File

@@ -1,9 +1,199 @@
{
"context_is_admin": "role:admin",
"admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s",
"owner": "tenant_id:%(tenant_id)s",
"admin_or_owner": "rule:context_is_admin or rule:owner",
"context_is_advsvc": "role:advsvc",
"admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
"admin_owner_or_network_owner": "rule:admin_or_network_owner or rule:owner",
"admin_only": "rule:context_is_admin",
"regular_user": "",
"shared": "field:networks:shared=True",
"shared_firewalls": "field:firewalls:shared=True",
"shared_firewall_policies": "field:firewall_policies:shared=True",
"shared_subnetpools": "field:subnetpools:shared=True",
"shared_address_scopes": "field:address_scopes:shared=True",
"external": "field:networks:router:external=True",
"default": "rule:admin_or_owner",
"create_subnet": "rule:admin_or_network_owner",
"get_subnet": "rule:admin_or_owner or rule:shared",
"update_subnet": "rule:admin_or_network_owner",
"delete_subnet": "rule:admin_or_network_owner",
"create_subnetpool": "",
"create_subnetpool:shared": "rule:admin_only",
"get_subnetpool": "rule:admin_or_owner or rule:shared_subnetpools",
"update_subnetpool": "rule:admin_or_owner",
"delete_subnetpool": "rule:admin_or_owner",
"create_address_scope": "",
"create_address_scope:shared": "rule:admin_only",
"get_address_scope": "rule:admin_or_owner or rule:shared_address_scopes",
"update_address_scope": "rule:admin_or_owner",
"update_address_scope:shared": "rule:admin_only",
"delete_address_scope": "rule:admin_or_owner",
"create_network": "",
"get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc",
"get_network:router:external": "rule:regular_user",
"get_network:segments": "rule:admin_only",
"get_network:provider:network_type": "rule:admin_only",
"get_network:provider:physical_network": "rule:admin_only",
"get_network:provider:segmentation_id": "rule:admin_only",
"get_network:queue_id": "rule:admin_only",
"create_network:shared": "rule:admin_only",
"create_network:router:external": "rule:admin_only",
"create_network:segments": "rule:admin_only",
"create_network:provider:network_type": "rule:admin_only",
"create_network:provider:physical_network": "rule:admin_only",
"create_network:provider:segmentation_id": "rule:admin_only",
"update_network": "rule:admin_or_owner",
"update_network:segments": "rule:admin_only",
"update_network:shared": "rule:admin_only",
"update_network:provider:network_type": "rule:admin_only",
"update_network:provider:physical_network": "rule:admin_only",
"update_network:provider:segmentation_id": "rule:admin_only",
"update_network:router:external": "rule:admin_only",
"delete_network": "rule:admin_or_owner",
"network_device": "field:port:device_owner=~^network:",
"create_port": "",
"create_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:binding:host_id": "rule:admin_only",
"create_port:binding:profile": "rule:admin_only",
"create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:allowed_address_pairs": "rule:admin_or_network_owner",
"get_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc",
"get_port:queue_id": "rule:admin_only",
"get_port:binding:vif_type": "rule:admin_only",
"get_port:binding:vif_details": "rule:admin_only",
"get_port:binding:host_id": "rule:admin_only",
"get_port:binding:profile": "rule:admin_only",
"update_port": "rule:admin_or_owner or rule:context_is_advsvc",
"update_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:mac_address": "rule:admin_only or rule:context_is_advsvc",
"update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:binding:host_id": "rule:admin_only",
"update_port:binding:profile": "rule:admin_only",
"update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:allowed_address_pairs": "rule:admin_or_network_owner",
"delete_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc",
"get_router:ha": "rule:admin_only",
"create_router": "rule:regular_user",
"create_router:external_gateway_info:enable_snat": "rule:admin_only",
"create_router:distributed": "rule:admin_only",
"create_router:ha": "rule:admin_only",
"get_router": "rule:admin_or_owner",
"get_router:distributed": "rule:admin_only",
"update_router:external_gateway_info:enable_snat": "rule:admin_only",
"update_router:distributed": "rule:admin_only",
"update_router:ha": "rule:admin_only",
"delete_router": "rule:admin_or_owner",
"add_router_interface": "rule:admin_or_owner",
"remove_router_interface": "rule:admin_or_owner",
"create_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
"update_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
"create_firewall": "",
"get_firewall": "rule:admin_or_owner",
"create_firewall:shared": "rule:admin_only",
"get_firewall:shared": "rule:admin_only",
"update_firewall": "rule:admin_or_owner",
"update_firewall:shared": "rule:admin_only",
"delete_firewall": "rule:admin_or_owner",
"create_firewall_policy": "",
"get_firewall_policy": "rule:admin_or_owner or rule:shared_firewall_policies",
"create_firewall_policy:shared": "rule:admin_or_owner",
"update_firewall_policy": "rule:admin_or_owner",
"delete_firewall_policy": "rule:admin_or_owner",
"insert_rule": "rule:admin_or_owner",
"remove_rule": "rule:admin_or_owner",
"create_firewall_rule": "",
"get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls",
"update_firewall_rule": "rule:admin_or_owner",
"delete_firewall_rule": "rule:admin_or_owner",
"create_qos_queue": "rule:admin_only",
"get_qos_queue": "rule:admin_only",
"update_agent": "rule:admin_only",
"delete_agent": "rule:admin_only",
"get_agent": "rule:admin_only",
"create_dhcp-network": "rule:admin_only",
"delete_dhcp-network": "rule:admin_only",
"get_dhcp-networks": "rule:admin_only",
"create_l3-router": "rule:admin_only",
"delete_l3-router": "rule:admin_only",
"get_l3-routers": "rule:admin_only",
"get_dhcp-agents": "rule:admin_only",
"get_l3-agents": "rule:admin_only",
"get_loadbalancer-agent": "rule:admin_only",
"get_loadbalancer-pools": "rule:admin_only",
"get_agent-loadbalancers": "rule:admin_only",
"get_loadbalancer-hosting-agent": "rule:admin_only",
"create_floatingip": "rule:regular_user",
"create_floatingip:floating_ip_address": "rule:admin_only",
"update_floatingip": "rule:admin_or_owner",
"delete_floatingip": "rule:admin_or_owner",
"get_floatingip": "rule:admin_or_owner",
"create_network_profile": "rule:admin_only",
"update_network_profile": "rule:admin_only",
"delete_network_profile": "rule:admin_only",
"get_network_profiles": "",
"get_network_profile": "",
"update_policy_profiles": "rule:admin_only",
"get_policy_profiles": "",
"get_policy_profile": "",
"create_metering_label": "rule:admin_only",
"delete_metering_label": "rule:admin_only",
"get_metering_label": "rule:admin_only",
"create_metering_label_rule": "rule:admin_only",
"delete_metering_label_rule": "rule:admin_only",
"get_metering_label_rule": "rule:admin_only",
"get_service_provider": "rule:regular_user",
"get_lsn": "rule:admin_only",
"create_lsn": "rule:admin_only",
"create_flavor": "rule:admin_only",
"update_flavor": "rule:admin_only",
"delete_flavor": "rule:admin_only",
"get_flavors": "rule:regular_user",
"get_flavor": "rule:regular_user",
"get_policy": "rule:regular_user",
"create_policy": "rule:admin_only",
"update_policy": "rule:admin_only",
"delete_policy": "rule:admin_only",
"get_policy_bandwidth_limit_rule": "rule:regular_user",
"create_policy_bandwidth_limit_rule": "rule:admin_only",
"delete_policy_bandwidth_limit_rule": "rule:admin_only",
"update_policy_bandwidth_limit_rule": "rule:admin_only",
"get_rule_type": "rule:regular_user",
"restrict_wildcard": "(not field:rbac_policy:target_tenant=*) or rule:admin_only",
"create_rbac_policy": "",
"create_rbac_policy:target_tenant": "rule:restrict_wildcard",
"update_rbac_policy": "rule:admin_or_owner",
"update_rbac_policy:target_tenant": "rule:restrict_wildcard and rule:admin_or_owner",
"get_rbac_policy": "rule:admin_or_owner",
"delete_rbac_policy": "rule:admin_or_owner",
"shared_ptg": "field:policy_target_groups:shared=True",
"shared_pt": "field:policy_targets:shared=True",
"shared_prs": "field:policy_rule_sets:shared=True",

View File

@@ -148,4 +148,4 @@ class API(object):
def remove_router_interface(self, context, router_id, interface):
return client.get_client(context).remove_interface_router(router_id,
interface)
interface)

View File

@@ -13,11 +13,11 @@
from neutron.common import exceptions as nexcp
from neutron import context
from neutron.db import common_db_mixin
from neutron.db import quota_db
from neutron import quota
from neutron.db.quota import driver
from neutron.quota import resource as quota_resource
QUOTA_DRIVER = quota_db.DbQuotaDriver
QUOTA_DRIVER = driver.DbQuotaDriver
DB_CLASS_TO_RESOURCE_NAMES = {}
@@ -31,8 +31,8 @@ class GBPQuotaBase(common_db_mixin.CommonDbMixin):
ctx = context.Context(user_id=None, tenant_id=tenant_id)
class_name = self.__class__.__name__
resource = DB_CLASS_TO_RESOURCE_NAMES[class_name]
d = {resource: quota.CountableResource(resource, None,
"quota_" + resource)}
d = {resource: quota_resource.CountableResource(resource, None,
"quota_" + resource)}
resource_quota = QUOTA_DRIVER.get_tenant_quotas(ctx, d,
tenant_id)[resource]
if resource_quota == -1:
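For context, this hunk is the quota part of the Liberty alignment: the old neutron.db.quota_db and neutron.quota imports give way to neutron.db.quota.driver and neutron.quota.resource. A minimal sketch mirroring the calls shown above (the resource name and tenant id are illustrative, and actually running it needs a configured Neutron database):

    from neutron import context
    from neutron.db.quota import driver
    from neutron.quota import resource as quota_resource

    QUOTA_DRIVER = driver.DbQuotaDriver
    ctx = context.Context(user_id=None, tenant_id='tenant-a')
    res = 'policy_target_group'
    d = {res: quota_resource.CountableResource(res, None, 'quota_' + res)}
    # A value of -1 is treated as "unlimited", as in the hunk above.
    resource_quota = QUOTA_DRIVER.get_tenant_quotas(ctx, d, 'tenant-a')[res]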

View File

@@ -17,8 +17,8 @@ from neutron import context
from neutron.db import common_db_mixin
from neutron.db import model_base
from neutron.db import models_v2
from neutron.openstack.common import uuidutils
from oslo_log import log as logging
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
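This hunk and the following ones make the same Liberty substitution across several modules: neutron.openstack.common.uuidutils is replaced by oslo_utils.uuidutils, with logging taken from oslo_log. A short hedged sketch of the replacement usage (the logger and variable names are illustrative):

    from oslo_log import log as logging
    from oslo_utils import uuidutils

    LOG = logging.getLogger(__name__)
    new_id = uuidutils.generate_uuid()  # was neutron.openstack.common.uuidutils
    LOG.debug("generated id %s", new_id)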

View File

@@ -12,8 +12,8 @@
from neutron.common import log
from neutron.db import model_base
from neutron.openstack.common import uuidutils
from oslo_log import log as logging
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy import orm

View File

@@ -53,4 +53,4 @@ def upgrade():
def downgrade():
pass
pass

View File

@@ -17,10 +17,10 @@ from neutron.db import common_db_mixin
from neutron.db import model_base
from neutron.db import models_v2
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as pconst
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy import orm

View File

@@ -17,12 +17,12 @@ from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.common import constants as n_constants
from neutron.common import exceptions as nexc
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron import quota
from neutron.quota import resource_registry
from neutron.services import service_base
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import six
import gbpservice.neutron.extensions
@@ -36,10 +36,6 @@ from gbpservice.neutron.services.grouppolicy.common import (
# the GBP service to be loaded correctly. GBP extensions' path is added
# to Neutron's so that it's found at extension scanning time.
extensions.append_api_extensions_path(gbpservice.neutron.extensions.__path__)
constants.GROUP_POLICY = "GROUP_POLICY"
constants.COMMON_PREFIXES["GROUP_POLICY"] = "/grouppolicy"
constants.EXT_TO_SERVICE_MAPPING['gp'] = constants.GROUP_POLICY
constants.ALLOWED_SERVICES.append(constants.GROUP_POLICY)
LOG = logging.getLogger(__name__)
@@ -880,7 +876,7 @@ class Group_policy(extensions.ExtensionDescriptor):
'policy_rule_set', 'external_policy',
'external_segment', 'nat_pool',
'network_service_policy']:
quota.QUOTAS.register_resource_by_name(resource_name)
resource_registry.register_resource_by_name(resource_name)
return resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
constants.GROUP_POLICY)

View File

@@ -12,6 +12,7 @@
from neutron.db import l3_db
from neutron.db import securitygroups_db
from neutron import manager
# Monkey patch create floatingip to allow subnet_id to be specified.
@@ -129,3 +130,9 @@ def _get_security_groups_on_port(self, context, port):
securitygroups_db.SecurityGroupDbMixin._get_security_groups_on_port = (
_get_security_groups_on_port)
def _load_flavors_manager(self):
pass
manager.NeutronManager._load_flavors_manager = _load_flavors_manager

View File

@@ -10,17 +10,15 @@
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from neutron.api.v2 import attributes
from neutron.common import exceptions as exc
from neutron.common import ipv6_utils
from neutron.db import db_base_plugin_v2
from neutron.db import models_v2
from neutron import i18n
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2 import plugin
from oslo_concurrency import lockutils
from oslo_db import exception as os_db_exception
from oslo_log import log
from oslo_utils import excutils
@@ -37,7 +35,7 @@ class InfiniteLoopError(exc.NeutronException):
# REVISIT(rkukura): Partially address bug 1510327 (GBP: Deleting
# groups leads to subnet-delete in infinite loop) by limiting the
# retry loop and doing additional info-level logging. This is based on
# the stable/kilo version of neutron.plugins.ml2.plugin.Ml2Plugin.
# the stable/liberty version of neutron.plugins.ml2.plugin.Ml2Plugin.
def delete_network(self, context, id):
# REVISIT(rkukura) The super(Ml2Plugin, self).delete_network()
# function is not used because it auto-deletes ports and
@@ -62,15 +60,14 @@ def delete_network(self, context, id):
# to 'lock wait timeout' errors.
#
# Process L3 first, since, depending on the L3 plugin, it may
# involve locking the db-access semaphore, sending RPC
# notifications, and/or calling delete_port on this plugin.
# involve sending RPC notifications, and/or calling delete_port
# on this plugin.
# Additionally, a rollback may not be enough to undo the
# deletion of a floating IP with certain L3 backends.
self._process_l3_delete(context, id)
# Using query().with_lockmode isn't necessary. Foreign-key
# constraints prevent deletion if concurrent creation happens.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
with session.begin(subtransactions=True):
# Get ports to auto-delete.
ports = (session.query(models_v2.Port).
enable_eagerloads(False).
@@ -107,19 +104,22 @@ def delete_network(self, context, id):
# network record, so explicit removal is not necessary.
LOG.debug("Committing transaction")
break
port_ids = [port.id for port in ports]
subnet_ids = [subnet.id for subnet in subnets]
except os_db_exception.DBError as e:
with excutils.save_and_reraise_exception() as ctxt:
if isinstance(e.inner_exception, sql_exc.IntegrityError):
ctxt.reraise = False
LOG.warning(_("A concurrent port creation has "
"occurred"))
LOG.warning(i18n._LW("A concurrent port creation has "
"occurred"))
continue
LOG.info(_("Auto-deleting ports %(ports)s for network %(net)s"),
LOG.info(i18n._LI("Auto-deleting ports %(ports)s for network %(net)s"),
{'ports': ports, 'net': id})
self._delete_ports(context, ports)
LOG.info(_("Auto-deleting subnets %(subnets)s for network %(net)s"),
{'subnets': subnets, 'net': id})
self._delete_subnets(context, subnets)
self._delete_ports(context, port_ids)
LOG.info(i18n._LI("Auto-deleting subnets %(subnets)s for network "
"%(net)s"), {'subnets': subnets, 'net': id})
self._delete_subnets(context, subnet_ids)
try:
self.mechanism_manager.delete_network_postcommit(mech_context)
@@ -127,15 +127,15 @@ def delete_network(self, context, id):
# TODO(apech) - One or more mechanism driver failed to
# delete the network. Ideally we'd notify the caller of
# the fact that an error occurred.
LOG.error(_("mechanism_manager.delete_network_postcommit"
" failed"))
LOG.error(i18n._LE("mechanism_manager.delete_network_postcommit"
" failed"))
self.notifier.network_delete(context, id)
plugin.Ml2Plugin.delete_network = delete_network
# REVISIT(rkukura): Related to bug 1510327, also limit the retry loop
# here. This is based on the stable/kilo version of
# here. This is based on the stable/liberty version of
# neutron.plugins.ml2.plugin.Ml2Plugin.
def delete_subnet(self, context, id):
# REVISIT(rkukura) The super(Ml2Plugin, self).delete_subnet()
@@ -154,14 +154,9 @@ def delete_subnet(self, context, id):
{'attempt': attempt, 'subnet': id})
if attempt > 100:
raise InfiniteLoopError()
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock
# wait timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
with session.begin(subtransactions=True):
record = self._get_subnet(context, id)
subnet = self._make_subnet_dict(record, None)
subnet = self._make_subnet_dict(record, None, context=context)
qry_allocated = (session.query(models_v2.IPAllocation).
filter_by(subnet_id=id).
join(models_v2.Port))
@@ -179,7 +174,8 @@ def delete_subnet(self, context, id):
allocated = qry_allocated.all()
# Delete all the IPAllocation that can be auto-deleted
if allocated:
map(session.delete, allocated)
for x in allocated:
session.delete(x)
LOG.debug("Ports to auto-deallocate: %s", allocated)
# Check if there are more IP allocations, unless
# is_auto_address_subnet is True. In that case the check is
@@ -188,40 +184,64 @@ def delete_subnet(self, context, id):
# the isolation level is set to READ COMMITTED allocations made
# concurrently will be returned by this query
if not is_auto_addr_subnet:
if self._subnet_check_ip_allocations(context, id):
LOG.debug("Found IP allocations on subnet %s, "
"cannot delete", id)
raise exc.SubnetInUse(subnet_id=id)
alloc = self._subnet_check_ip_allocations(context, id)
if alloc:
user_alloc = self._subnet_get_user_allocation(
context, id)
if user_alloc:
LOG.info(i18n._LI("Found port (%(port_id)s, %(ip)s) "
"having IP allocation on subnet "
"%(subnet)s, cannot delete"),
{'ip': user_alloc.ip_address,
'port_id': user_alloc.port_id,
'subnet': id})
raise exc.SubnetInUse(subnet_id=id)
else:
# allocation found and it was DHCP port
# that appeared after autodelete ports were
# removed - need to restart whole operation
raise os_db_exception.RetryRequest(
exc.SubnetInUse(subnet_id=id))
db_base_plugin_v2._check_subnet_not_used(context, id)
# If allocated is None, then all the IPAllocation were
# correctly deleted during the previous pass.
if not allocated:
network = self.get_network(context, subnet['network_id'])
mech_context = driver_context.SubnetContext(self, context,
subnet)
subnet,
network)
self.mechanism_manager.delete_subnet_precommit(
mech_context)
LOG.debug("Deleting subnet record")
session.delete(record)
# The super(Ml2Plugin, self).delete_subnet() is not called,
# so need to manually call delete_subnet for pluggable ipam
self.ipam.delete_subnet(context, id)
LOG.debug("Committing transaction")
break
for a in allocated:
if a.port_id:
if a.port:
# calling update_port() for each allocation to remove the
# IP from the port and call the MechanismDrivers
data = {attributes.PORT:
{'fixed_ips': [{'subnet_id': ip.subnet_id,
'ip_address': ip.ip_address}
for ip in a.ports.fixed_ips
for ip in a.port.fixed_ips
if ip.subnet_id != id]}}
try:
self.update_port(context, a.port_id, data)
except exc.PortNotFound:
LOG.debug("Port %s deleted concurrently", a.port_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Exception deleting fixed_ip "
"from port %s"), a.port_id)
LOG.exception(i18n._LE("Exception deleting fixed_ip "
"from port %s"), a.port_id)
try:
self.mechanism_manager.delete_subnet_postcommit(mech_context)
@@ -229,6 +249,7 @@ def delete_subnet(self, context, id):
# TODO(apech) - One or more mechanism driver failed to
# delete the subnet. Ideally we'd notify the caller of
# the fact that an error occurred.
LOG.error(_("mechanism_manager.delete_subnet_postcommit failed"))
LOG.error(i18n._LE(
"mechanism_manager.delete_subnet_postcommit failed"))
plugin.Ml2Plugin.delete_subnet = delete_subnet

View File

@@ -16,9 +16,8 @@ from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as nexc
from neutron.common import log
from neutron.plugins.common import constants
from neutron import quota
from neutron.quota import resource_registry
from neutron.services import service_base
from oslo_config import cfg
from oslo_log import log as logging
@@ -32,11 +31,7 @@ from gbpservice.neutron.services.servicechain.common import constants as scc
# The code below is a monkey patch of key Neutron's modules. This is needed for
# the GBP service to be loaded correctly. GBP extensions' path is added
# to Neutron's so that it's found at extension scanning time.
extensions.append_api_extensions_path(gbpservice.neutron.extensions.__path__)
constants.SERVICECHAIN = "SERVICECHAIN"
constants.COMMON_PREFIXES["SERVICECHAIN"] = "/servicechain"
LOG = logging.getLogger(__name__)
@@ -285,7 +280,7 @@ class Servicechain(extensions.ExtensionDescriptor):
attr.PLURALS.update(plural_mappings)
for resource_name in ['servicechain_node', 'servicechain_spec',
'servicechain_instance', 'service_profile']:
quota.QUOTAS.register_resource_by_name(resource_name)
resource_registry.register_resource_by_name(resource_name)
return resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
constants.SERVICECHAIN)
@@ -350,108 +345,88 @@ class ServiceChainPluginBase(service_base.ServicePluginBase):
pass
@abc.abstractmethod
@log.log
def get_servicechain_nodes(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
@log.log
def get_servicechain_node(self, context, servicechain_node_id,
fields=None):
pass
@abc.abstractmethod
@log.log
def create_servicechain_node(self, context, servicechain_node):
pass
@abc.abstractmethod
@log.log
def update_servicechain_node(self, context, servicechain_node_id,
servicechain_node):
pass
@abc.abstractmethod
@log.log
def delete_servicechain_node(self, context, servicechain_node_id):
pass
@abc.abstractmethod
@log.log
def get_servicechain_specs(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
@log.log
def get_servicechain_spec(self, context, servicechain_spec_id,
fields=None):
pass
@abc.abstractmethod
@log.log
def create_servicechain_spec(self, context, servicechain_spec):
pass
@abc.abstractmethod
@log.log
def update_servicechain_spec(self, context, servicechain_spec_id,
servicechain_spec):
pass
@abc.abstractmethod
@log.log
def delete_servicechain_spec(self, context, servicechain_spec_id):
pass
@abc.abstractmethod
@log.log
def get_servicechain_instances(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
@log.log
def get_servicechain_instance(self, context, servicechain_instance_id,
fields=None):
pass
@abc.abstractmethod
@log.log
def create_servicechain_instance(self, context, servicechain_instance_id):
def create_servicechain_instance(self, context, servicechain_instance):
pass
@abc.abstractmethod
@log.log
def update_servicechain_instance(self, context, servicechain_instance_id,
servicechain_instance):
pass
@abc.abstractmethod
@log.log
def delete_servicechain_instance(self, context, servicechain_instance_id):
pass
@abc.abstractmethod
@log.log
def create_service_profile(self, context, service_profile):
pass
@abc.abstractmethod
@log.log
def update_service_profile(self, context, service_profile_id,
service_profile):
pass
@abc.abstractmethod
@log.log
def delete_service_profile(self, context, service_profile_id):
pass
@abc.abstractmethod
@log.log
def get_service_profile(self, context, service_profile_id, fields=None):
pass
@abc.abstractmethod
@log.log
def get_service_profiles(self, context, filters=None, fields=None):
pass

View File

@@ -12,7 +12,8 @@
from neutron.extensions import portbindings
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import mech_openvswitch as base
from neutron.plugins.ml2.drivers.openvswitch.mech_driver import (
mech_openvswitch as base)
from oslo_log import log
from gbpservice.neutron.services.servicechain.plugins.ncp import plumber_base
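The OVS mechanism driver moved under neutron.plugins.ml2.drivers.openvswitch.mech_driver in Liberty, hence the re-pointed import above. A hedged sketch of subclassing it from the new location (the subclass name is illustrative, and OpenvswitchMechanismDriver is assumed to be the exported class):

    from neutron.plugins.ml2.drivers.openvswitch.mech_driver import (
        mech_openvswitch as base)

    class ExampleOVSMechanismDriver(base.OpenvswitchMechanismDriver):
        # Illustrative only; a real driver would override binding behavior.
        pass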

View File

@@ -10,6 +10,17 @@
# License for the specific language governing permissions and limitations
# under the License.
from neutron.plugins.common import constants
constants.GROUP_POLICY = "GROUP_POLICY"
constants.SERVICECHAIN = "SERVICECHAIN"
GBP_PREFIXES = {
constants.GROUP_POLICY: "/grouppolicy",
constants.SERVICECHAIN: "/servicechain",
}
GP_ACTION_ALLOW = 'allow'
GP_ACTION_REDIRECT = 'redirect'
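These additions replace the COMMON_PREFIXES monkey patching removed from the extension modules earlier in this commit: under Liberty, each service plugin advertises its own URI prefix through a path_prefix attribute. A hedged sketch of the consuming side (the class name is illustrative; the import aliases match the GroupPolicyPlugin hunk later in this diff):

    from neutron.plugins.common import constants as pconst
    from gbpservice.neutron.services.grouppolicy.common import (
        constants as gp_cts)

    class ExampleServicePlugin(object):
        # Resolves to "/grouppolicy" via the GBP_PREFIXES mapping above.
        path_prefix = gp_cts.GBP_PREFIXES[pconst.GROUP_POLICY]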

View File

@@ -14,8 +14,10 @@ import copy
import netaddr
from apic_ml2.neutron.db import port_ha_ipaddress_binding as ha_ip_db
from apic_ml2.neutron.plugins.ml2.drivers.cisco.apic import config
from apicapi import apic_manager
from keystoneclient.v2_0 import client as keyclient
from networking_cisco.plugins.ml2.drivers.cisco.apic import apic_model
from neutron.agent.linux import dhcp
from neutron.api.v2 import attributes
from neutron.common import constants as n_constants
@@ -26,8 +28,6 @@ from neutron import context as nctx
from neutron.db import db_base_plugin_v2 as n_db
from neutron.extensions import portbindings
from neutron import manager
from neutron.plugins.ml2.drivers.cisco.apic import apic_model
from neutron.plugins.ml2.drivers.cisco.apic import config
from opflexagent import constants as ofcst
from opflexagent import rpc
from oslo_concurrency import lockutils
@@ -258,7 +258,7 @@ class ApicMappingDriver(api.ResourceMappingDriver,
def get_gbp_details(self, context, **kwargs):
try:
port_id = self._core_plugin._device_to_port_id(
kwargs['device'])
context, kwargs['device'])
port_context = self._core_plugin.get_bound_port_context(
context, port_id, kwargs['host'])
if not port_context:

View File

@@ -72,4 +72,4 @@ class ApicNameManager(object):
return obj['name'].startswith(APIC_REFERENCE_PREFIX)
def _extract_apic_reference(self, obj):
return obj['name'][len(APIC_REFERENCE_PREFIX):]
return obj['name'][len(APIC_REFERENCE_PREFIX):]

View File

@@ -1,41 +0,0 @@
# Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
restproxy_opts = [
cfg.StrOpt('server', default='localhost:8800',
help=_("IP Address and Port of Nuage's VSD server")),
cfg.StrOpt('serverauth', default='username:password',
secret=True,
help=_("Username and password for authentication")),
cfg.BoolOpt('serverssl', default=False,
help=_("Boolean for SSL connection with VSD server")),
cfg.StrOpt('base_uri', default='/',
help=_("Nuage provided base uri to reach out to VSD")),
cfg.StrOpt('organization', default='system',
help=_("Organization name in which VSD will orchestrate "
"network resources using openstack")),
cfg.StrOpt('auth_resource', default='',
help=_("Nuage provided uri for initial authorization to "
"access VSD")),
cfg.StrOpt('application', default = 'gbp_application',
help=_("Application name in which group based policy "
"is going to get defined"))
]
def nuage_register_cfg_opts():
cfg.CONF.register_opts(restproxy_opts, "RESTPROXY")

View File

@@ -1,116 +0,0 @@
# Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from gbpservice.neutron.services.grouppolicy.drivers import (
resource_mapping as api)
from gbpservice.neutron.services.grouppolicy.drivers.nuage.common import config
LOG = logging.getLogger(__name__)
class NuageGBPDriver(api.ResourceMappingDriver):
instance = None
def initialize(self):
LOG.debug('Initializing Nuage GBP driver')
super(NuageGBPDriver, self).initialize()
config.nuage_register_cfg_opts()
self.nuageclient_init()
NuageGBPDriver.instance = self
LOG.debug('Initialization of Nuage GBP is complete')
def nuageclient_init(self):
server = cfg.CONF.RESTPROXY.server
serverauth = cfg.CONF.RESTPROXY.serverauth
serverssl = cfg.CONF.RESTPROXY.serverssl
base_uri = cfg.CONF.RESTPROXY.base_uri
auth_resource = cfg.CONF.RESTPROXY.auth_resource
organization = cfg.CONF.RESTPROXY.organization
nuageclient = importutils.import_module('nuagenetlib.nuageclient')
self.nuageclient = nuageclient.NuageClient(server, base_uri,
serverssl, serverauth,
auth_resource,
organization)
self.nuage_app = cfg.CONF.RESTPROXY.application
self.nuageclient.create_application(
self.nuage_app)
@staticmethod
def get_initialized_instance():
return NuageGBPDriver.instance
def create_policy_target_group_postcommit(self, context):
self.nuageclient.create_ptg_postcommit(context,
self.nuage_app)
def update_policy_target_group_postcommit(self, context):
prs = None
curr_provided_prs = context.current[
'provided_policy_rule_sets']
curr_consumed_prs = context.current[
'consumed_policy_rule_sets']
if curr_provided_prs and not curr_consumed_prs:
prs = context._plugin.get_policy_rule_set(
context._plugin_context, curr_provided_prs[0])
elif curr_consumed_prs and not curr_provided_prs:
prs = context._plugin.get_policy_rule_set(
context._plugin_context, curr_consumed_prs[0])
if (prs and prs['providing_policy_target_groups'] and
prs['consuming_policy_target_groups']):
self.nuageclient.update_ptg_postcommit(context, prs,
self.nuage_app)
def delete_policy_target_group_postcommit(self, context):
self.nuageclient.delete_ptg_postcommit(context,
self.nuage_app)
def create_policy_rule_postcommit(self, context):
action = context._plugin.get_policy_action(
context._plugin_context, context.current['policy_actions'][0])
classifier = context._plugin.get_policy_classifier(
context._plugin_context,
context.current['policy_classifier_id'])
self.nuageclient.create_policyrule_postcommit(context, action,
classifier,
self.nuage_app)
def delete_policy_rule_postcommit(self, context):
self.nuageclient.delete_policyrule_postcommit(context,
self.nuage_app)
def create_policy_rule_set_postcommit(self, context):
# Should not call parent class's method here
# It will crib about duplicate db entry otherwise
# as it would have been called already
return
def create_nuage_policy_target(self, context, port):
# Retrieve PTG
fixed_ips = port['fixed_ips']
if fixed_ips:
port_subnet_id = fixed_ips[0]['subnet_id']
ptg = self._get_ptg_by_subnet(context, port_subnet_id)
if ptg:
self.nuageclient.create_policytarget(context,
port,
ptg,
self.nuage_app)

View File

@@ -1,431 +0,0 @@
# Copyright 2014 OneConvergence, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import httplib
import requests
import urlparse
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from gbpservice.neutron.services.grouppolicy.common import exceptions
SERVICE_CONTROLLER_OPTIONS = [
cfg.StrOpt('service_controller_ip',
help=_('One Convergence NVSD Service Controller IP Address')),
cfg.StrOpt('service_controller_port',
help=_('One Convergence NVSD Service Controller Port Number')),
cfg.StrOpt('request_retries',
help=_('One Convergence NVSD Service Controller API '
'request retries')),
cfg.StrOpt('request_timeout',
help=_('One Convergence NVSD Service Controller API '
'request timeout')),
cfg.StrOpt('api_version',
default='1.0',
help=_('One Convergence NVSD Service Controller API Version')),
]
cfg.CONF.register_opts(SERVICE_CONTROLLER_OPTIONS, "NVSD_SERVICE_CONTROLLER")
LOG = logging.getLogger(__name__)
NVSD_ENDPOINT = "/nvsd_connectivity_port"
NVSD_ENDPOINT_GROUP = "/nvsd_connectivity_portgroup"
NVSD_CONTRACT = "/nvsd_connectivity_contract"
NVSD_POLICY = "/nvsd_connectivity_policy"
NVSD_POLICY_ACTION = "/nvsd_connectivity_action"
NVSD_POLICY_CLASSIFIER = "/nvsd_connectivity_classifier"
NVSD_POLICY_RULE = "/nvsd_connectivity_rule"
NVSD_SERVICE = "/service"
ADMIN_URL = "&is_admin=true"
API_TENANT_USER = "?tenant_id=%s&user_id=%s"
class GroupPolicyException(exceptions.GroupPolicyException):
"""Base for policy driver exceptions returned to user."""
message = _("Unexpected response code %(status)s from NVSD "
"Service Controller for %(method)s to %(url)s")
class NVSDServiceController(object):
"""Encapsulates the One Convergence NVSD Service Controller details.
Uses python-requests library to perform API request to One Convergence
NVSD Service Controller.
"""
def __init__(self):
self._host = cfg.CONF.NVSD_SERVICE_CONTROLLER.service_controller_ip
self._port = cfg.CONF.NVSD_SERVICE_CONTROLLER.service_controller_port
self._retries = cfg.CONF.NVSD_SERVICE_CONTROLLER.request_retries
self._request_timeout = float(cfg.CONF.NVSD_SERVICE_CONTROLLER.
request_timeout)
self.service_api_url = 'http://' + self._host + ':' + str(self._port)
self.pool = requests.Session()
def do_request(self, method, url=None, headers=None, data=None,
timeout=10):
response = self.pool.request(method, url=url,
headers=headers, data=data,
timeout=timeout)
return response
def request(self, method, uri, context, body="",
content_type="application/json", filters={}):
"""Issue a request to NVSD Service Controller."""
headers = {"Content-Type": content_type}
api_version = "/v" + cfg.CONF.NVSD_SERVICE_CONTROLLER.api_version
uri = api_version + uri
if context.is_admin:
uri = uri + ADMIN_URL
if filters.get('tenant_id'):
uri = uri + "&filter_tenant_id=%s" % filters.get('tenant_id')[0]
url = urlparse.urljoin(self.service_api_url, uri)
response = None
try:
response = self.do_request(method, url=url, headers=headers,
data=body,
timeout=self._request_timeout)
LOG.debug("Request: %(method)s %(uri)s executed",
{'method': method, 'uri': self.service_api_url + uri})
except httplib.IncompleteRead as err:
response = err.partial
except Exception as err:
LOG.error(_("Request failed in NVSD Service Controller. "
"Error : %s"), err)
if response is None:
# Request was timed out.
LOG.error(_("Response is Null, Request for method : %(method)s to "
"%(uri)s Timed out"), {'method': method, 'uri': uri})
raise GroupPolicyException(status="TimedOut", method=method,
url=self.service_api_url + uri)
status = response.status_code
#Not Found (404) is OK for DELETE. Ignore it here
if method == 'DELETE' and status == 404:
return
elif status not in (requests.codes.ok, requests.codes.created,
requests.codes.no_content):
LOG.error(_("Unexpected response code %(status)s from NVSD "
"Service Controller for %(method)s to %(url)s"),
{'status': status, 'method': method, 'url': url})
raise GroupPolicyException(status=status, method=method,
url=self.service_api_url + uri)
else:
LOG.debug("Success: %(method)s %(url)s status=%(status)s",
{'method': method, 'url': self.service_api_url + uri,
'status': status})
response.body = response.content
return response
class NVSDServiceApi(object):
"""Invokes One Convergence NVSD Service Controller API.
Invokes the appropriate One Convergence NVSD Service Controller API for
each of the Openstack Group Based Policy API operation. Maps the Openstack
Group Policy parameters to One Convergence NVSD API parameters.
"""
def __init__(self):
self.nvsd_service_controller = NVSDServiceController()
def create_policy_classifier(self, context, policy_classifier):
body = copy.deepcopy(policy_classifier)
body.update({"port": policy_classifier.get("port_range")})
tenant_id = context.tenant_id
uri = (NVSD_POLICY_CLASSIFIER + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("POST", uri, context,
jsonutils.dumps(body))
return response.json()
def get_policy_classifiers(self, context, tenant_id, filters={}):
uri = (NVSD_POLICY_CLASSIFIER + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def get_policy_classifier(self, context, classifier_id):
uri = (NVSD_POLICY_CLASSIFIER + "/%s?tenant_id=%s&user_id=%s" %
(classifier_id, context.tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def update_policy_classifier(self, context, classifier):
tenant_id = context.tenant_id
classifier_id = classifier.get('id')
body = copy.deepcopy(classifier)
body.update({"port": classifier.get("port_range")})
uri = (NVSD_POLICY_CLASSIFIER + "/%s?tenant_id=%s&user_id=%s" %
(classifier_id, tenant_id, context.user))
response = self.nvsd_service_controller.request("PUT", uri, context,
jsonutils.dumps(body))
return response.json()
def delete_policy_classifier(self, context, classifier_id):
tenant_id = context.tenant_id
uri = (NVSD_POLICY_CLASSIFIER + "/%s?tenant_id=%s&user_id=%s" %
(classifier_id, tenant_id, context.user))
self.nvsd_service_controller.request("DELETE", uri, context)
def create_policy_rule(self, context, rule):
'''
body = copy.deepcopy(rule)
body.update({'classifier': rule.get('policy_classifier_id'),
'actions': rule.get('policy_actions', []),
'policies_attached': []})
'''
tenant_id = context.tenant_id
uri = (NVSD_POLICY_RULE + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("POST", uri, context,
jsonutils.dumps(rule))
return response.json()
def update_policy_rule(self, context, rule):
tenant_id = context.tenant_id
rule_id = rule.get('id')
body = copy.deepcopy(rule)
body.update({'classifier': rule.get('policy_classifier_id'),
'actions': rule.get('policy_actions', [])})
uri = (NVSD_POLICY_RULE + "/%s?tenant_id=%s&user_id=%s" %
(rule_id, tenant_id, context.user))
response = self.nvsd_service_controller.request("PUT", uri, context,
jsonutils.dumps(body))
return response.json()
def get_policy_rule(self, context, rule_id):
uri = (NVSD_POLICY_RULE + "/%s?tenant_id=%s&user_id=%s" %
(rule_id, context.tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def get_policy_rules(self, context, tenant_id, filters={}):
uri = (NVSD_POLICY_RULE + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def delete_policy_rule(self, context, rule_id):
uri = (NVSD_POLICY_RULE + "/%s?tenant_id=%s&user_id=%s" %
(rule_id, context.tenant_id, context.user))
self.nvsd_service_controller.request("DELETE", uri, context)
def create_policy_action(self, context, action):
body = copy.deepcopy(action)
action_type = action.get("action_type")
if action_type.lower() == "redirect":
body["action_type"] = "l2redirect"
tenant_id = context.tenant_id
uri = (NVSD_POLICY_ACTION + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("POST", uri, context,
jsonutils.dumps(body))
return response.json()
def update_policy_action(self, context, policy_action):
tenant_id = context.tenant_id
action_id = policy_action.get('id')
body = copy.deepcopy(policy_action)
action_type = policy_action.get("action_type")
if action_type.lower() == "redirect":
body["action_type"] = "l2redirect"
uri = (NVSD_POLICY_ACTION + "/%s?tenant_id=%s&user_id=%s" %
(action_id, tenant_id, context.user))
response = self.nvsd_service_controller.request("PUT", uri, context,
jsonutils.dumps(body))
return response.json()
def get_policy_action(self, context, action_id):
uri = (NVSD_POLICY_ACTION + "/%s?tenant_id=%s&user_id=%s" %
(action_id, context.tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri)
return response.json()
def get_policy_actions(self, context, tenant_id, filters={}):
uri = (NVSD_POLICY_ACTION + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def delete_policy_action(self, context, action_id):
uri = (NVSD_POLICY_ACTION + "/%s?tenant_id=%s&user_id=%s" %
(action_id, context.tenant_id, context.user))
self.nvsd_service_controller.request("DELETE", uri, context)
def create_endpointgroup(self, context, endpointgroup):
uri = (NVSD_ENDPOINT_GROUP + "?tenant_id=%s&user_id=%s" %
(context.tenant_id, context.user))
response = self.nvsd_service_controller.request(
"POST", uri, context,
jsonutils.dumps(endpointgroup))
return response.json()
def get_endpointgroups(self, context, tenant_id, filters={}):
uri = (NVSD_ENDPOINT_GROUP + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def get_endpointgroup(self, context, endpointgroup_id):
uri = (NVSD_ENDPOINT_GROUP + "/%s?tenant_id=%s&user_id=%s" %
(endpointgroup_id, context.tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def update_endpointgroup(self, context, endpointgroup):
tenant_id = context.tenant_id
endpointgroup_id = endpointgroup.get('id')
uri = (NVSD_ENDPOINT_GROUP + "/%s?tenant_id=%s&user_id=%s" %
(endpointgroup_id, tenant_id, context.user))
response = self.nvsd_service_controller.request(
"PUT", uri, context,
jsonutils.dumps(endpointgroup))
return response.json()
def delete_endpointgroup(self, context, endpointgroup_id):
uri = (NVSD_ENDPOINT_GROUP + "/%s?tenant_id=%s&user_id=%s" %
(endpointgroup_id, context.tenant_id, context.user))
self.nvsd_service_controller.request("DELETE", uri, context)
def create_endpoint(self, context, endpoint):
body = copy.deepcopy(endpoint)
body.update({'connectivity_portgroup_id':
endpoint.get('policy_target_group_id')})
tenant_id = context.tenant_id
uri = (NVSD_ENDPOINT + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("POST", uri, context,
jsonutils.dumps(body))
return response.json()
def update_endpoint(self, context, endpoint):
tenant_id = context.tenant_id
endpoint_id = endpoint.get('id')
body = copy.deepcopy(endpoint)
body.update({'connectivity_portgroup_id':
endpoint.get('policy_target_group_id')})
uri = (NVSD_ENDPOINT + "/%s?tenant_id=%s&user_id=%s" %
(endpoint_id, tenant_id, context.user))
response = self.nvsd_service_controller.request("PUT", uri, context,
jsonutils.dumps(body))
return response.json()
def get_endpoint(self, context, endpoint_id):
uri = (NVSD_ENDPOINT + "/%s?tenant_id=%s&user_id=%s" %
(endpoint_id, context.tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def get_endpoints(self, context, tenant_id, filters={}):
uri = (NVSD_ENDPOINT + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def delete_endpoint(self, context, endpoint_id):
uri = (NVSD_ENDPOINT + "/%s?tenant_id=%s&user_id=%s" %
(endpoint_id, context.tenant_id, context.user))
self.nvsd_service_controller.request("DELETE", uri, context)
return
def create_contract(self, context, contract):
tenant_id = context.tenant_id
uri = (NVSD_CONTRACT + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request(
"POST", uri, context, jsonutils.dumps(contract))
return response.json()
def update_contract(self, context, contract):
tenant_id = context.tenant_id
contract_id = contract.get('id')
uri = (NVSD_CONTRACT + "/%s?tenant_id=%s&user_id=%s" %
(contract_id, tenant_id, context.user))
response = self.nvsd_service_controller.request(
"PUT", uri, context, jsonutils.dumps(contract))
return response.json()
def get_contract(self, context, contract_id):
uri = (NVSD_CONTRACT + "/%s?tenant_id=%s&user_id=%s" %
(contract_id, context.tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def get_contracts(self, context, tenant_id, filters={}):
uri = (NVSD_CONTRACT + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def delete_contract(self, context, contract_id):
uri = (NVSD_CONTRACT + "/%s?tenant_id=%s&user_id=%s" %
(contract_id, context.tenant_id, context.user))
self.nvsd_service_controller.request("DELETE", uri, context)
def create_policy(self, context, policy):
tenant_id = context.tenant_id
uri = (NVSD_POLICY + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request(
"POST", uri, context, jsonutils.dumps(policy))
return response.json()
def update_policy(self, context, policy):
tenant_id = context.tenant_id
policy_id = policy.get('id')
uri = (NVSD_POLICY + "/%s?tenant_id=%s&user_id=%s" %
(policy_id, tenant_id, context.user))
response = self.nvsd_service_controller.request(
"PUT", uri, context, jsonutils.dumps(policy))
return response.json()
def get_policy(self, context, policy_id):
uri = (NVSD_POLICY + "/%s?tenant_id=%s&user_id=%s" %
(policy_id, context.tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def get_policys(self, context, tenant_id, filters={}):
uri = (NVSD_POLICY + "?tenant_id=%s&user_id=%s" %
(tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()
def delete_policy(self, context, policy_id):
uri = (NVSD_POLICY + "/%s?tenant_id=%s&user_id=%s" %
(policy_id, context.tenant_id, context.user))
self.nvsd_service_controller.request("DELETE", uri, context)
def get_nvsd_service(self, context, service_id):
uri = (NVSD_SERVICE + "/%s?tenant_id=%s&user_id=%s" %
(service_id, context.tenant_id, context.user))
response = self.nvsd_service_controller.request("GET", uri, context)
return response.json()

View File

@@ -1,204 +0,0 @@
# Copyright 2014 OneConvergence, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import log
from oslo_log import log as logging
from oslo_utils import excutils
from gbpservice.neutron.services.grouppolicy.drivers import (
resource_mapping as res_map)
from gbpservice.neutron.services.grouppolicy.drivers.oneconvergence import (
nvsd_gbp_api as api)
LOG = logging.getLogger(__name__)
class NvsdGbpDriver(res_map.ResourceMappingDriver):
"""One Convergence NVSD Group Policy Driver for Group Policy Service Plugin
This class inherits from ResourceMappingDriver and overrides the implicit
Subnet creation for an EndPointGroup. One Convergence NVSD only supports
REDIRECT to an L2 Service at present and the Provider and Consumer PTGs
have to be on the same network and subnet. Hence, One Convergence NVSD
Group Policy Driver creates only one default L2 Policy for a tenant.
Further, the PTGs do not have a one-to-one mapping to a subnet, but rather
multiple PTGs are mapped to one subnet. One Convergence NVSD maps a PTG to
an NVSD Port Group.
"""
def __init__(self):
self.nvsd_api = api.NVSDServiceApi()
@log.log
def create_policy_target_postcommit(self, context):
super(NvsdGbpDriver, self).create_policy_target_postcommit(context)
try:
self.nvsd_api.create_endpoint(context._plugin_context,
context.current)
except Exception:
with excutils.save_and_reraise_exception():
super(NvsdGbpDriver,
self).delete_policy_target_postcommit(context)
@log.log
def update_policy_target_postcommit(self, context):
super(NvsdGbpDriver, self).update_policy_target_postcommit(context)
self.nvsd_api.update_endpoint(context._plugin_context,
context.current)
@log.log
def delete_policy_target_postcommit(self, context):
self.nvsd_api.delete_endpoint(context._plugin_context,
context.current['id'])
super(NvsdGbpDriver, self).delete_policy_target_postcommit(context)
@log.log
def create_policy_target_group_precommit(self, context):
# Reuse the previously created implicit L2 Policy for the tenant
if not context.current['l2_policy_id']:
l2ps = context._plugin.get_l2_policies(
context._plugin_context,
filters=({'description': ["Implicitly created L2 policy"],
"tenant_id": [context.current['tenant_id']]}))
if l2ps:
context.set_l2_policy_id(l2ps[0]['id'])
super(NvsdGbpDriver, self).create_policy_target_group_precommit(
context)
@log.log
def create_policy_target_group_postcommit(self, context):
subnets = context.current['subnets']
if not subnets:
if self._use_implicit_subnet(context) is True:
subnets = context.current['subnets']
l2p_id = context.current['l2_policy_id']
l2p = context._plugin.get_l2_policy(context._plugin_context,
l2p_id)
l3p_id = l2p['l3_policy_id']
l3p = context._plugin.get_l3_policy(context._plugin_context,
l3p_id)
router_id = l3p['routers'][0]
for subnet_id in subnets:
self._plug_router_to_subnet(context._plugin_context,
subnet_id, router_id)
self.nvsd_api.create_endpointgroup(context._plugin_context,
context.current)
self._handle_network_service_policy(context)
self._handle_policy_rule_sets(context)
self._update_default_security_group(context._plugin_context,
context.current['id'],
context.current['tenant_id'],
context.current['subnets'])
@log.log
def update_policy_target_group_postcommit(self, context):
super(NvsdGbpDriver,
self).update_policy_target_group_postcommit(context)
self.nvsd_api.update_endpointgroup(context._plugin_context,
context.current)
@log.log
def delete_policy_target_group_precommit(self, context):
super(NvsdGbpDriver,
self).delete_policy_target_group_precommit(context)
l2p_id = context.current['l2_policy_id']
ptgs = context._plugin.get_policy_target_groups(
context._plugin_context,
filters=({'l2_policy_id': [l2p_id]}))
for ptg in ptgs:
if ptg['id'] != context.current['id']:
context.current['l2_policy_id'] = None
return
@log.log
def delete_policy_target_group_postcommit(self, context):
try:
self._cleanup_network_service_policy(context,
context.current,
context.nsp_cleanup_ipaddress,
context.nsp_cleanup_fips)
self._cleanup_redirect_action(context)
# Cleanup SGs
self._unset_sg_rules_for_subnets(
context, context.current['subnets'],
context.current['provided_policy_rule_sets'],
context.current['consumed_policy_rule_sets'])
except Exception as err:
LOG.error(_("Cleanup of Redirect Action failed. "
"Error : %s"), err)
try:
l2p_id = context.current['l2_policy_id']
l3p = self._get_l3p_for_l2policy(context, l2p_id)
router_id = l3p['routers'][0]
for subnet_id in context.current['subnets']:
self._cleanup_subnet(context, subnet_id, router_id)
self._delete_default_security_group(
context._plugin_context, context.current['id'],
context.current['tenant_id'])
except Exception as err:
LOG.error(_("Cleanup of Policy target group failed. "
"Error : %s"), err)
self.nvsd_api.delete_endpointgroup(context._plugin_context,
context.current['id'])
@log.log
def create_l2_policy_postcommit(self, context):
super(NvsdGbpDriver, self).create_l2_policy_postcommit(context)
@log.log
def delete_l2_policy_postcommit(self, context):
super(NvsdGbpDriver, self).delete_l2_policy_postcommit(context)
@log.log
def create_policy_classifier_postcommit(self, context):
super(NvsdGbpDriver, self).create_policy_classifier_postcommit(context)
self.nvsd_api.create_policy_classifier(context._plugin_context,
context.current)
@log.log
def update_policy_classifier_postcommit(self, context):
super(NvsdGbpDriver, self).update_policy_classifier_postcommit(context)
self.nvsd_api.update_policy_classifier(context._plugin_context,
context.current)
@log.log
def delete_policy_classifier_postcommit(self, context):
self.nvsd_api.delete_policy_classifier(context._plugin_context,
context.current['id'])
super(NvsdGbpDriver, self).delete_policy_classifier_postcommit(context)
def _use_implicit_subnet(self, context):
# One Convergence NVSD does not support REDIRECT to a different Subnet
# at present. So restricting to use same subnet for a given L2 Policy
ptgs = context._plugin.get_policy_target_groups(
context._plugin_context, filters=(
{'l2_policy_id': [context.current['l2_policy_id']]}))
for ptg in ptgs:
if ptg['subnets']:
context.add_subnet(ptg['subnets'][0])
return False
# Create a new Subnet for first PTG using the L2 Policy
super(NvsdGbpDriver, self)._use_implicit_subnet(context)
return True
def _cleanup_subnet(self, context, subnet_id, router_id):
# Cleanup is performed only when the last PTG on subnet is removed
ptgs = context._plugin.get_policy_target_groups(
context._plugin_context)
for ptg in ptgs:
ptg_subnets = ptg['subnets']
if subnet_id in ptg_subnets:
return
super(NvsdGbpDriver, self)._cleanup_subnet(context._plugin_context,
subnet_id, router_id)

View File

@@ -1769,4 +1769,4 @@ def default_extension_behavior(table, keys=None):
# Now exec the actual function for postprocessing
func(inst, *args)
return inner
return wrap
return wrap

View File

@@ -48,6 +48,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
db_group_policy_mapping.GroupPolicyMappingDbMixin.
"""
_supported_extension_aliases = ["group-policy", "group-policy-mapping"]
path_prefix = gp_cts.GBP_PREFIXES[pconst.GROUP_POLICY]
@property
def supported_extension_aliases(self):

View File

@@ -113,4 +113,4 @@ class NoopDriver(object):
@log.log
def delete_service_profile_postcommit(self, context):
pass
pass

View File

@@ -1,527 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import copy
import eventlet
from heatclient import client as heat_client
from neutron.api.v2 import attributes
from neutron.common import log
from neutron.db import model_base
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import sqlalchemy as sa
from gbpservice.neutron.services.grouppolicy.drivers.oneconvergence import (
nvsd_gbp_api as napi)
from gbpservice.neutron.services.servicechain.plugins.msc.drivers import (
simplechain_driver as simplechain_driver)
eventlet.monkey_patch()
LOG = logging.getLogger(__name__)
class ServiceChainInstancePolicyMap(model_base.BASEV2):
"""NVSD Policy attached to the Service Chain Instance."""
__tablename__ = 'nvsd_sc_instance_policies'
instance_id = sa.Column(sa.String(36),
nullable=False, primary_key=True)
policy_id = sa.Column(sa.String(36),
nullable=False, primary_key=True)
class ServiceChainInstanceVipEPMap(model_base.BASEV2):
"""NVSD Policy attached to the Service Chain Instance."""
__tablename__ = 'nvsd_sc_instance_vip_eps'
instance_id = sa.Column(sa.String(36),
nullable=False, primary_key=True)
vip_port = sa.Column(sa.String(36),
nullable=False, primary_key=True)
nvsd_ep_id = sa.Column(sa.String(36),
nullable=False, primary_key=True)
class PendingServiceChainInsertions(object):
"""Encapsulates a ServiceChain Insertion Operation"""
def __init__(self, context, node_stacks, chain_instance_id,
provider_ptg_id, consumer_ptg_id, classifier_id):
self.context = context
self.node_stacks = node_stacks
self.chain_instance_id = chain_instance_id
self.provider_ptg_id = provider_ptg_id
self.consumer_ptg_id = consumer_ptg_id
self.classifier_id = classifier_id
class OneconvergenceServiceChainDriver(simplechain_driver.SimpleChainDriver):
STATUSES = (CREATE_IN_PROGRESS, CREATE_FAILED, CREATE_COMPLETE
) = ('CREATE_IN_PROGRESS', 'CREATE_FAILED', 'CREATE_COMPLETE')
def __init__(self):
self.pending_chain_insertions = list()
self.nvsd_api = napi.NVSDServiceApi()
@log.log
def create_servicechain_node_precommit(self, context):
pass
@log.log
def create_servicechain_spec_precommit(self, context):
super(OneconvergenceServiceChainDriver,
self).create_servicechain_spec_precommit(context)
@log.log
def update_servicechain_spec_postcommit(self, context):
filters = {'servicechain_spec': [context._original_sc_spec['id']]}
sc_instances = context._plugin.get_servicechain_instances(
context._plugin_context, filters)
if sc_instances:
self._update_servicechain_instance(context,
sc_instances[0],
context._sc_spec)
@log.log
def create_servicechain_instance_postcommit(self, context):
self._create_servicechain_instance_postcommit(context)
node_stacks = self._get_chain_stacks(context._plugin_context.session,
context.current['id'])
thread_context = copy.copy(context._plugin_context)
pendinginsertion = PendingServiceChainInsertions(
thread_context,
node_stacks,
context.current['id'],
context.current['provider_ptg_id'],
context.current['consumer_ptg_id'],
context.current['classifier_id'])
eventlet.spawn_n(self._process_chain_processing, pendinginsertion)
@log.log
def update_servicechain_instance_postcommit(self, context):
original_spec_ids = context._original_sc_instance.get(
'servicechain_specs')
new_spec_ids = context._sc_instance.get('servicechain_specs')
if set(original_spec_ids) != set(new_spec_ids):
for new_spec_id in new_spec_ids:
newspec = context._plugin.get_servicechain_spec(
context._plugin_context, new_spec_id)
self._update_servicechain_instance(context, context.current,
newspec)
@log.log
def delete_servicechain_instance_postcommit(self, context):
self.delete_nvsd_policy(context, context.current['id'])
self._delete_chain_policy_map(context._plugin_context.session,
context.current['id'])
self.delete_nvsd_ep(context, context.current['id'])
super(OneconvergenceServiceChainDriver,
self).delete_servicechain_instance_postcommit(context)
def _get_l2p(self, context, l2p_id):
return self._get_resource(self._grouppolicy_plugin,
context._plugin_context,
'l2_policy',
l2p_id)
def _get_member_ports(self, context, ptg_id):
ptg = self._get_ptg(context, ptg_id)
pt_ids = ptg.get("policy_targets")
member_ports = []
for pt_id in pt_ids:
pt = self._get_pt(context, pt_id)
port_id = pt.get("port_id")
member_ports.append(port_id)
return member_ports
def _fetch_template_and_params(self, context, sc_instance,
sc_spec, sc_node):
stack_template = sc_node.get('config')
if not stack_template:
return
stack_template = jsonutils.loads(stack_template)
config_param_values = sc_instance.get('config_param_values', {})
stack_params = {}
# config_param_values has the parameters for all Nodes. Only apply
# the ones relevant for this Node
if config_param_values:
config_param_values = jsonutils.loads(config_param_values)
config_param_names = sc_spec.get('config_param_names', [])
if config_param_names:
config_param_names = ast.literal_eval(config_param_names)
# TODO(magesh): Process on the basis of ResourceType rather than Name
provider_ptg_id = sc_instance.get("provider_ptg_id")
node_params = (stack_template.get('Parameters')
or stack_template.get('parameters'))
if not node_params:
return (stack_template, stack_params)
for key in list(set(config_param_names) & set(node_params.keys())):
if key == "PoolMemberIPs":
value = self._get_member_ips(context, provider_ptg_id)
# TODO(Magesh): Return one value for now
value = value[0] if value else ""
config_param_values[key] = value
elif key == "pool_member_port":
value = self._get_member_ports(context, provider_ptg_id)
# TODO(Magesh): Return one value for now
value = value[0] if value else ""
config_param_values[key] = value
elif key == "Subnet":
value = self._get_ptg_subnet(context, provider_ptg_id)
config_param_values[key] = value
elif key == "vip_port":
value = self._create_lb_service_port(context, provider_ptg_id)
config_param_values[key] = value
for parameter in list(set(config_param_values.keys()) &
set(node_params.keys())):
if parameter in node_params.keys():
stack_params[parameter] = config_param_values[parameter]
return (stack_template, stack_params)
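# Illustrative sketch (assumed values, not from this change): for a node
# whose Heat template declares
#     "Parameters": {"Subnet": {...}, "vip_port": {...}}
# and a spec whose config_param_names is "['Subnet', 'vip_port']", this
# method is expected to return stack_params along the lines of
#     {"Subnet": "<provider PTG subnet id>",
#      "vip_port": "<id of the port created by _create_lb_service_port>"}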
def _create_servicechain_instance_stacks(self, context, sc_node_ids,
sc_instance, sc_spec):
for sc_node_id in sc_node_ids:
sc_node = context._plugin.get_servicechain_node(
context._plugin_context, sc_node_id)
stack_template, stack_params = self._fetch_template_and_params(
context, sc_instance, sc_spec, sc_node)
stack = HeatClient(context._plugin_context).create(
"stack_" + sc_instance['name'] + sc_node['name']
+ sc_node['id'][:5],
stack_template,
stack_params)
self._insert_chain_stack_db(context._plugin_context.session,
sc_instance['id'], stack['stack']['id'])
def _create_servicechain_instance_postcommit(self, context):
sc_instance = context.current
sc_spec_ids = sc_instance.get('servicechain_specs')
for sc_spec_id in sc_spec_ids:
sc_spec = context._plugin.get_servicechain_spec(
context._plugin_context, sc_spec_id)
sc_node_ids = sc_spec.get('nodes')
self._create_servicechain_instance_stacks(context, sc_node_ids,
sc_instance, sc_spec)
def _create_port(self, plugin_context, attrs):
return self._create_resource(self._core_plugin, plugin_context, 'port',
attrs)
def _create_resource(self, plugin, context, resource, attrs):
action = 'create_' + resource
obj_creator = getattr(plugin, action)
obj = obj_creator(context, {resource: attrs})
return obj
def _delete_resource(self, plugin, context, resource, resource_id):
action = 'delete_' + resource
obj_deleter = getattr(plugin, action)
obj_deleter(context, resource_id)
def _create_lb_service_port(self, context, ptg_id):
ptg = self._get_ptg(context, ptg_id)
subnet_id = ptg.get("subnets")[0]
l2p_id = ptg['l2_policy_id']
l2p = self._get_l2p(context, l2p_id)
attrs = {'tenant_id': context.current['tenant_id'],
'name': 'ep_' + context.current['name'],
'network_id': l2p['network_id'],
'fixed_ips': [{"subnet_id": subnet_id}],
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'device_id': '',
'device_owner': 'compute:service',
'port_security_enabled': False,
'security_groups': [],
'admin_state_up': True}
port = self._create_port(context._plugin_context, attrs)
port_id = port['id']
body = {'tenant_id': context._plugin_context.tenant,
'user_id': context._plugin_context.user,
'policy_target_group_id': ptg_id,
'port_id': port_id}
nvsd_ep = self.nvsd_api.create_endpoint(context._plugin_context, body)
ep_id = nvsd_ep['id']
self._add_chain_nvsd_vip_ep_map(context._plugin_context.session,
context.current['id'],
ep_id,
port_id)
return port_id
def _delete_port(self, plugin_context, port_id):
self._delete_resource(self._core_plugin,
plugin_context, 'port', port_id)
def delete_nvsd_ep(self, context, sc_instance_id):
chain_nvsd_ep_map = self._get_chain_nvsd_ep_map(
context._plugin_context.session, sc_instance_id)
if not chain_nvsd_ep_map:
return
nvsd_ep_id = chain_nvsd_ep_map.nvsd_ep_id
vip_port = chain_nvsd_ep_map.vip_port
self.nvsd_api.delete_endpoint(context._plugin_context,
nvsd_ep_id)
try:
self._delete_port(context._plugin_context, vip_port)
except Exception:
pass
self._delete_chain_nvsd_ep_map(context._plugin_context.session,
sc_instance_id)
def _update_servicechain_instance(self, context, sc_instance, newspec):
self._delete_servicechain_instance_stacks(context._plugin_context,
sc_instance['id'])
# Delete the existing stacks and policy, then create new ones from the
# new spec
sc_node_ids = newspec.get('nodes')
self._create_servicechain_instance_stacks(context, sc_node_ids,
sc_instance, newspec)
node_stacks = self._get_chain_stacks(context._plugin_context.session,
context.current['id'])
thread_context = copy.copy(context._plugin_context)
pendinginsertion = PendingServiceChainInsertions(
thread_context,
node_stacks,
context.current['id'],
context.current['provider_ptg_id'],
context.current['consumer_ptg_id'],
context.current['classifier_id'])
eventlet.spawn_n(self._process_chain_processing, pendinginsertion)
def _delete_chain_policy_map(self, session, sc_instance_id):
with session.begin(subtransactions=True):
chain_policy_map = session.query(
ServiceChainInstancePolicyMap).filter_by(
instance_id=sc_instance_id).first()
if chain_policy_map:
session.delete(chain_policy_map)
def _add_chain_policy_map(self, session, sc_instance_id, policy_id):
with session.begin(subtransactions=True):
chain_policy_map = ServiceChainInstancePolicyMap(
instance_id=sc_instance_id,
policy_id=policy_id)
session.add(chain_policy_map)
def _get_chain_policy_map(self, session, sc_instance_id):
with session.begin(subtransactions=True):
chain_policy_map = session.query(
ServiceChainInstancePolicyMap).filter_by(
instance_id=sc_instance_id).first()
return chain_policy_map
def _delete_chain_nvsd_ep_map(self, session, sc_instance_id):
with session.begin(subtransactions=True):
chain_nvsd_ep_map = session.query(
ServiceChainInstanceVipEPMap).filter_by(
instance_id=sc_instance_id).first()
if chain_nvsd_ep_map:
session.delete(chain_nvsd_ep_map)
def _add_chain_nvsd_vip_ep_map(self, session, sc_instance_id, nvsd_ep_id,
port_id):
with session.begin(subtransactions=True):
chain_nvsd_ep_map = ServiceChainInstanceVipEPMap(
instance_id=sc_instance_id,
nvsd_ep_id=nvsd_ep_id,
vip_port=port_id)
session.add(chain_nvsd_ep_map)
def _get_chain_nvsd_ep_map(self, session, sc_instance_id):
with session.begin(subtransactions=True):
chain_nvsd_ep_map = session.query(
ServiceChainInstanceVipEPMap).filter_by(
instance_id=sc_instance_id).first()
return chain_nvsd_ep_map
def _process_chain_processing(self, pending_chain):
while True:
if self._perform_service_insertion(pending_chain):
return
def nvsd_get_service(self, context, service_id):
return self.nvsd_api.get_nvsd_service(context,
service_id)
def create_nvsd_policy(self, context, left_group, right_group,
classifier_id, nvsd_action_list):
# Create rules and a policy in the SC using the classifier and action list
rule_ids = []
for action in nvsd_action_list:
body = {'tenant_id': context.tenant,
'user_id': context.user,
'classifier': classifier_id,
'actions': [action],
'policies_attached': []}
rule = self.nvsd_api.create_policy_rule(context,
body)
rule_ids.append(rule.get("id"))
body = {'tenant_id': context.tenant,
'user_id': context.user,
'left_group': left_group,
'right_group': right_group,
'rules': rule_ids}
nvsd_policy = self.nvsd_api.create_policy(context,
body)
return nvsd_policy.get('id')
def delete_nvsd_policy(self, context, sc_instance_id):
chain_nvsd_policy_map = self._get_chain_policy_map(
context._plugin_context.session, sc_instance_id)
if not chain_nvsd_policy_map:
return
nvsd_policy_id = chain_nvsd_policy_map.policy_id
nvsd_policy = self.nvsd_api.get_policy(context._plugin_context,
nvsd_policy_id)
self.nvsd_api.delete_policy(context._plugin_context,
nvsd_policy_id)
for rule_id in nvsd_policy.get("rules"):
rule = self.nvsd_api.get_policy_rule(context._plugin_context,
rule_id)
self.nvsd_api.delete_policy_rule(context._plugin_context, rule_id)
for action_id in rule.get("actions"):
self.nvsd_api.delete_policy_action(context._plugin_context,
action_id)
def checkStackStatus(self, context, node_stacks):
for node_stack in node_stacks:
stack = HeatClient(context).stacks.get(node_stack.stack_id)
# Stack status is one of CREATE_COMPLETE, CREATE_IN_PROGRESS, CREATE_FAILED
if stack.stack_status == self.CREATE_IN_PROGRESS:
return self.CREATE_IN_PROGRESS
elif stack.stack_status == self.CREATE_FAILED:
return self.CREATE_FAILED
elif stack.stack_status != self.CREATE_COMPLETE:
return self.CREATE_FAILED
return self.CREATE_COMPLETE
def _fetch_serviceids_from_stack(self, context, node_stacks,
chain_instance_id):
service_ids = []
for node_stack in node_stacks:
stack_resources = HeatClient(context).resources.list(
node_stack.stack_id)
for resource in stack_resources:
if resource.resource_type == "OC::ES::Service":
service_id = resource.physical_resource_id
service_ids.append(service_id)
break
return service_ids
def create_nvsd_action(self, context, action_body):
return self.nvsd_api.create_policy_action(context, action_body)
def _create_nvsd_services_action(self, context, service_ids):
nvsd_action_list = []
copy_action = None
l2redirect_action = None
for service_id in service_ids:
service = self.nvsd_get_service(context, service_id)
if service['insertion_mode'] == "L2":
if not l2redirect_action:
l2redirect_action = {"action_type": "L2REDIRECT",
'tenant_id': context.tenant,
'user_id': context.user,
"action_value": service_id}
else:
if l2redirect_action.get("action_value"):
l2redirect_action['action_value_list'] = [{
"service": l2redirect_action[
'action_value']}]
del l2redirect_action['action_value']
else:
l2redirect_action['action_value_list'].append({
"service": service_id})
elif service['insertion_mode'] == "TAP":
copy_action = {"action_type": "TAP",
'tenant_id': context.tenant,
'user_id': context.user,
"action_value": service_id}
# Supporting only one TAP in a chain
if copy_action:
action = self.create_nvsd_action(context, copy_action)
nvsd_action_list.append(action['id'])
if l2redirect_action:
action = self.create_nvsd_action(context, l2redirect_action)
nvsd_action_list.append(action['id'])
return nvsd_action_list
def _perform_service_insertion(self, pending_chain):
context = pending_chain.context
node_stacks = pending_chain.node_stacks
chain_instance_id = pending_chain.chain_instance_id
status = self.checkStackStatus(context, node_stacks)
if status == self.CREATE_IN_PROGRESS:
return False
elif status == self.CREATE_FAILED:
# TODO(Magesh): Status has to be added to ServiceChainInstance
# Update the Status to ERROR at this point
return True
# Services are created by now. Determine the Service IDs and set up
# Traffic Steering.
service_ids = self._fetch_serviceids_from_stack(context, node_stacks,
chain_instance_id)
nvsd_action_list = self._create_nvsd_services_action(context,
service_ids)
left_group = pending_chain.consumer_ptg_id
right_group = pending_chain.provider_ptg_id
classifier_id = pending_chain.classifier_id
if nvsd_action_list:
policy_id = self.create_nvsd_policy(context, left_group,
right_group, classifier_id,
nvsd_action_list)
# TODO(Magesh): Need to store actions and rules also, because
# cleanup will be missed if the policy create fails
self._add_chain_policy_map(
context.session, chain_instance_id, policy_id)
return True
class HeatClient:
def __init__(self, context, password=None):
api_version = "1"
endpoint = "%s/%s" % (cfg.CONF.servicechain.heat_uri, context.tenant)
kwargs = {
'token': context.auth_token,
'username': context.user_name,
'password': password
}
self.client = heat_client.Client(api_version, endpoint, **kwargs)
self.stacks = self.client.stacks
self.resources = self.client.resources
def create(self, name, data, parameters=None):
fields = {
'stack_name': name,
'timeout_mins': 10,
'disable_rollback': True,
'password': data.get('password')
}
fields['template'] = data
fields['parameters'] = parameters
return self.stacks.create(**fields)
def list(self, stack_id):
return self.resources.list(stack_id)

View File

@@ -11,10 +11,12 @@
# under the License.
from neutron.common import log
from neutron.plugins.common import constants as pconst
from oslo_log import log as logging
from oslo_utils import excutils
import gbpservice.neutron.db.servicechain_db as servicechain_db
from gbpservice.neutron.services.grouppolicy.common import constants as gp_cts
from gbpservice.neutron.services.servicechain.plugins.msc import (
context as servicechain_context)
from gbpservice.neutron.services.servicechain.plugins.msc import (
@@ -32,6 +34,7 @@ class ServiceChainPlugin(servicechain_db.ServiceChainDbPlugin,
"""
supported_extension_aliases = ["servicechain"]
path_prefix = gp_cts.GBP_PREFIXES[pconst.SERVICECHAIN]
def __init__(self):
self.driver_manager = manager.DriverManager()

View File

@@ -59,4 +59,4 @@ class HeatClient:
"not found at cleanup"), {'stack': stack_id})
def get(self, stack_id):
return self.stacks.get(stack_id)

View File

@@ -11,12 +11,14 @@
# under the License.
from neutron.common import log
from neutron.plugins.common import constants as pconst
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from gbpservice.common import utils
from gbpservice.neutron.db import servicechain_db
from gbpservice.neutron.services.grouppolicy.common import constants as gp_cts
from gbpservice.neutron.services.servicechain.plugins.ncp import (
context as ctx)
from gbpservice.neutron.services.servicechain.plugins.ncp import (
@@ -37,6 +39,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
"""
supported_extension_aliases = ["servicechain"]
path_prefix = gp_cts.GBP_PREFIXES[pconst.SERVICECHAIN]
def __init__(self):
self.driver_manager = manager.NodeDriverManager()

View File

@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from neutron.openstack.common import uuidutils
from oslo_utils import uuidutils
_uuid = uuidutils.generate_uuid

View File

@@ -20,17 +20,20 @@ from neutron import context
from neutron.db import api as db_api
from neutron.db import model_base
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron import policy
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.db import test_db_base_plugin_v2
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import uuidutils
from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb
from gbpservice.neutron.db import servicechain_db as svcchain_db
from gbpservice.neutron.extensions import group_policy as gpolicy
from gbpservice.neutron.extensions import servicechain as service_chain
from gbpservice.neutron.services.grouppolicy.common import (
constants as gp_constants)
import gbpservice.neutron.tests
from gbpservice.neutron.tests.unit import common as cm
@@ -162,10 +165,10 @@ class ApiManagerMixin(object):
class GroupPolicyDBTestBase(ApiManagerMixin):
resource_prefix_map = dict(
(k, constants.COMMON_PREFIXES[constants.SERVICECHAIN])
(k, gp_constants.GBP_PREFIXES[constants.SERVICECHAIN])
for k in service_chain.RESOURCE_ATTRIBUTE_MAP.keys())
resource_prefix_map.update(dict(
(k, constants.COMMON_PREFIXES[constants.GROUP_POLICY])
(k, gp_constants.GBP_PREFIXES[constants.GROUP_POLICY])
for k in gpolicy.RESOURCE_ATTRIBUTE_MAP.keys()
))
@@ -258,7 +261,8 @@ class GroupPolicyDBTestBase(ApiManagerMixin):
class GroupPolicyDBTestPlugin(gpdb.GroupPolicyDbPlugin):
supported_extension_aliases = ['group-policy']
path_prefix = "/grouppolicy"
DB_GP_PLUGIN_KLASS = (GroupPolicyDBTestPlugin.__module__ + '.' +
@@ -267,7 +271,8 @@ DB_GP_PLUGIN_KLASS = (GroupPolicyDBTestPlugin.__module__ + '.' +
class ServiceChainDBTestPlugin(svcchain_db.ServiceChainDbPlugin):
supported_extension_aliases = ['servicechain']
path_prefix = "/servicechain"
DB_SC_PLUGIN_KLASS = (ServiceChainDBTestPlugin.__module__ + '.' +
@@ -288,12 +293,14 @@ class GroupPolicyDbTestCase(GroupPolicyDBTestBase,
'gp_plugin_name': gp_plugin,
'sc_plugin_name': sc_plugin}
test_policy_file = ETCDIR + "/test-policy.json"
cfg.CONF.set_override('policy_file', test_policy_file)
extensions.append_api_extensions_path(
gbpservice.neutron.extensions.__path__)
super(GroupPolicyDbTestCase, self).setUp(
plugin=core_plugin, ext_mgr=ext_mgr,
service_plugins=service_plugins
)
test_policy_file = ETCDIR + "/test-policy.json"
policy.refresh(policy_file=test_policy_file)
self.plugin = importutils.import_object(gp_plugin)
self._sc_plugin = importutils.import_object(sc_plugin)
if not ext_mgr:

View File

@@ -28,6 +28,7 @@ from gbpservice.neutron.tests.unit.db.grouppolicy import (
class GroupPolicyMappingDBTestPlugin(gpmdb.GroupPolicyMappingDbPlugin):
supported_extension_aliases = ['group-policy', 'group-policy-mapping']
path_prefix = "/grouppolicy"
DB_GP_PLUGIN_KLASS = (GroupPolicyMappingDBTestPlugin.__module__ + '.' +

View File

@@ -14,9 +14,9 @@
import webob.exc
from neutron import context
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from oslo_config import cfg
from oslo_utils import uuidutils
from gbpservice.neutron.db import servicechain_db as svcchain_db
from gbpservice.neutron.extensions import servicechain as service_chain
@@ -69,7 +69,8 @@ class ServiceChainDBTestBase(test_group_policy_db.GroupPolicyDBTestBase):
class ServiceChainDBTestPlugin(svcchain_db.ServiceChainDbPlugin):
supported_extension_aliases = ['servicechain']
path_prefix = "/servicechain"
DB_GP_PLUGIN_KLASS = (ServiceChainDBTestPlugin.__module__ + '.' +
ServiceChainDBTestPlugin.__name__)

View File

@@ -14,11 +14,13 @@
import copy
import sys
from apic_ml2.neutron.db import port_ha_ipaddress_binding as ha_ip_db
import mock
import netaddr
import webob.exc
from apic_ml2.neutron.db import port_ha_ipaddress_binding as ha_ip_db
from apic_ml2.neutron.tests.unit.ml2.drivers.cisco.apic import (
test_cisco_apic_common as mocked)
from neutron.agent import securitygroups_rpc as sg_cfg
from neutron.common import rpc as n_rpc
from neutron import context
@@ -26,8 +28,6 @@ from neutron.db import api as db_api
from neutron.db import db_base_plugin_v2 as n_db
from neutron.db import model_base
from neutron import manager
from neutron.tests.unit.plugins.ml2.drivers.cisco.apic import (
base as mocked)
from opflexagent import constants as ocst
from oslo_config import cfg

View File

@@ -201,4 +201,4 @@ class NeutronV2ApiTestCase(unittest.TestCase):
self._test_router_interface('add')
def test_remove_router_interface(self):
self._test_router_interface('remove')

View File

@@ -1,106 +0,0 @@
# Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.tests.unit.plugins.ml2 import test_plugin
from oslo_config import cfg
from gbpservice.neutron.services.grouppolicy import config
from gbpservice.neutron.services.grouppolicy.drivers.nuage import (
driver as nuage_driver)
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_grouppolicy_plugin as test_gp_plugin)
FAKE_GBP_APP = 'ut_gbp_app'
FAKE_DEFAULT_ENT = 'default'
NUAGE_PLUGIN_PATH = 'neutron.plugins.nuage.plugin'
FAKE_SERVER = '1.1.1.1'
FAKE_SERVER_AUTH = 'user:pass'
FAKE_SERVER_SSL = False
FAKE_BASE_URI = '/base/'
FAKE_AUTH_RESOURCE = '/auth'
FAKE_ORGANIZATION = 'fake_org'
class FakeNuageGBPClient(object):
def __init__(self, server, base_uri, serverssl,
serverauth, auth_resource, organization):
pass
def create_ptg_postcommit(self, context, application):
pass
def update_ptg_postcommit(self, context, gbp_policyruleset,
application):
pass
def delete_ptg_postcommit(self, context, application):
pass
def create_policyrule_postcommit(self, context, gbp_action,
gbp_classifier, application):
pass
def delete_policyrule_postcommit(self, context, application):
pass
def create_policytarget(self, context, port, ptg, application):
pass
class NuageGBPDriverTestCase(test_gp_plugin.GroupPolicyPluginTestCase):
def setUp(self):
config.cfg.CONF.set_override('policy_drivers',
['implicit_policy', 'resource_mapping',
'nuage_gbp_driver', 'chain_mapping'],
group='group_policy')
ml2_opts = {
'mechanism_drivers': ['nuage_gbp'],
}
for opt, val in ml2_opts.items():
cfg.CONF.set_override(opt, val, 'ml2')
def mock_nuageclient_init(self):
server = FAKE_SERVER
serverauth = FAKE_SERVER_AUTH
serverssl = FAKE_SERVER_SSL
base_uri = FAKE_BASE_URI
auth_resource = FAKE_AUTH_RESOURCE
organization = FAKE_ORGANIZATION
self.nuageclient = FakeNuageGBPClient(server,
base_uri,
serverssl,
serverauth,
auth_resource,
organization)
self.nuage_app = FAKE_GBP_APP
with mock.patch.object(nuage_driver.NuageGBPDriver,
'nuageclient_init', new=mock_nuageclient_init):
super(NuageGBPDriverTestCase, self).setUp(
core_plugin=test_plugin.PLUGIN_NAME)
class TestPolicyTargetGroup(NuageGBPDriverTestCase):
pass
class TestPolicyRuleSet(NuageGBPDriverTestCase):
pass
class TestPolicyRule(NuageGBPDriverTestCase):
pass

View File

@@ -1,204 +0,0 @@
# Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from gbpservice.neutron.services.grouppolicy.drivers.oneconvergence import (
nvsd_gbp_api as api)
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_resource_mapping as test_resource_mapping)
class MockNVSDApiClient(object):
def create_endpoint(self, context, endpoint):
pass
def update_endpoint(self, context, updated_endpoint):
pass
def delete_endpoint(self, context, endpoint_id):
pass
def create_endpointgroup(self, context, endpointgroup):
pass
def update_endpointgroup(self, context, endpointgroup):
pass
def delete_endpointgroup(self, context, endpointgroup_id):
pass
def create_policy_classifier(self, context, policy_classifier):
pass
def update_policy_classifier(self, context, policy_classifier):
pass
def delete_policy_classifier(self, context, policy_classifier_id):
pass
class OneConvergenceGBPDriverTestCase(
test_resource_mapping.ResourceMappingTestCase):
def setUp(self):
policy_drivers = ['implicit_policy', 'oneconvergence_gbp_driver',
'chain_mapping']
with mock.patch.object(
api, 'NVSDServiceApi',
new=MockNVSDApiClient) as self.mockNVSDApi:
super(OneConvergenceGBPDriverTestCase, self).setUp(
policy_drivers=policy_drivers)
class TestPolicyTarget(OneConvergenceGBPDriverTestCase,
test_resource_mapping.TestPolicyTarget):
# Functionality tests and api results are covered by the base class tests
def test_oneconvergence_controller_api_invoked(self):
with mock.patch.object(MockNVSDApiClient,
'create_endpoint') as create_ep:
with mock.patch.object(MockNVSDApiClient,
'update_endpoint') as update_ep:
with mock.patch.object(MockNVSDApiClient,
'delete_endpoint') as delete_ep:
ptg = self.create_policy_target_group(name="ptg1")
ptg_id = ptg['policy_target_group']['id']
# Create policy_target with implicit port.
pt = self.create_policy_target(
name="pt1",
policy_target_group_id=ptg_id)['policy_target']
create_ep.assert_called_once_with(mock.ANY, pt)
pt = self.update_policy_target(
pt['id'], name="new_pt")['policy_target']
update_ep.assert_called_once_with(mock.ANY, pt)
self.delete_policy_target(pt['id'])
delete_ep.assert_called_once_with(mock.ANY, pt['id'])
class TestPolicyTargetGroup(OneConvergenceGBPDriverTestCase,
test_resource_mapping.TestPolicyTargetGroup):
def test_subnet_allocation(self):
ptg1 = self.create_policy_target_group(name="ptg1")
subnet1 = ptg1['policy_target_group']['subnets']
ptg2 = self.create_policy_target_group(name="ptg2")
subnet2 = ptg2['policy_target_group']['subnets']
self.assertEqual(subnet1, subnet2)
def test_no_extra_subnets_created(self):
count = len(self._get_all_subnets())
self.create_policy_target_group()
self.create_policy_target_group()
new_count = len(self._get_all_subnets())
# One Convergence driver shares the same implicit subnet
self.assertEqual(count + 1, new_count)
def test_ip_pool_exhaustion(self):
# One Convergence driver shares the same implicit subnet
pass
def test_oneconvergence_controller_api_invoked(self):
with mock.patch.object(MockNVSDApiClient,
'create_endpointgroup') as create_epg:
with mock.patch.object(MockNVSDApiClient,
'update_endpointgroup') as update_epg:
with mock.patch.object(MockNVSDApiClient,
'delete_endpointgroup') as delete_epg:
ptg = self.create_policy_target_group(
name="ptg1")['policy_target_group']
create_epg.assert_called_once_with(mock.ANY, ptg)
ptg = self.update_policy_target_group(
ptg['id'],
name="new_ptg")['policy_target_group']
update_epg.assert_called_once_with(mock.ANY, ptg)
self.delete_policy_target_group(ptg['id'])
delete_epg.assert_called_once_with(mock.ANY, ptg['id'])
class TestPolicyClassifier(OneConvergenceGBPDriverTestCase):
def test_oneconvergence_controller_api_invoked(self):
with mock.patch.object(
MockNVSDApiClient,
'create_policy_classifier') as create_classifier:
with mock.patch.object(
MockNVSDApiClient,
'update_policy_classifier') as update_classifier:
with mock.patch.object(
MockNVSDApiClient,
'delete_policy_classifier') as delete_classifier:
classifier = self.create_policy_classifier(
name="classifier1")
classifier = classifier['policy_classifier']
classifier.update({"policy_rules": []})
create_classifier.assert_called_once_with(mock.ANY,
classifier)
classifier = self.update_policy_classifier(
classifier['id'],
name="new_classifier")['policy_classifier']
classifier.update({"policy_rules": []})
update_classifier.assert_called_once_with(mock.ANY,
classifier)
self.delete_policy_classifier(classifier['id'])
delete_classifier.assert_called_once_with(
mock.ANY, classifier['id'])
class TestL2Policy(OneConvergenceGBPDriverTestCase,
test_resource_mapping.TestL2Policy):
pass
class TestL3Policy(OneConvergenceGBPDriverTestCase,
test_resource_mapping.TestL3Policy):
pass
class TestPolicyRuleSet(OneConvergenceGBPDriverTestCase,
test_resource_mapping.TestPolicyRuleSet):
pass
class TestServiceChain(OneConvergenceGBPDriverTestCase,
test_resource_mapping.TestServiceChain):
pass
class TestServiceChainAdminOwner(
OneConvergenceGBPDriverTestCase,
test_resource_mapping.TestServiceChainAdminOwner):
pass
class TestPolicyAction(OneConvergenceGBPDriverTestCase,
test_resource_mapping.TestPolicyAction):
pass
class TestPolicyRule(OneConvergenceGBPDriverTestCase,
test_resource_mapping.TestPolicyRule):
pass
class TestExternalSegment(OneConvergenceGBPDriverTestCase,
test_resource_mapping.TestExternalSegment):
pass
class TestExternalPolicy(OneConvergenceGBPDriverTestCase,
test_resource_mapping.TestExternalPolicy):
pass

View File

@@ -26,11 +26,11 @@ from neutron.extensions import external_net as external_net
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.notifiers import nova
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as pconst
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit.extensions import test_securitygroup
from neutron.tests.unit.plugins.ml2 import test_plugin as n_test_plugin
from oslo_utils import uuidutils
import webob.exc
from gbpservice.common import utils

View File

@@ -18,9 +18,9 @@ import heatclient
import mock
from neutron import context as neutron_context
from neutron.extensions import external_net as external_net
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import webob
from gbpservice.neutron.services.servicechain.plugins.ncp import config

View File

@@ -192,4 +192,4 @@ class TestImplicitServiceChains(ResourceMappingStitchingPlumberGBPTestCase,
# Being service targets, port filter and hybrid plug will be false
port = self._bind_port_to_host(pt['port_id'], 'host')['port']
self.assertTrue(port['binding:vif_details']['port_filter'])
self.assertTrue(port['binding:vif_details']['ovs_hybrid_plug'])
self.assertTrue(port['binding:vif_details']['ovs_hybrid_plug'])

View File

@@ -13,9 +13,9 @@
import heatclient
import mock
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import webob
from gbpservice.neutron.services.servicechain.plugins.msc import config

View File

@@ -15,11 +15,11 @@ import re
import mock
from neutron.common import constants as n_consts
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.extensions import base as test_extensions_base
from oslo_utils import uuidutils
from webob import exc
from gbpservice.neutron.extensions import group_policy as gp

View File

@@ -13,10 +13,10 @@
import copy
import mock
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.extensions import base as test_extensions_base
from oslo_utils import uuidutils
from webob import exc
from gbpservice.neutron.extensions import servicechain

View File

@@ -15,4 +15,4 @@ import testtools
class TestCase(testtools.TestCase):
"""Test case base class for all unit tests."""
"""Test case base class for all unit tests."""

View File

@@ -38,8 +38,12 @@ function init_gbpservice {
# install_gbpservice() - Collect source and prepare
function install_gbpservice {
sed -i '/gbptestneutron/d' $GBPSERVICE_DIR/test-requirements.txt
setup_develop $GBPSERVICE_DIR
pip_install -e $GBPSERVICE_DIR
#pip_install -e $GBP_DIR
#sed -i '/gbptestneutron/d' $GBPSERVICE_DIR/test-requirements.txt
#setup_develop $GBPSERVICE_DIR
\cp -rf $GBPSERVICE_DIR/etc/policy.json $Q_POLICY_FILE
sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE
}
# install_gbpclient() - Collect source and prepare

View File

@@ -1,6 +1,7 @@
[[local|localrc]]
DATABASE_PASSWORD=abc123
ADMIN_PASSWORD=abc123
MYSQL_PASSWORD=abc123
MYSQL_PASSWORD=$DATABASE_PASSWORD
RABBIT_PASSWORD=abc123
SERVICE_PASSWORD=$ADMIN_PASSWORD
SERVICE_TOKEN=abc123
@@ -16,12 +17,12 @@ GIT_BASE=${GIT_BASE:-git://git.openstack.org}
GBPSERVICE_REPO=${GIT_BASE}/openstack/group-based-policy.git
GBPSERVICE_BRANCH=master
#GBPSERVICE_REPO=https://review.openstack.org/openstack/group-based-policy
#GBPSERVICE_BRANCH=refs/changes/20/130920/6
#GBPSERVICE_BRANCH=refs/changes/54/240954/47
GBPCLIENT_REPO=${GIT_BASE}/openstack/python-group-based-policy-client.git
GBPCLIENT_BRANCH=master
#GBPCLIENT_REPO=https://review.openstack.org/openstack/python-group-based-policy-client
#GBPCLIENT_BRANCH=refs/changes/78/165378/2
#GBPCLIENT_REPO=${GIT_BASE}/openstack/python-group-based-policy-client.git
#GBPCLIENT_BRANCH=master
GBPCLIENT_REPO=https://review.openstack.org/openstack/python-group-based-policy-client
GBPCLIENT_BRANCH=refs/changes/73/261773/1
GBPUI_REPO=${GIT_BASE}/openstack/group-based-policy-ui.git
GBPUI_BRANCH=master

View File

@@ -1,20 +0,0 @@
# rootwrap command filters to support functional testing. It
# is NOT intended to be used outside of a test environment.
#
# This file should be owned by (and only-writeable by) the root user
[Filters]
# '$BASE_PATH' is intended to be replaced with the expected tox path
# (e.g. /opt/stack/new/neutron/.tox/dsvm-functional) by the neutron
# functional jenkins job. This ensures that tests can kill the
# processes that they launch with their containing tox environment's
# python.
kill_tox_python: KillFilter, root, $BASE_PATH/bin/python, -9
# enable ping from namespace
ping_filter: CommandFilter, ping, root
# enable curl from namespace
curl_filter: CommandFilter, curl, root
tee_filter: CommandFilter, tee, root
tee_kill: KillFilter, root, tee, -9
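# Illustrative result (an assumption: devstack-gate's default BASE=/opt/stack):
# after the gate hook substitutes $BASE_PATH, the first filter above is
# expected to read
#     kill_tox_python: KillFilter, root, /opt/stack/new/group-based-policy/.tox/dsvm-functional/bin/python, -9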

View File

@@ -10,19 +10,18 @@ XTRACE=$(set +o | grep xtrace)
function prepare_gbp_devstack {
cd $TOP_DIR
sudo git checkout stable/kilo
sudo git checkout stable/liberty
sudo cp $CONTRIB_DIR/devstack/local.conf $TOP_DIR/local.conf
sudo cp $CONTRIB_DIR/devstack/exercises/*.sh $TOP_DIR/exercises/
sudo cp $CONTRIB_DIR/devstack/lib/* $TOP_DIR/lib/
sudo cp -r $CONTRIB_DIR/devstack/gbp-templates $TOP_DIR
sudo sed -i 's/DEST=\/opt\/stack/DEST=\/opt\/stack\/new/g' $TOP_DIR/stackrc
sudo sed -i 's/source $TOP_DIR\/lib\/neutron-legacy/source $TOP_DIR\/lib\/neutron-legacy\nsource $TOP_DIR\/lib\/gbp/g' $TOP_DIR/stack.sh
sudo sed -i 's/# Extras Configuration/install_gbpclient\ninit_gbpservice\n# Extras Configuration/g' $TOP_DIR/stack.sh
sudo sed -i 's/# Extras Configuration/install_gbpclient\ninstall_gbpservice\ninit_gbpservice\n# Extras Configuration/g' $TOP_DIR/stack.sh
sudo sed -i 's/echo_summary "Creating initial neutron network elements"//g' $TOP_DIR/stack.sh
sudo sed -i 's/create_neutron_initial_network//g' $TOP_DIR/stack.sh
source $TOP_DIR/functions
source $TOP_DIR/functions-common
pip_install -e $GBP_DIR
}
function source_creds {

View File

@@ -11,13 +11,6 @@ trap prepare_logs ERR
prepare_gbp_devstack
$TOP_DIR/stack.sh
# Add a rootwrap filter to support test-only
# configuration (e.g. a KillFilter for processes that
# use the python installed in a tox env).
FUNC_FILTER=$CONTRIB_DIR/filters.template
sed -e "s+\$BASE_PATH+$BASE/new/group-based-policy/.tox/dsvm-functional+" \
$FUNC_FILTER | sudo tee /etc/neutron/rootwrap.d/functional.filters > /dev/null
# Use devstack functions to install mysql and psql servers
source $TOP_DIR/stackrc
source $TOP_DIR/lib/database

View File

@@ -2,26 +2,34 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-e git+https://github.com/openstack/neutron.git@stable/kilo#egg=neutron
-e git+https://github.com/noironetworks/apic-ml2-driver.git@master#egg=apic_ml2
-e git+https://github.com/openstack/python-heatclient@stable/kilo#egg=heatclient
-e git+https://github.com/openstack/neutron.git@stable/liberty#egg=neutron
# REVISIT: the following needs to be replaced with an updated reference
-e git://github.com/noironetworks/networking-cisco.git@sumit/stable/liberty#egg=networking-cisco
# REVISIT: the following needs to be replaced with an updated reference
-e git+https://github.com/noironetworks/apic-ml2-driver.git@temp/liberty#egg=apic_ml2
-e git+https://github.com/openstack/python-heatclient@stable/liberty#egg=heatclient
hacking>=0.9.2,<0.10
cliff<1.11.0,>=1.10.0 # Apache-2.0
cliff>=1.14.0 # Apache-2.0
coverage>=3.6
fixtures>=0.3.14
fixtures>=1.3.1
httplib2>=0.7.5
mock<1.1.0,>=1.0
ordereddict
mock>=1.2
python-subunit>=0.0.18
requests-mock>=0.6.0 # Apache-2.0
sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2
ordereddict
testrepository>=0.0.18
testtools!=1.2.0,>=0.9.36
testtools>=1.4.0
testresources>=0.2.4
testscenarios>=0.4
WebTest>=2.0
oslotest<1.6.0,>=1.5.1 # Apache-2.0
tempest-lib<0.5.0,>=0.4.0
oslotest>=1.10.0 # Apache-2.0
os-testr>=0.1.0
tempest-lib>=0.8.0
ddt>=0.7.0
pylint==1.4.4 # GNU GPL v2
reno>=0.1.1 # Apache2
-e git+https://github.com/noironetworks/python-opflex-agent.git#egg=opflexagent
-e git+https://github.com/noironetworks/apic-ml2-driver.git#egg=neutron_ml2_driver_apic
-e git+https://github.com/noironetworks/python-opflex-agent.git@master#egg=python-opflexagent-agent

View File

@@ -80,7 +80,11 @@ commands = python setup.py build_sphinx
# H405 multi line docstring summary not separated with an empty line
# H904 Wrap long lines in parentheses instead of a backslash
# TODO(marun) H404 multi line docstring should start with a summary
ignore = E125,E126,E128,E129,E251,E265,E713,F402,F811,F812,H104,H237,H305,H307,H401,H402,H404,H405,H904
# N324 Prevent use of deprecated contextlib.nested
# N325 Python 3: Do not use xrange
# N326 Python 3: do not use basestring
# N327 Python 3: do not use dict.iteritems
ignore = E125,E126,E128,E129,E251,E265,E713,F402,F811,F812,H104,H237,H305,H307,H401,H402,H404,H405,H904,N324,N325,N326,N327
show-source = true
builtins = _
exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools,.ropeproject,rally-scenarios,