[AIM] Validation/Repair/Migration Tool

Adds a validation and repair framework that calls into the mechanism,
policy and SFC drivers to validate the mapping of Neutron resources to
AIM resources. The mappings from all standard Neutron resources to AIM
are currently handled. New unit tests are provided for validation of
each resource, and validation calls are added to several existing unit
tests.
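
Roughly, each driver implements a validation hook that registers the AIM
resource types it owns and records the resources it expects to exist for
the current Neutron state; the ValidationManager then compares expected
against actual state and optionally repairs differences. A minimal sketch
of that hook pattern follows (the method and manager names are taken from
this change; the driver class and resource values are hypothetical):

    from aim.api import resource as aim_resource

    class ExampleMappingDriver(object):
        def validate_aim_mapping(self, mgr):
            # Register each AIM resource type this driver is responsible for.
            mgr.register_aim_resource_class(aim_resource.BridgeDomain)
            # Record the resources expected for the current Neutron state.
            bd = aim_resource.BridgeDomain(
                tenant_name='prj_example', name='net_example')  # hypothetical
            mgr.expect_aim_resource(bd)
            # Anything that cannot be validated yet is reported explicitly:
            # mgr.validation_failed("validation for X not yet implemented")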

A simple command line interface is used to perform validation,
optionally repairing invalid state. It is run as 'gbp-validate
<neutron-server args> [--repair]'. The same arguments used to specify
the configuration of neutron-server must be passed to gbp-validate.
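
For example, a validation-with-repair run against a deployment whose
neutron-server reads /etc/neutron/neutron.conf (the configuration
arguments shown here are an assumption for illustration and depend on
the deployment) would look like:

    gbp-validate --config-file /etc/neutron/neutron.conf --repair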

Validation of GBP and SFC resources and of SVI networks is not yet
implemented. Validation intentionally fails for deployments where these
resources exist, even when repair is enabled, so that valid deployments
are not corrupted. Proper validation of these resources will be
addressed in followup patches.

For isomorphic address scopes with a non-pre-existing VRF, the VRF's
display name currently depends on the order in which the address
scopes were created. This will be addressed in a followup patch.

EPG domain association, static paths, and other aspects of port
binding are not yet validated. This will be addressed in a followup
patch.

Migration from the old APIC plugin to the unified plugin will require
associating existing subnets with subnetpools and rebinding all ports,
which will also be addressed in a followup patch.

A simple neutron_aim exercise script that runs gbp-validate against
Neutron resources is added to the AIM gate job. Once validation of GBP
resources is implemented, similar gbp-validate calls will be added to
the gbp_aim exercise script.

Change-Id: I0c3fe9e2629f76ecca8b3c8a93f9534b2d946e14
Robert Kukura 2017-10-13 15:25:12 -04:00
parent 487eb4bc36
commit 9b7b759221
16 changed files with 2143 additions and 37 deletions


@ -42,6 +42,9 @@ class ProjectNameCache(object):
self.gbp = None
def _get_keystone_client(self):
# REVISIT: It seems load_from_conf_options() and
# keystoneclient auth plugins have been deprecated, and we
# should use keystoneauth instead.
LOG.debug("Getting keystone client")
auth = ksc_auth.load_from_conf_options(cfg.CONF, AUTH_GROUP)
LOG.debug("Got auth: %s", auth)
@ -68,21 +71,18 @@ class ProjectNameCache(object):
inside a transaction with a project_id not already in the
cache.
"""
if project_id and project_id not in self.project_names:
self.load_projects()
if not project_id:
return
# TODO(rkukura): It seems load_from_conf_options() and
# keystoneclient auth plugins have been deprecated, and we
# should use keystoneauth instead.
if project_id not in self.project_names:
if self.keystone is None:
self._get_keystone_client()
LOG.debug("Calling project API")
projects = self.keystone.projects.list()
LOG.debug("Received projects: %s", projects)
for project in projects:
self.project_names[project.id] = project.name
def load_projects(self):
# REVISIT: Does this need locking to prevent concurrent calls?
if self.keystone is None:
self._get_keystone_client()
LOG.debug("Calling project API")
projects = self.keystone.projects.list()
LOG.debug("Received projects: %s", projects)
for project in projects:
self.project_names[project.id] = project.name
def get_project_name(self, project_id):
"""Get name of project from cache.


@ -13,12 +13,16 @@
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from collections import namedtuple
import copy
import netaddr
import os
import re
import sqlalchemy as sa
from sqlalchemy import orm
from aim.aim_lib.db import model as aim_lib_model
from aim.aim_lib import nat_strategy
from aim import aim_manager
from aim.api import infra as aim_infra
@ -117,6 +121,11 @@ ACI_VPCPORT_DESCR_FORMAT = ('topology/pod-(\d+)/protpaths-(\d+)-(\d+)/pathep-'
'\[(.*)\]')
InterfaceValidationInfo = namedtuple(
'InterfaceValidationInfo',
['router_id', 'ip_address', 'subnet', 'scope_mapping'])
class KeystoneNotificationEndpoint(object):
filter_rule = oslo_messaging.NotificationFilter(
event_type='^identity.project.[updated|deleted]')
@ -440,8 +449,12 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
# however negates any change to the Tenant object done by direct
# use of aimctl.
self.aim.create(aim_ctx, tenant, overwrite=True)
ap = aim_resource.ApplicationProfile(tenant_name=tenant_aname,
name=self.ap_name)
# REVISIT: Setting of display_name was added here to match
# aim_lib behavior when it creates APs, but the
# display_name aim_lib uses might vary.
ap = aim_resource.ApplicationProfile(
tenant_name=tenant_aname, name=self.ap_name,
display_name=aim_utils.sanitize_display_name(self.ap_name))
if not self.aim.get(aim_ctx, ap):
self.aim.create(aim_ctx, ap)
@ -455,6 +468,18 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
'name': mapping.domain_name})
return domains
def _get_vmm_domains(self, aim_ctx, ns):
domains = []
if not isinstance(ns, nat_strategy.NoNatStrategy):
aim_hd_mappings = self.aim.find(
aim_ctx, aim_infra.HostDomainMappingV2,
domain_type=utils.OPENSTACK_VMM_TYPE)
if aim_hd_mappings:
domains = self._get_unique_domains(aim_hd_mappings)
if not domains:
domains, _ = self.get_aim_domains(aim_ctx)
return domains
def create_network_precommit(self, context):
current = context.current
LOG.debug("APIC AIM MD creating network: %s", current)
@ -466,17 +491,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
l3out, ext_net, ns = self._get_aim_nat_strategy(current)
if not ext_net:
return # Unmanaged external network
aim_hd_mappings = self.aim.find(aim_ctx,
aim_infra.HostDomainMappingV2,
domain_type=utils.OPENSTACK_VMM_TYPE)
domains = []
if not isinstance(ns, nat_strategy.NoNatStrategy):
if aim_hd_mappings:
domains = self._get_unique_domains(aim_hd_mappings)
if not domains:
domains, _ = self.get_aim_domains(aim_ctx)
domains = self._get_vmm_domains(aim_ctx, ns)
ns.create_l3outside(aim_ctx, l3out, vmm_domains=domains)
ns.create_external_network(aim_ctx, ext_net)
# Get external CIDRs for all external networks that share
@ -2752,12 +2767,16 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
def _topology_shared(self, topology):
for network_db in topology.values():
for entry in network_db.rbac_entries:
# Access is enforced by Neutron itself, and we only
# care whether or not the network is shared, so we
# ignore the entry's target_tenant.
if entry.action == rbac_db_models.ACCESS_SHARED:
return network_db
if self._network_shared(network_db):
return network_db
def _network_shared(self, network_db):
for entry in network_db.rbac_entries:
# Access is enforced by Neutron itself, and we only
# care whether or not the network is shared, so we
# ignore the entry's target_tenant.
if entry.action == rbac_db_models.ACCESS_SHARED:
return True
def _ip_for_subnet(self, subnet, fixed_ips):
subnet_id = subnet['id']
@ -2782,8 +2801,10 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
filter_by(id=scope_id).
one_or_none())
def _map_network(self, session, network):
tenant_aname = self.name_mapper.project(session, network['tenant_id'])
def _map_network(self, session, network, vrf=None):
tenant_aname = (vrf.tenant_name if vrf and vrf.tenant_name != 'common'
else self.name_mapper.project(
session, network['tenant_id']))
id = network['id']
aname = self.name_mapper.network(session, id)
@ -3983,3 +4004,541 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
registry.notify(aim_cst.GBP_NETWORK_VRF, events.PRECOMMIT_UPDATE,
self, context=context,
network_id=mapping.network_id)
def validate_aim_mapping(self, mgr):
# Register all AIM resource types used by mapping.
mgr.register_aim_resource_class(aim_infra.HostDomainMappingV2)
mgr.register_aim_resource_class(aim_resource.ApplicationProfile)
mgr.register_aim_resource_class(aim_resource.BridgeDomain)
mgr.register_aim_resource_class(aim_resource.Contract)
mgr.register_aim_resource_class(aim_resource.ContractSubject)
mgr.register_aim_resource_class(aim_resource.EndpointGroup)
mgr.register_aim_resource_class(aim_resource.ExternalNetwork)
mgr.register_aim_resource_class(aim_resource.ExternalSubnet)
mgr.register_aim_resource_class(aim_resource.Filter)
mgr.register_aim_resource_class(aim_resource.FilterEntry)
mgr.register_aim_resource_class(aim_resource.L3Outside)
mgr.register_aim_resource_class(aim_resource.PhysicalDomain)
mgr.register_aim_resource_class(aim_resource.SecurityGroup)
mgr.register_aim_resource_class(aim_resource.SecurityGroupRule)
mgr.register_aim_resource_class(aim_resource.SecurityGroupSubject)
mgr.register_aim_resource_class(aim_resource.Subnet)
mgr.register_aim_resource_class(aim_resource.Tenant)
mgr.register_aim_resource_class(aim_resource.VMMDomain)
mgr.register_aim_resource_class(aim_resource.VRF)
# Copy AIM resources that are managed via aimctl from actual
# to expected AIM stores.
for resource_class in [aim_infra.HostDomainMappingV2,
aim_resource.PhysicalDomain,
aim_resource.VMMDomain]:
for resource in mgr.actual_aim_resources(resource_class):
mgr.aim_mgr.create(mgr.expected_aim_ctx, resource)
# Register DB tables to be validated.
mgr.register_db_instance_class(
aim_lib_model.CloneL3Out, ['tenant_name', 'name'])
mgr.register_db_instance_class(
db.AddressScopeMapping, ['scope_id'])
mgr.register_db_instance_class(
db.NetworkMapping, ['network_id'])
# Determine expected AIM resources and DB records for each
# Neutron resource type. We stash a set identifying the
# projects that have been processed so far in the validation
# manager since this will be needed for both Neutron and GBP
# resources.
mgr._expected_projects = set()
self._validate_static_resources(mgr)
self._validate_address_scopes(mgr)
router_dbs, ext_net_routers = self._validate_routers(mgr)
self._validate_networks(mgr, router_dbs, ext_net_routers)
self._validate_security_groups(mgr)
self._validate_ports(mgr)
self._validate_subnetpools(mgr)
self._validate_floatingips(mgr)
def _validate_static_resources(self, mgr):
self._ensure_common_tenant(mgr.expected_aim_ctx)
self._ensure_unrouted_vrf(mgr.expected_aim_ctx)
self._ensure_any_filter(mgr.expected_aim_ctx)
self._setup_default_arp_dhcp_security_group_rules(
mgr.expected_aim_ctx)
def _validate_address_scopes(self, mgr):
for scope_db in mgr.actual_session.query(as_db.AddressScope):
self._expect_project(mgr, scope_db.project_id)
mapping = scope_db.aim_mapping
if mapping and not mapping.vrf_owned:
mgr.expect_db_instance(mapping)
else:
vrf = self._map_address_scope(mgr.expected_session, scope_db)
mapping = self._add_address_scope_mapping(
mgr.expected_session, scope_db.id, vrf)
vrf = self._get_address_scope_vrf(mapping)
vrf.monitored = not mapping.vrf_owned
# REVISIT: Need deterministic display_name for isomorphic
# address scopes that own VRF.
vrf.display_name = (
aim_utils.sanitize_display_name(scope_db.name)
if mapping.vrf_owned else "")
vrf.policy_enforcement_pref = 'enforced'
mgr.expect_aim_resource(vrf)
def _validate_routers(self, mgr):
router_dbs = {}
ext_net_routers = defaultdict(list)
for router_db in mgr.actual_session.query(l3_db.Router):
self._expect_project(mgr, router_db.project_id)
router_dbs[router_db.id] = router_db
if router_db.gw_port_id:
ext_net_routers[router_db.gw_port.network_id].append(
router_db.id)
contract, subject = self._map_router(
mgr.expected_session, router_db)
dname = aim_utils.sanitize_display_name(router_db.name)
contract.scope = "context"
contract.display_name = dname
contract.monitored = False
mgr.expect_aim_resource(contract)
subject.in_filters = []
subject.out_filters = []
subject.bi_filters = [self._any_filter_name]
subject.service_graph_name = ''
subject.in_service_graph_name = ''
subject.out_service_graph_name = ''
subject.display_name = dname
subject.monitored = False
mgr.expect_aim_resource(subject)
return router_dbs, ext_net_routers
def _validate_networks(self, mgr, router_dbs, ext_net_routers):
net_dbs = {net_db.id: net_db for net_db in
mgr.actual_session.query(models_v2.Network)}
router_ext_prov, router_ext_cons = self._get_router_ext_contracts(mgr)
routed_nets = self._get_router_interface_info(mgr)
network_vrfs, router_vrfs = self._determine_vrfs(
mgr, net_dbs, routed_nets)
for net_db in net_dbs.values():
self._expect_project(mgr, net_db.project_id)
for subnet_db in net_db.subnets:
self._expect_project(mgr, subnet_db.project_id)
bd = None
epg = None
vrf = None
ext_net = None
if net_db.external:
bd, epg, vrf = self._validate_external_network(
mgr, net_db, ext_net_routers, router_dbs, router_vrfs,
router_ext_prov, router_ext_cons)
elif self._is_svi_db(net_db):
mgr.validation_failed(
"SVI network validation not yet implemented")
else:
bd, epg, vrf = self._validate_normal_network(
mgr, net_db, network_vrfs, router_dbs, routed_nets)
# Copy binding-related attributes from actual EPG to
# expected EPG.
#
# REVISIT: Should compute expected values, but current
# domain and static_path code needs significant
# refactoring to enable re-use. The resulting static_paths
# also may not be deterministic, at least in the SVI BGP
# case. We therefore may need to validate that the actual
# values are sensible rather than computing the expected
# values.
if epg:
actual_epg = mgr.actual_aim_resource(epg)
if actual_epg:
expected_epg = mgr.expected_aim_resource(epg)
expected_epg.vmm_domains = actual_epg.vmm_domains
expected_epg.physical_domains = actual_epg.physical_domains
expected_epg.static_paths = actual_epg.static_paths
# REVISIT: Move to ValidationManager, just before
# comparing actual and expected resources?
expected_epg.openstack_vmm_domain_names = [
d['name'] for d in expected_epg.vmm_domains
if d['type'] == 'OpenStack']
expected_epg.physical_domain_names = [
d['name'] for d in expected_epg.physical_domains]
else:
# REVISIT: Force rebinding of ports using this
# EPG?
pass
# Expect NetworkMapping record if applicable.
if bd or epg or vrf or ext_net:
self._add_network_mapping(
mgr.expected_session, net_db.id, bd, epg, vrf, ext_net)
def _get_router_ext_contracts(self, mgr):
# Get external contracts for routers.
router_ext_prov = defaultdict(set)
router_ext_cons = defaultdict(set)
for contract in mgr.actual_session.query(
extension_db.RouterExtensionContractDb):
if contract.provides:
router_ext_prov[contract.router_id].add(contract.contract_name)
else:
router_ext_cons[contract.router_id].add(contract.contract_name)
return router_ext_prov, router_ext_cons
def _get_router_interface_info(self, mgr):
# Find details of all router interfaces for each routed network.
routed_nets = defaultdict(list)
for intf in (
mgr.actual_session.query(l3_db.RouterPort.router_id,
models_v2.IPAllocation.ip_address,
models_v2.Subnet,
db.AddressScopeMapping).
join(models_v2.IPAllocation,
models_v2.IPAllocation.port_id ==
l3_db.RouterPort.port_id).
join(models_v2.Subnet,
models_v2.Subnet.id ==
models_v2.IPAllocation.subnet_id).
outerjoin(models_v2.SubnetPool,
models_v2.SubnetPool.id ==
models_v2.Subnet.subnetpool_id).
outerjoin(db.AddressScopeMapping,
db.AddressScopeMapping.scope_id ==
models_v2.SubnetPool.address_scope_id).
filter(l3_db.RouterPort.port_type ==
n_constants.DEVICE_OWNER_ROUTER_INTF)):
intf = InterfaceValidationInfo._make(intf)
routed_nets[intf.subnet.network_id].append(intf)
return routed_nets
def _determine_vrfs(self, mgr, net_dbs, routed_nets):
# Determine VRFs for all scoped routed networks, as well as
# unscoped topology information.
network_vrfs = {}
router_vrfs = defaultdict(dict)
unscoped_net_router_ids = {}
unscoped_router_net_ids = defaultdict(set)
unscoped_net_dbs = {}
shared_unscoped_net_ids = []
for intfs in routed_nets.values():
net_id = None
v4_scope_mapping = None
v6_scope_mapping = None
router_ids = set()
for intf in intfs:
router_ids.add(intf.router_id)
if not net_id:
net_id = intf.subnet.network_id
if intf.scope_mapping:
if intf.subnet.ip_version == 4:
if (v4_scope_mapping and
v4_scope_mapping != intf.scope_mapping):
mgr.validation_failed(
"inconsistent IPv4 scopes for network %s" %
intfs)
else:
v4_scope_mapping = intf.scope_mapping
elif intf.subnet.ip_version == 6:
if (v6_scope_mapping and
v6_scope_mapping != intf.scope_mapping):
mgr.validation_failed(
"inconsistent IPv6 scopes for network %s" %
intfs)
else:
v6_scope_mapping = intf.scope_mapping
# REVISIT: If there is a v6 scope and no v4 scope, but
# there are unscoped v4 subnets, should the unscoped
# topology's default VRF be used instead? Or should
# validation fail?
scope_mapping = v4_scope_mapping or v6_scope_mapping
if scope_mapping:
vrf = self._get_address_scope_vrf(scope_mapping)
network_vrfs[net_id] = vrf
for router_id in router_ids:
router_vrfs[router_id][tuple(vrf.identity)] = vrf
else:
unscoped_net_router_ids[net_id] = router_ids
for router_id in router_ids:
unscoped_router_net_ids[router_id].add(net_id)
net_db = net_dbs[net_id]
unscoped_net_dbs[net_id] = net_db
if self._network_shared(net_db):
shared_unscoped_net_ids.append(intf.subnet.network_id)
default_vrfs = set()
def use_default_vrf(net_db):
vrf = self._map_default_vrf(mgr.expected_session, net_db)
key = tuple(vrf.identity)
if key not in default_vrfs:
default_vrfs.add(key)
vrf.display_name = 'DefaultRoutedVRF'
vrf.policy_enforcement_pref = 'enforced'
vrf.monitored = False
mgr.expect_aim_resource(vrf)
network_vrfs[net_db.id] = vrf
return vrf
def expand_shared_topology(net_id, vrf):
for router_id in unscoped_net_router_ids[net_id]:
router_vrfs[router_id][tuple(vrf.identity)] = vrf
for net_id in unscoped_router_net_ids[router_id]:
if net_id not in network_vrfs:
network_vrfs[net_id] = vrf
expand_shared_topology(net_id, vrf)
# Process shared unscoped topologies.
for net_id in shared_unscoped_net_ids:
if net_id not in network_vrfs:
vrf = use_default_vrf(unscoped_net_dbs[net_id])
expand_shared_topology(net_id, vrf)
# Process remaining (unshared) unscoped networks.
for net_db in unscoped_net_dbs.values():
if net_db.id not in network_vrfs:
vrf = use_default_vrf(net_db)
for router_id in unscoped_net_router_ids[net_db.id]:
router_vrfs[router_id][tuple(vrf.identity)] = vrf
return network_vrfs, router_vrfs
def _validate_normal_network(self, mgr, net_db, network_vrfs, router_dbs,
routed_nets):
routed_vrf = network_vrfs.get(net_db.id)
vrf = routed_vrf or self._map_unrouted_vrf()
bd, epg = self._map_network(mgr.expected_session, net_db, vrf)
router_contract_names = set()
for intf in routed_nets.get(net_db.id, []):
# REVISIT: Refactor to share code.
gw_ip = intf.ip_address
router_db = router_dbs[intf.router_id]
dname = aim_utils.sanitize_display_name(
router_db['name'] + '-' +
(intf.subnet.name or intf.subnet.cidr))
sn = self._map_subnet(intf.subnet, gw_ip, bd)
sn.scope = 'public'
sn.display_name = dname
sn.monitored = False
mgr.expect_aim_resource(sn)
contract = self._map_router(
mgr.expected_session, router_db, True)
router_contract_names.add(contract.name)
router_contract_names = list(router_contract_names)
# REVISIT: Refactor to share code.
dname = aim_utils.sanitize_display_name(net_db.name)
bd.display_name = dname
bd.vrf_name = vrf.name
bd.enable_arp_flood = True
bd.enable_routing = len(router_contract_names) != 0
bd.limit_ip_learn_to_subnets = True
bd.ep_move_detect_mode = 'garp'
bd.l3out_names = []
bd.monitored = False
mgr.expect_aim_resource(bd)
epg.display_name = dname
epg.bd_name = bd.name
epg.policy_enforcement_pref = 'unenforced'
epg.provided_contract_names = router_contract_names
epg.consumed_contract_names = router_contract_names
epg.openstack_vmm_domain_names = []
epg.physical_domain_names = []
epg.vmm_domains = []
epg.physical_domains = []
epg.static_paths = []
epg.epg_contract_masters = []
epg.monitored = False
mgr.expect_aim_resource(epg)
return bd, epg, vrf
def _validate_external_network(self, mgr, net_db, ext_net_routers,
router_dbs, router_vrfs, router_ext_prov,
router_ext_cons):
l3out, ext_net, ns = self._get_aim_nat_strategy_db(
mgr.actual_session, net_db)
if not ext_net:
return None, None, None
# REVISIT: Avoid piecemeal queries against the actual DB
# throughout this code.
# REVISIT: We probably need to copy the external network's
# pre-existing resources, if they are monitored, from the
# actual AIM store to the validation AIM store, so that the
# NatStrategy behaves as expected during validation. But the
# current external connectivity UTs don't actually create
# pre-existing ExternalNetwork resources.
domains = self._get_vmm_domains(mgr.expected_aim_ctx, ns)
ns.create_l3outside(
mgr.expected_aim_ctx, l3out, vmm_domains=domains)
ns.create_external_network(mgr.expected_aim_ctx, ext_net)
# Get external CIDRs for all external networks that share this
# APIC external network.
ext_db = extension_db.ExtensionDbMixin()
cidrs = sorted(ext_db.get_external_cidrs_by_ext_net_dn(
mgr.actual_session, ext_net.dn, lock_update=False))
ns.update_external_cidrs(mgr.expected_aim_ctx, ext_net, cidrs)
for resource in ns.get_l3outside_resources(
mgr.expected_aim_ctx, l3out):
if isinstance(resource, aim_resource.BridgeDomain):
bd = resource
elif isinstance(resource, aim_resource.EndpointGroup):
epg = resource
elif isinstance(resource, aim_resource.VRF):
vrf = resource
for subnet_db in net_db.subnets:
ns.create_subnet(
mgr.expected_aim_ctx, l3out,
self._subnet_to_gw_ip_mask(subnet_db))
# REVISIT: Process each AIM ExternalNetwork rather than each
# external Neutron network?
eqv_net_ids = ext_db.get_network_ids_by_ext_net_dn(
mgr.actual_session, ext_net.dn, lock_update=False)
router_ids = set()
for eqv_net_id in eqv_net_ids:
router_ids.update(ext_net_routers[eqv_net_id])
vrf_routers = defaultdict(set)
int_vrfs = {}
for router_id in router_ids:
for int_vrf in router_vrfs[router_id].values():
key = tuple(int_vrf.identity)
vrf_routers[key].add(router_id)
int_vrfs[key] = int_vrf
for key, routers in vrf_routers.items():
prov = set()
cons = set()
for router_id in routers:
contract = self._map_router(
mgr.expected_session, router_dbs[router_id], True)
prov.add(contract.name)
cons.add(contract.name)
prov.update(router_ext_prov[router_id])
cons.update(router_ext_cons[router_id])
ext_net.provided_contract_names = sorted(prov)
ext_net.consumed_contract_names = sorted(cons)
int_vrf = int_vrfs[key]
# Keep only the identity attributes of the VRF so that
# calls to nat-library have consistent resource
# values. This is mainly required to ease unit-test
# verification. Note that this also affects validation
# of the L3Outside's display_name.
int_vrf = aim_resource.VRF(
tenant_name=int_vrf.tenant_name, name=int_vrf.name)
ns.connect_vrf(mgr.expected_aim_ctx, ext_net, int_vrf)
return bd, epg, vrf
def _validate_security_groups(self, mgr):
sg_ips = defaultdict(set)
for sg_id, ip in (
mgr.actual_session.query(
sg_models.SecurityGroupPortBinding.security_group_id,
models_v2.IPAllocation.ip_address).
join(models_v2.IPAllocation,
models_v2.IPAllocation.port_id ==
sg_models.SecurityGroupPortBinding.port_id)):
sg_ips[sg_id].add(ip)
for sg_db in (mgr.actual_session.query(sg_models.SecurityGroup).
options(orm.joinedload('rules'))):
# Ignore anonymous SGs, which seem to be a Neutron bug.
if sg_db.tenant_id:
self._expect_project(mgr, sg_db.project_id)
tenant_name = self.name_mapper.project(
mgr.expected_session, sg_db.tenant_id)
sg = aim_resource.SecurityGroup(
tenant_name=tenant_name, name=sg_db.id,
display_name=aim_utils.sanitize_display_name(sg_db.name))
mgr.expect_aim_resource(sg)
sg_subject = aim_resource.SecurityGroupSubject(
tenant_name=tenant_name, security_group_name=sg_db.id,
name='default')
mgr.expect_aim_resource(sg_subject)
for rule_db in sg_db.rules:
remote_ips = []
if rule_db.remote_group_id:
ip_version = (4 if rule_db.ethertype == 'IPv4' else
6 if rule_db.ethertype == 'IPv6' else
0)
remote_ips = [
ip for ip in sg_ips[rule_db.remote_group_id]
if netaddr.IPAddress(ip).version == ip_version]
elif rule_db.remote_ip_prefix:
remote_ips = [rule_db.remote_ip_prefix]
sg_rule = aim_resource.SecurityGroupRule(
tenant_name=tenant_name,
security_group_name=rule_db.security_group_id,
security_group_subject_name='default',
name=rule_db.id,
direction=rule_db.direction,
ethertype=rule_db.ethertype.lower(),
ip_protocol=(rule_db.protocol if rule_db.protocol
else 'unspecified'),
remote_ips=remote_ips,
from_port=(rule_db.port_range_min
if rule_db.port_range_min
else 'unspecified'),
to_port=(rule_db.port_range_max
if rule_db.port_range_max
else 'unspecified'))
mgr.expect_aim_resource(sg_rule)
def _validate_ports(self, mgr):
for project_id, in (
mgr.actual_session.query(models_v2.Port.project_id).
distinct()):
self._expect_project(mgr, project_id)
def _validate_subnetpools(self, mgr):
for project_id, in (
mgr.actual_session.query(models_v2.SubnetPool.project_id).
distinct()):
self._expect_project(mgr, project_id)
def _validate_floatingips(self, mgr):
for project_id, in (
mgr.actual_session.query(l3_db.FloatingIP.project_id).
distinct()):
self._expect_project(mgr, project_id)
def _expect_project(self, mgr, project_id):
# REVISIT: Currently called for all Neutron and GBP resources
# for which plugin create methods call _ensure_tenant. Remove
# once per-project resources are managed more dynamically.
if project_id and project_id not in mgr._expected_projects:
mgr._expected_projects.add(project_id)
tenant_name = self.name_mapper.project(
mgr.expected_session, project_id)
tenant = aim_resource.Tenant(name=tenant_name)
project_name = (
self.project_name_cache.get_project_name(project_id) or '')
tenant.display_name = aim_utils.sanitize_display_name(project_name)
tenant.descr = self.apic_system_id
tenant.monitored = False
mgr.expect_aim_resource(tenant)
ap = aim_resource.ApplicationProfile(
tenant_name=tenant_name, name=self.ap_name)
ap.display_name = aim_utils.sanitize_display_name(self.ap_name)
ap.monitored = False
mgr.expect_aim_resource(ap)


@ -51,6 +51,8 @@ from gbpservice.neutron.services.grouppolicy.drivers import (
neutron_resources as nrd)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
aim_mapping_rpc as aim_rpc)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
aim_validation)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
apic_mapping_lib as alib)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
@ -180,6 +182,10 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
def start_rpc_listeners(self):
return self.setup_opflex_rpc_listeners()
def validate_state(self, repair):
mgr = aim_validation.ValidationManager()
return mgr.validate(repair)
@property
def aim_mech_driver(self):
if not self._apic_aim_mech_driver:
@ -2433,3 +2439,95 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
self._aim = None
self._name_mapper = None
self._aim_tenant_name = orig_aim_tenant_name
def validate_neutron_mapping(self, mgr):
# REVISIT: Implement.
pass
def validate_aim_mapping(self, mgr):
# REVISIT: Register all AIM resource types used by GBP mapping
# but not the Neutron mapping.
# REVISIT: Register DB tables to be validated.
# Determine expected AIM resources and DB records for each
# GBP resource type.
self._validate_l3_policies(mgr)
self._validate_l2_policies(mgr)
self._validate_policy_target_groups(mgr)
self._validate_policy_targets(mgr)
self._validate_application_policy_groups(mgr)
self._validate_policy_classifiers(mgr)
self._validate_policy_rule_sets(mgr)
self._validate_external_segments(mgr)
self._validate_external_policies(mgr)
# REVISIT: Do any of the following top-level GBP resources map
# to or affect AIM resources: NetworkServicePolicy,
# PolicyAction, NATPool?
def _validate_l3_policies(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
if mgr.actual_session.query(gpdb.L3Policy).first():
mgr.validation_failed(
"GBP->AIM validation for L3P not yet implemented")
def _validate_l2_policies(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
if mgr.actual_session.query(gpdb.L2Policy).first():
mgr.validation_failed(
"GBP->AIM validation for L2P not yet implemented")
def _validate_policy_target_groups(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
if mgr.actual_session.query(gpdb.PolicyTargetGroup).first():
mgr.validation_failed(
"GBP->AIM validation for PTG not yet implemented")
def _validate_policy_targets(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
if mgr.actual_session.query(gpdb.PolicyTarget).first():
mgr.validation_failed(
"GBP->AIM validation for PT not yet implemented")
def _validate_application_policy_groups(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
if mgr.actual_session.query(gpdb.ApplicationPolicyGroup).first():
mgr.validation_failed(
"GBP->AIM validation for APG not yet implemented")
def _validate_policy_classifiers(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
if mgr.actual_session.query(gpdb.PolicyClassifier).first():
mgr.validation_failed(
"GBP->AIM validation for PC not yet implemented")
def _validate_policy_rule_sets(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
if mgr.actual_session.query(gpdb.PolicyRuleSet).first():
mgr.validation_failed(
"GBP->AIM validation for PRS not yet implemented")
def _validate_external_segments(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources. This should probably be called from
# validate_neutron_mapping rather than validate_aim_mapping,
# since external_routes maps to the cisco_apic.EXTERNAL_CIDRS
# network extension.
if mgr.actual_session.query(gpdb.ExternalSegment).first():
mgr.validation_failed(
"GBP->AIM validation for ES not yet implemented")
def _validate_external_policies(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
if mgr.actual_session.query(gpdb.ExternalPolicy).first():
mgr.validation_failed(
"GBP->AIM validation for EP not yet implemented")


@ -0,0 +1,424 @@
# Copyright (c) 2017 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from contextlib import contextmanager
import copy
from aim import aim_store
from aim.api import resource as aim_resource
from aim import context as aim_context
from neutron.db import api as db_api
from neutron_lib.plugins import directory
from oslo_log import log
from gbpservice.neutron.services.grouppolicy import (
group_policy_driver_api as api)
LOG = log.getLogger(__name__)
class InternalValidationError(Exception):
pass
class ValidationManager(object):
def __init__(self):
# REVISIT: Defer until after validating config? Or pass in PD
# & MD?
self.core_plugin = directory.get_plugin()
self.md = self.core_plugin.mechanism_manager.mech_drivers[
'apic_aim'].obj
self.pd = self.md.gbp_driver
self.sfcd = None
sfc_plugin = directory.get_plugin('sfc')
if sfc_plugin:
driver = sfc_plugin.driver_manager.drivers.get('aim')
if driver:
self.sfcd = driver.obj
def validate(self, repair=False):
# REVISIT: Replace print calls throughout this module with an
# output stream that can be sent to stdout/stderr and/or an
# output file?
print("Validating deployment, repair: %s" % repair)
self.result = api.VALIDATION_PASSED
self.repair = repair
# REVISIT: Validate configuration.
# Load project names from Keystone.
self.md.project_name_cache.load_projects()
# Start transaction.
#
# REVISIT: Set session's isolation level to serializable?
self.actual_session = (db_api.get_writer_session() if repair
else db_api.get_reader_session())
self.actual_session.begin()
self.aim_mgr = self.md.aim
self.actual_aim_ctx = aim_context.AimContext(self.actual_session)
self.expected_session = ValidationSession(self)
self.expected_aim_ctx = aim_context.AimContext(
None, ValidationAimStore(self))
# Validate & repair GBP->Neutron mappings.
if self.pd:
self.pd.validate_neutron_mapping(self)
# Start with no expected or actual AIM resources or DB records.
self._expected_aim_resources = {}
self._actual_aim_resources = {}
self._expected_db_instances = {}
self._db_instance_primary_keys = {}
# Validate Neutron->AIM mapping, getting expected AIM
# resources and DB records.
self.md.validate_aim_mapping(self)
# Validate GBP->AIM mapping, getting expected AIM resources
# and DB records.
if self.pd:
self.pd.validate_aim_mapping(self)
# Validate SFC->AIM mapping, getting expected AIM resources
# and DB records.
if self.sfcd:
self.sfcd.validate_aim_mapping(self)
# Validate that actual AIM resources match expected AIM
# resources.
self._validate_aim_resources()
# Validate that actual DB instances match expected DB
# instances.
self._validate_db_instances()
# Commit or rollback transaction.
if self.result is api.VALIDATION_REPAIRED:
print("Committing repairs")
self.actual_session.commit()
else:
if self.repair and self.result is api.VALIDATION_FAILED:
print("Rolling back attempted repairs")
self.actual_session.rollback()
print("Validation result: %s" % self.result)
return self.result
def register_aim_resource_class(self, resource_class):
if resource_class not in self._expected_aim_resources:
self._expected_aim_resources[resource_class] = {}
self._actual_aim_resources[resource_class] = {
tuple(resource.identity): resource
for resource in self.aim_mgr.find(
self.actual_aim_ctx, resource_class)}
def expect_aim_resource(self, resource, replace=False):
expected_resources = self._expected_aim_resources[resource.__class__]
key = tuple(resource.identity)
if not replace and key in expected_resources:
print("resource %s already expected" % resource)
raise InternalValidationError()
for attr_name, attr_type in resource.other_attributes.items():
# REVISIT: May also need to dedup arrays of types other
# than string.
if (attr_type['type'] == 'array' and
attr_type['items']['type'] == 'string'):
value = list(set(getattr(resource, attr_name)))
setattr(resource, attr_name, value)
expected_resources[key] = resource
def expected_aim_resource(self, resource):
expected_resources = self._expected_aim_resources[resource.__class__]
key = tuple(resource.identity)
return expected_resources.get(key)
def expected_aim_resources(self, resource_class):
return self._expected_aim_resources[resource_class].values()
def actual_aim_resource(self, resource):
actual_resources = self._actual_aim_resources[resource.__class__]
key = tuple(resource.identity)
return actual_resources.get(key)
def actual_aim_resources(self, resource_class):
return self._actual_aim_resources[resource_class].values()
def register_db_instance_class(self, instance_class, primary_keys):
self._expected_db_instances.setdefault(instance_class, {})
self._db_instance_primary_keys[instance_class] = primary_keys
def expect_db_instance(self, instance):
instance_class = instance.__class__
expected_instances = self._expected_db_instances[instance_class]
primary_keys = self._db_instance_primary_keys[instance_class]
key = tuple([getattr(instance, k) for k in primary_keys])
expected_instances[key] = instance
def query_db_instances(self, entities, args, filters):
assert(1 == len(entities))
assert(0 == len(args))
instance_class = entities[0]
expected_instances = self._expected_db_instances[instance_class]
primary_keys = self._db_instance_primary_keys[instance_class]
if filters:
if (set(filters.keys()) == set(primary_keys)):
key = tuple([filters[k] for k in primary_keys])
instance = expected_instances.get(key)
return [instance] if instance else []
else:
return [i for i in expected_instances.values()
if all([getattr(i, k) == v for k, v in
filters.items()])]
else:
return expected_instances.values()
def should_repair(self, problem, action='Repairing'):
if self.repair and self.result is not api.VALIDATION_FAILED:
self.result = api.VALIDATION_REPAIRED
print("%s %s" % (action, problem))
return True
else:
self.validation_failed(problem)
def validation_failed(self, reason):
print("Failed due to %s" % reason)
self.result = api.VALIDATION_FAILED
def _validate_aim_resources(self):
for resource_class in self._expected_aim_resources.keys():
self._validate_aim_resource_class(resource_class)
def _validate_aim_resource_class(self, resource_class):
expected_resources = self._expected_aim_resources[resource_class]
for actual_resource in self.actual_aim_resources(resource_class):
key = tuple(actual_resource.identity)
expected_resource = expected_resources.pop(key, None)
self._validate_actual_aim_resource(
actual_resource, expected_resource)
for expected_resource in expected_resources.values():
self._handle_missing_aim_resource(expected_resource)
def _validate_actual_aim_resource(self, actual_resource,
expected_resource):
if not expected_resource:
# Some infra resources do not have the monitored
# attribute, but are treated as if they are monitored.
if not getattr(actual_resource, 'monitored', True):
self._handle_unexpected_aim_resource(actual_resource)
else:
# Some infra resources do not have the monitored
# attribute, but are treated as if they are monitored.
if getattr(expected_resource, 'monitored', True):
# REVISIT: Make sure actual resource is monitored, but
# ignore other differences.
pass
else:
if not expected_resource.user_equal(actual_resource):
self._handle_incorrect_aim_resource(
expected_resource, actual_resource)
def _handle_unexpected_aim_resource(self, actual_resource):
if self.should_repair(
"unexpected %(type)s: %(actual)r" %
{'type': actual_resource._aci_mo_name,
'actual': actual_resource},
"Deleting"):
self.aim_mgr.delete(self.actual_aim_ctx, actual_resource)
def _handle_incorrect_aim_resource(self, expected_resource,
actual_resource):
if self.should_repair(
"incorrect %(type)s: %(actual)r which should be: "
"%(expected)r" %
{'type': expected_resource._aci_mo_name,
'actual': actual_resource,
'expected': expected_resource}):
self.aim_mgr.create(
self.actual_aim_ctx, expected_resource, overwrite=True)
def _handle_missing_aim_resource(self, expected_resource):
if self.should_repair(
"missing %(type)s: %(expected)r" %
{'type': expected_resource._aci_mo_name,
'expected': expected_resource}):
self.aim_mgr.create(self.actual_aim_ctx, expected_resource)
def _validate_db_instances(self):
for db_class in self._expected_db_instances.keys():
self._validate_db_instance_class(db_class)
def _validate_db_instance_class(self, db_class):
expected_instances = self._expected_db_instances[db_class]
actual_instances = self.actual_session.query(db_class).all()
for actual_instance in actual_instances:
self._validate_actual_db_instance(
actual_instance, expected_instances)
for expected_instance in expected_instances.values():
self._handle_missing_db_instance(expected_instance)
def _validate_actual_db_instance(self, actual_instance,
expected_instances):
primary_keys = self._db_instance_primary_keys[
actual_instance.__class__]
key = tuple([getattr(actual_instance, k) for k in primary_keys])
expected_instance = expected_instances.pop(key, None)
if not expected_instance:
self._handle_unexpected_db_instance(actual_instance)
else:
if not self._is_db_instance_correct(
expected_instance, actual_instance):
self._handle_incorrect_db_instance(
expected_instance, actual_instance)
def _is_db_instance_correct(self, expected_instance, actual_instance):
expected_values = expected_instance.__dict__
actual_values = actual_instance.__dict__
return all([v == actual_values[k] for k, v in expected_values.items()
if not k.startswith('_')])
def _handle_unexpected_db_instance(self, actual_instance):
if self.should_repair(
"unexpected %(type)s record: %(actual)s" %
{'type': actual_instance.__tablename__,
'actual': actual_instance.__dict__},
"Deleting"):
self.actual_session.delete(actual_instance)
def _handle_incorrect_db_instance(self, expected_instance,
actual_instance):
if self.should_repair(
"incorrect %(type)s record: %(actual)s which should be: "
"%(expected)s" %
{'type': expected_instance.__tablename__,
'actual': actual_instance.__dict__,
'expected': expected_instance.__dict__}):
self.actual_session.merge(expected_instance)
def _handle_missing_db_instance(self, expected_instance):
if self.should_repair(
"missing %(type)s record: %(expected)s" %
{'type': expected_instance.__tablename__,
'expected': expected_instance.__dict__}):
self.actual_session.add(expected_instance)
class ValidationAimStore(aim_store.AimStore):
def __init__(self, validation_mgr):
self._mgr = validation_mgr
self.db_session = validation_mgr.expected_session
def add(self, db_obj):
self._mgr.expect_aim_resource(db_obj, True)
def delete(self, db_obj):
assert(False)
def query(self, db_obj_type, resource_class, in_=None, notin_=None,
order_by=None, lock_update=False, **filters):
assert(in_ is None)
assert(notin_ is None)
assert(order_by is None)
if filters:
if (set(filters.keys()) ==
set(resource_class.identity_attributes.keys())):
identity = resource_class(**filters)
resource = self._mgr.expected_aim_resource(identity)
return [resource] if resource else []
else:
return [r for r in
self._mgr.expected_aim_resources(resource_class)
if all([getattr(r, k) == v for k, v in
filters.items()])]
else:
return self._mgr.expected_aim_resources(resource_class)
def count(self, db_obj_type, resource_class, in_=None, notin_=None,
**filters):
assert(False)
def delete_all(self, db_obj_type, resource_class, in_=None, notin_=None,
**filters):
assert(False)
def from_attr(self, db_obj, resource_class, attribute_dict):
for k, v in attribute_dict.items():
setattr(db_obj, k, v)
def to_attr(self, resource_class, db_obj):
assert(False)
def make_resource(self, cls, db_obj, include_aim_id=False):
return copy.deepcopy(db_obj)
def make_db_obj(self, resource):
result = copy.deepcopy(resource)
if isinstance(result, aim_resource.EndpointGroup):
# Since aim.db.models.EndpointGroup.to_attr() maintains
# openstack_vmm_domain_names for backward compatibility,
# we do so here.
result.openstack_vmm_domain_names = [d['name'] for d in
result.vmm_domains
if d['type'] == 'OpenStack']
return result
@contextmanager
def _begin():
yield
class ValidationSession(object):
# This is a very minimal implementation of a sqlalchemy DB session
# (and query), providing only the functionality needed to simulate
# and validate DB usage buried within library code that cannot be
# otherwise validated. If more functionality is needed, consider
# using a sqlite-backed sqlalchemy session instead.
def __init__(self, validation_mgr):
self._mgr = validation_mgr
def begin(self, subtransactions=False, nested=False):
return _begin()
def add(self, instance):
self._mgr.expect_db_instance(instance)
def query(self, *entities, **kwargs):
return ValidationQuery(self._mgr, entities, kwargs)
class ValidationQuery(object):
def __init__(self, validation_mgr, entities, args):
self._mgr = validation_mgr
self._entities = entities
self._args = args
self._filters = {}
def filter_by(self, **kwargs):
self._filters.update(kwargs)
return self
def all(self):
return self._mgr.query_db_instances(
self._entities, self._args, self._filters)


@ -18,6 +18,10 @@ from sqlalchemy.orm import exc as orm_exc
from gbpservice.common import utils
VALIDATION_PASSED = "passed"
VALIDATION_REPAIRED = "repaired"
VALIDATION_FAILED = "failed"
@six.add_metaclass(abc.ABCMeta)
class PolicyTargetContext(object):
@ -1386,6 +1390,17 @@ class PolicyDriver(object):
"""
pass
def validate_state(self, repair):
"""Validate persistent state managed by the driver.
:param repair: Repair invalid state if True.
Called from validation tool to validate policy driver's persistent
state. Returns VALIDATION_PASSED, VALIDATION_REPAIRED, or
VALIDATION_FAILED.
"""
return VALIDATION_PASSED
@six.add_metaclass(abc.ABCMeta)
class ExtensionDriver(object):


@ -70,6 +70,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
def start_rpc_listeners(self):
return self.policy_driver_manager.start_rpc_listeners()
def validate_state(self, repair):
return self.policy_driver_manager.validate_state(repair)
@property
def servicechain_plugin(self):
# REVISIT(rkukura): Need initialization method after all


@ -20,8 +20,9 @@ from oslo_utils import excutils
from sqlalchemy import exc as sqlalchemy_exc
import stevedore
from gbpservice.neutron.services.grouppolicy import (
group_policy_driver_api as api)
from gbpservice.neutron.services.grouppolicy.common import exceptions as gp_exc
from gbpservice.neutron.services.grouppolicy import group_policy_driver_api
LOG = log.getLogger(__name__)
@ -169,7 +170,7 @@ class PolicyDriverManager(stevedore.named.NamedExtensionManager):
def ensure_tenant(self, plugin_context, tenant_id):
for driver in self.ordered_policy_drivers:
if isinstance(driver.obj, group_policy_driver_api.PolicyDriver):
if isinstance(driver.obj, api.PolicyDriver):
try:
driver.obj.ensure_tenant(plugin_context, tenant_id)
except Exception as e:
@ -493,3 +494,14 @@ class PolicyDriverManager(stevedore.named.NamedExtensionManager):
def start_rpc_listeners(self):
return self._call_on_drivers("start_rpc_listeners")
def validate_state(self, repair):
result = api.VALIDATION_PASSED
for driver in self.ordered_policy_drivers:
this_result = driver.obj.validate_state(repair)
if this_result == api.VALIDATION_FAILED:
result = this_result
elif (this_result == api.VALIDATION_REPAIRED and
result != api.VALIDATION_FAILED):
result = this_result
return result


@ -16,6 +16,7 @@ from aim.api import service_graph as aim_sg
from aim import context as aim_context
from aim import utils as aim_utils
import netaddr
from networking_sfc.db import flowclassifier_db as flowc_db
from networking_sfc.db import sfc_db
from networking_sfc.extensions import flowclassifier as flowc_ext
from networking_sfc.extensions import sfc as sfc_ext
@ -976,3 +977,36 @@ class SfcAIMDriver(SfcAIMDriverBase):
name = self.name_mapper.network(plugin_context.session, net['id'],
prefix=cidr + '_')
return name
def validate_aim_mapping(self, mgr):
# REVISIT: Register all AIM resource types used by the SFC
# mapping but not the Neutron or GBP mappings.
# REVISIT: Register DB tables to be validated.
# Determine expected AIM resources and DB records for each
# SFC resource type.
self._validate_flow_classifiers(mgr)
self._validate_port_pair_groups(mgr)
self._validate_port_chains(mgr)
def _validate_flow_classifiers(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
if mgr.actual_session.query(flowc_db.FlowClassifier).first():
mgr.validation_failed(
"SFC->AIM validation for FC not yet implemented")
def _validate_port_pair_groups(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
if mgr.actual_session.query(sfc_db.PortPairGroup).first():
mgr.validation_failed(
"SFC->AIM validation for PPG not yet implemented")
def _validate_port_chains(self, mgr):
# REVISIT: Implement validation of actual mapping to AIM
# resources.
if mgr.actual_session.query(sfc_db.PortChain).first():
mgr.validation_failed(
"SFC->AIM validation for PC not yet implemented")


@ -64,6 +64,10 @@ from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import data_migrations
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import db
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import exceptions
from gbpservice.neutron.plugins.ml2plus import patch_neutron
from gbpservice.neutron.services.grouppolicy import (
group_policy_driver_api as pd_api)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
aim_validation as av)
PLUGIN_NAME = 'gbpservice.neutron.plugins.ml2plus.plugin.Ml2PlusPlugin'
@ -267,6 +271,8 @@ class ApicAimTestCase(test_address_scope.AddressScopeTestCase,
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
self.port_create_status = 'DOWN'
self.validation_mgr = av.ValidationManager()
self.saved_keystone_client = ksc_client.Client
ksc_client.Client = FakeKeystoneClient
self.plugin = directory.get_plugin()
@ -307,6 +313,11 @@ class ApicAimTestCase(test_address_scope.AddressScopeTestCase,
patch_neutron.original_notify_loop)
super(ApicAimTestCase, self).tearDown()
def _validate(self):
# Validate should pass.
self.assertEqual(
pd_api.VALIDATION_PASSED, self.validation_mgr.validate())
def _find_by_dn(self, dn, cls):
aim_ctx = aim_context.AimContext(self.db_session)
resource = cls.from_dn(dn)
@ -2593,6 +2604,7 @@ class TestAimMapping(ApicAimTestCase):
check_net(net4, sn4, [], [], [gw4C], t2)
check_default_vrf(t1, False)
check_default_vrf(t2, False)
self._validate()
# Add subnet 1 to router A, which should create tenant 1's
# default VRF.
@ -2607,6 +2619,7 @@ class TestAimMapping(ApicAimTestCase):
check_net(net4, sn4, [], [], [gw4C], t2)
check_default_vrf(t1, True)
check_default_vrf(t2, False)
self._validate()
# Add subnet 2 to router A.
add_interface(rA, net2, sn2, gw2A, t1)
@ -2620,6 +2633,7 @@ class TestAimMapping(ApicAimTestCase):
check_net(net4, sn4, [], [], [gw4C], t2)
check_default_vrf(t1, True)
check_default_vrf(t2, False)
self._validate()
# Add subnet 2 to router B.
add_interface(rB, net2, sn2, gw2B, t1)
@ -2633,6 +2647,7 @@ class TestAimMapping(ApicAimTestCase):
check_net(net4, sn4, [], [], [gw4C], t2)
check_default_vrf(t1, True)
check_default_vrf(t2, False)
self._validate()
# Add subnet 3 to router B.
add_interface(rB, net3, sn3, gw3B, t1)
@ -2646,6 +2661,7 @@ class TestAimMapping(ApicAimTestCase):
check_net(net4, sn4, [], [], [gw4C], t2)
check_default_vrf(t1, True)
check_default_vrf(t2, False)
self._validate()
# Add subnet 3 to router C.
add_interface(rC, net3, sn3, gw3C, t1)
@ -2659,6 +2675,7 @@ class TestAimMapping(ApicAimTestCase):
check_net(net4, sn4, [], [], [gw4C], t2)
check_default_vrf(t1, True)
check_default_vrf(t2, False)
self._validate()
# Add shared subnet 4 to router C, which should move router
# C's topology (networks 1, 2 and 3 and routers A, B and C) to
@ -2675,6 +2692,7 @@ class TestAimMapping(ApicAimTestCase):
check_net(net4, sn4, [rC], [(gw4C, rC)], [], t2)
check_default_vrf(t1, False)
check_default_vrf(t2, True)
self._validate()
# Remove subnet 3 from router B, which should move router B's
# topology (networks 1 and 2 and routers A and B) to tenant 1
@ -2690,6 +2708,7 @@ class TestAimMapping(ApicAimTestCase):
check_net(net4, sn4, [rC], [(gw4C, rC)], [], t2)
check_default_vrf(t1, True)
check_default_vrf(t2, True)
self._validate()
# Add subnet 3 back to router B, which should move router B's
# topology (networks 1 and 2 and routers A and B) to tenant 2
@ -2705,6 +2724,7 @@ class TestAimMapping(ApicAimTestCase):
check_net(net4, sn4, [rC], [(gw4C, rC)], [], t2)
check_default_vrf(t1, False)
check_default_vrf(t2, True)
self._validate()
# Remove subnet 2 from router B, which should move network 2's
# topology (networks 1 and 2 and router A) back to tenant 1
@ -2720,6 +2740,7 @@ class TestAimMapping(ApicAimTestCase):
check_net(net4, sn4, [rC], [(gw4C, rC)], [], t2)
check_default_vrf(t1, True)
check_default_vrf(t2, True)
self._validate()
# Add subnet 2 back to router B, which should move network 2's
# topology (networks 1 and 2 and router A) to tenant 2 again
@ -2735,6 +2756,7 @@ class TestAimMapping(ApicAimTestCase):
check_net(net4, sn4, [rC], [(gw4C, rC)], [], t2)
check_default_vrf(t1, False)
check_default_vrf(t2, True)
self._validate()
# Remove subnet 4 from router C, which should move network 3's
# topology (networks 1, 2 and 3 and routers A and B) to tenant
@ -2751,6 +2773,7 @@ class TestAimMapping(ApicAimTestCase):
check_net(net4, sn4, [], [], [gw4C], t2)
check_default_vrf(t1, True)
check_default_vrf(t2, False)
self._validate()
# Remove subnet 3 from router C.
remove_interface(rC, net3, sn3, gw3C, t1)
@ -2817,6 +2840,7 @@ class TestAimMapping(ApicAimTestCase):
check_net(net4, sn4, [], [], [gw4C], t2)
check_default_vrf(t1, False)
check_default_vrf(t2, False)
self._validate()
def test_address_scope_pre_existing_vrf(self):
aim_ctx = aim_context.AimContext(self.db_session)
@ -4735,6 +4759,7 @@ class TestExternalConnectivityBase(object):
self._check_dn(net1, ext_epg, 'EndpointGroup')
self._check_dn(net1, ext_bd, 'BridgeDomain')
self._check_dn(net1, ext_vrf, 'VRF')
self._validate()
# test no-op CIDR update
self.mock_ns.reset_mock()
@ -4748,6 +4773,7 @@ class TestExternalConnectivityBase(object):
{'network': {CIDR: ['33.33.33.0/30']}})['network']
self.mock_ns.update_external_cidrs.assert_called_once_with(
mock.ANY, a_ext_net, ['33.33.33.0/30'])
self._validate()
# delete
self.mock_ns.reset_mock()
@ -4768,6 +4794,7 @@ class TestExternalConnectivityBase(object):
mock.ANY, a_ext_net)
self.mock_ns.update_external_cidrs.assert_called_once_with(
mock.ANY, a_ext_net, ['0.0.0.0/0'])
self._validate()
def test_unmanaged_external_network_lifecycle(self):
net1 = self._make_ext_network('net1')
@ -4777,6 +4804,7 @@ class TestExternalConnectivityBase(object):
self._check_no_dn(net1, 'EndpointGroup')
self._check_no_dn(net1, 'BridgeDomain')
self._check_no_dn(net1, 'VRF')
self._validate()
self._delete('networks', net1['id'])
self.mock_ns.delete_l3outside.assert_not_called()
@ -4798,6 +4826,7 @@ class TestExternalConnectivityBase(object):
tenant_name=self.t1_aname, bd_name='EXT-l1',
gw_ip_mask='10.0.0.1/24')
self._check_dn(subnet, ext_sub, 'Subnet')
self._validate()
# Update gateway
self.mock_ns.reset_mock()
@ -4810,6 +4839,7 @@ class TestExternalConnectivityBase(object):
self.mock_ns.create_subnet.assert_called_once_with(
mock.ANY, l3out, '10.0.0.251/24')
self._check_dn(subnet, ext_sub, 'Subnet')
self._validate()
# delete subnet
self.mock_ns.reset_mock()
@ -4827,6 +4857,7 @@ class TestExternalConnectivityBase(object):
self.mock_ns.create_subnet.assert_not_called()
self._check_no_dn(subnet, 'Subnet')
self.assertEqual('N/A', subnet['apic:synchronization_state'])
self._validate()
# Update gateway
self._update('subnets', subnet['id'],
@ -4835,6 +4866,7 @@ class TestExternalConnectivityBase(object):
self.mock_ns.delete_subnet.assert_not_called()
self.mock_ns.create_subnet.assert_not_called()
self._check_no_dn(subnet, 'Subnet')
self._validate()
# delete subnet
self._delete('subnets', subnet['id'])
@ -4895,6 +4927,7 @@ class TestExternalConnectivityBase(object):
objs.setdefault(t, []).append(
tuple([router, [sub1, sub2], addr_scope]))
self.mock_ns.connect_vrf.assert_not_called()
self._validate()
# Connect the router interfaces to the subnets
vrf_objs = {}
@ -4927,6 +4960,7 @@ class TestExternalConnectivityBase(object):
cv.assert_called_once_with(mock.ANY, a_ext_net, a_vrf)
else:
cv.assert_not_called()
self._validate()
vrf_objs[tenant] = a_ext_net
# Remove the router interfaces
@ -4964,6 +4998,7 @@ class TestExternalConnectivityBase(object):
else:
cv.assert_not_called()
dv.assert_not_called()
self._validate()
self.mock_ns.reset_mock()
self._delete('routers', router['id'])
@ -5151,11 +5186,13 @@ class TestExternalConnectivityBase(object):
routers.append(r['id'])
contracts.append(self.name_mapper.router(None, r['id']))
cv.assert_not_called()
self._validate()
self._add_external_gateway_to_router(routers[0], ext_nets[0])
a_ext_nets[0].provided_contract_names = [contracts[0]]
a_ext_nets[0].consumed_contract_names = [contracts[0]]
cv.assert_called_once_with(mock.ANY, a_ext_nets[0], a_vrf)
self._validate()
self.mock_ns.reset_mock()
self._add_external_gateway_to_router(routers[1], ext_nets[1])
@ -5166,6 +5203,7 @@ class TestExternalConnectivityBase(object):
a_ext_nets[1].provided_contract_names = [contracts[1]]
a_ext_nets[1].consumed_contract_names = [contracts[1]]
cv.assert_called_once_with(mock.ANY, a_ext_nets[1], a_vrf)
self._validate()
self.mock_ns.reset_mock()
self._router_interface_action('remove', routers[0], sub1['id'], None)
@ -5179,12 +5217,14 @@ class TestExternalConnectivityBase(object):
a_ext_nets[0].consumed_contract_names = []
dv.assert_called_once_with(mock.ANY, a_ext_nets[0], a_vrf)
cv.assert_not_called()
self._validate()
self.mock_ns.reset_mock()
self._router_interface_action('remove', routers[1], sub1['id'], None)
a_ext_nets[1].provided_contract_names = []
a_ext_nets[1].consumed_contract_names = []
dv.assert_called_once_with(mock.ANY, a_ext_nets[1], a_vrf)
self._validate()
def test_multiple_router(self):
self._do_test_multiple_router(use_addr_scope=False)
@ -5559,6 +5599,7 @@ class TestExternalConnectivityBase(object):
name='EXT-l1')
self.assertEqual(1, len(ext_epg))
self.assertEqual(ext_epg[0].vmm_domains, vmm_domains)
self._validate()
def test_external_network_default_domains(self):
self._test_external_network_lifecycle_with_domains()
@ -5694,6 +5735,15 @@ class TestExternalNoNat(TestExternalConnectivityBase,
aim_ctx = aim_context.AimContext(self.db_session)
self.aim_mgr.update(aim_ctx, l3out, vrf_name=vrf_name)
def _validate(self):
# REVISIT: Validation does not currently work for these NoNat
# tests because the L3Out's vrf_name is not updated as in
# fix_l3out_vrf(), resulting in an L3OutsideVrfChangeDisallowed
# exception from NoNatStrategy.connect_vrf(). A more realistic
# test using correctly set up monitored pre-existing
# ExternalNetwork resources should avoid this issue.
pass
def test_shared_unscoped_network(self):
# Skip test since the topology tested is not valid with no-NAT
pass
@ -5970,6 +6020,7 @@ class TestPortVlanNetwork(ApicAimTestCase):
# unbound port -> no static paths expected
epg = self.aim_mgr.get(aim_ctx, epg)
self.assertEqual([], epg.static_paths)
self._validate()
# bind to host h1
p1 = self._bind_port_to_host(p1['port']['id'], 'h1')
@ -5979,6 +6030,7 @@ class TestPortVlanNetwork(ApicAimTestCase):
[{'path': self.hlink1.path, 'encap': 'vlan-%s' % vlan_h1,
'host': 'h1'}],
epg.static_paths)
self._validate()
# move port to host h2
p1 = self._bind_port_to_host(p1['port']['id'], 'h2')
@ -5988,12 +6040,14 @@ class TestPortVlanNetwork(ApicAimTestCase):
[{'path': hlink2.path, 'encap': 'vlan-%s' % vlan_h2,
'host': 'h2'}],
epg.static_paths)
self._validate()
# delete port
self._delete('ports', p1['port']['id'])
self._check_no_dynamic_segment(net1['id'])
epg = self.aim_mgr.get(aim_ctx, epg)
self.assertEqual([], epg.static_paths)
self._validate()
def test_port_lifecycle_internal_network(self):
self._do_test_port_lifecycle()
@ -7157,6 +7211,7 @@ class TestPortOnPhysicalNode(TestPortVlanNetwork):
expected_binding_info=[('apic_aim', 'opflex')])
epg1 = self.aim_mgr.get(aim_ctx, epg1)
self.assertEqual([], epg1.static_paths)
self._validate()
# port on non-opflex host
with self.port(subnet=sub1) as p2:
@ -7167,6 +7222,7 @@ class TestPortOnPhysicalNode(TestPortVlanNetwork):
[{'path': self.hlink1.path, 'encap': 'vlan-%s' % vlan_p2,
'host': 'h1'}],
epg1.static_paths)
self._validate()
def test_mixed_ports_on_network_with_default_domains(self):
aim_ctx = aim_context.AimContext(self.db_session)
@ -7205,6 +7261,7 @@ class TestPortOnPhysicalNode(TestPortVlanNetwork):
self.assertEqual(set([]),
set(self._doms(epg1.physical_domains,
with_type=False)))
self._validate()
# move port to another host
p1 = self._bind_port_to_host(p1['port']['id'], 'opflex-2')
epg1 = self.aim_mgr.get(aim_ctx, epg1)
@ -7214,6 +7271,7 @@ class TestPortOnPhysicalNode(TestPortVlanNetwork):
self.assertEqual(set([]),
set(self._doms(epg1.physical_domains,
with_type=False)))
self._validate()
# delete port
self._delete('ports', p1['port']['id'])
epg1 = self.aim_mgr.get(aim_ctx, epg1)
@ -7234,6 +7292,7 @@ class TestPortOnPhysicalNode(TestPortVlanNetwork):
self.assertEqual(set(['ph1', 'ph2']),
set(self._doms(epg1.physical_domains,
with_type=False)))
self._validate()
# move port to another host
p2 = self._bind_port_to_host(p2['port']['id'], 'h2')
epg1 = self.aim_mgr.get(aim_ctx, epg1)
@ -7243,6 +7302,7 @@ class TestPortOnPhysicalNode(TestPortVlanNetwork):
self.assertEqual(set(['ph1', 'ph2']),
set(self._doms(epg1.physical_domains,
with_type=False)))
self._validate()
# delete port
self._delete('ports', p2['port']['id'])
epg1 = self.aim_mgr.get(aim_ctx, epg1)

View File

@ -0,0 +1,789 @@
# Copyright (c) 2017 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from aim.aim_lib.db import model as aim_lib_model
from aim.api import infra as aim_infra
from aim.api import resource as aim_resource
from aim import context as aim_context
from neutron.tests.unit.extensions import test_securitygroup
from neutron_lib import context as n_context
from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import db
from gbpservice.neutron.services.grouppolicy import (
group_policy_driver_api as api)
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
aim_validation as av)
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_aim_mapping_driver)
from gbpservice.neutron.tests.unit.services.sfc import test_aim_sfc_driver
class AimValidationTestMixin(object):
def _validate(self):
# Validate should pass.
self.assertEqual(api.VALIDATION_PASSED, self.av_mgr.validate())
def _validate_repair_validate(self):
# Validate should fail.
self.assertEqual(api.VALIDATION_FAILED, self.av_mgr.validate())
# Repair.
self.assertEqual(
api.VALIDATION_REPAIRED, self.av_mgr.validate(repair=True))
# Validate should pass.
self.assertEqual(api.VALIDATION_PASSED, self.av_mgr.validate())
def _validate_unrepairable(self):
# Repair should fail.
self.assertEqual(
api.VALIDATION_FAILED, self.av_mgr.validate(repair=True))
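# NOTE: a hedged summary of the contract exercised by the helpers above:
# ValidationManager.validate() returns api.VALIDATION_PASSED when Neutron
# and AIM state agree, api.VALIDATION_FAILED when they do not (or cannot
# be repaired), and api.VALIDATION_REPAIRED when repair=True fixes the
# discrepancies.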
def _test_aim_resource(self, resource, unexpected_attr_name='name',
unexpected_attr_value='unexpected'):
resource = copy.copy(resource)
# Delete the AIM resource and test.
self.aim_mgr.delete(self.aim_ctx, resource)
self._validate_repair_validate()
# Modify the AIM resource and test.
self.aim_mgr.update(
self.aim_ctx, resource, display_name='not what it was')
self._validate_repair_validate()
# Add unexpected AIM resource and test.
setattr(resource, unexpected_attr_name, unexpected_attr_value)
self.aim_mgr.create(self.aim_ctx, resource)
self._validate_repair_validate()
# Add unexpected monitored AIM resource and test.
resource.monitored = True
self.aim_mgr.create(self.aim_ctx, resource)
self._validate()
# Delete unexpected monitored AIM resource.
self.aim_mgr.delete(self.aim_ctx, resource)
class AimValidationTestCase(test_aim_mapping_driver.AIMBaseTestCase,
test_securitygroup.SecurityGroupsTestCase,
AimValidationTestMixin):
def setUp(self):
super(AimValidationTestCase, self).setUp()
self.av_mgr = av.ValidationManager()
self.aim_ctx = aim_context.AimContext(self.db_session)
class TestNeutronMapping(AimValidationTestCase):
def setUp(self):
super(TestNeutronMapping, self).setUp()
def _test_routed_subnet(self, subnet_id, gw_ip):
# Get the AIM Subnet.
subnet = self._show('subnets', subnet_id)['subnet']
sn_dn = subnet['apic:distinguished_names'][gw_ip]
sn = aim_resource.Subnet.from_dn(sn_dn)
# Test the AIM Subnet.
self._test_aim_resource(sn, 'gw_ip_mask', '4.3.2.1/24')
def _test_unscoped_vrf(self, router_id):
# Get the router's unscoped AIM VRF.
router = self._show('routers', router_id)['router']
vrf_dn = router['apic:distinguished_names']['no_scope-VRF']
vrf = aim_resource.VRF.from_dn(vrf_dn)
# Test the AIM VRF.
self._test_aim_resource(vrf)
def test_static_resources(self):
# Validate with initial static resources.
self._validate()
# Delete the common Tenant and test.
tenant = aim_resource.Tenant(name='common')
self.aim_mgr.delete(self.aim_ctx, tenant)
self._validate_repair_validate()
# Test unrouted AIM VRF.
vrf = aim_resource.VRF(
name=self.driver.aim_mech_driver.apic_system_id + '_UnroutedVRF',
tenant_name='common')
self._test_aim_resource(vrf)
# Test the any Filter.
filter_name = (self.driver.aim_mech_driver.apic_system_id +
'_AnyFilter')
filter = aim_resource.Filter(
name=filter_name,
tenant_name='common')
self._test_aim_resource(filter)
# Test the any FilterEntry.
entry = aim_resource.FilterEntry(
name='AnyFilterEntry',
filter_name=filter_name,
tenant_name='common')
self._test_aim_resource(entry)
# Test the default SecurityGroup.
sg_name = (self.driver.aim_mech_driver.apic_system_id +
'_DefaultSecurityGroup')
sg = aim_resource.SecurityGroup(
name=sg_name,
tenant_name='common')
self._test_aim_resource(sg)
# Test the default SecurityGroupSubject.
sg_subject = aim_resource.SecurityGroupSubject(
name='default',
security_group_name=sg_name,
tenant_name='common')
self._test_aim_resource(sg_subject)
# Test one default SecurityGroupRule.
sg_rule = aim_resource.SecurityGroupRule(
name='arp_egress',
security_group_subject_name='default',
security_group_name=sg_name,
tenant_name='common')
self._test_aim_resource(sg_rule)
def _test_project_resources(self, project_id):
# Validate with initial project resources.
self._validate()
# Test AIM Tenant.
tenant_name = self.driver.aim_mech_driver.name_mapper.project(
None, project_id)
tenant = aim_resource.Tenant(name=tenant_name)
self._test_aim_resource(tenant)
# Test AIM ApplicationProfile.
ap = aim_resource.ApplicationProfile(
tenant_name=tenant_name, name='OpenStack')
self._test_aim_resource(ap)
def test_project_resources(self):
# REVISIT: Currently, a project's AIM Tenant and
# ApplicationProfile are created in ensure_tenant just before
# any Neutron/GBP resource is created using that project, and
# are not cleaned up when the last Neutron/GBP resource
# needing them is deleted. Instead, they are cleaned up when a
# notification is received from Keystone that the project has
# been deleted. We should consider managing these AIM
# resources more dynamically. If we do, this test will need to
# be reworked.
# Test address scope.
scope = self._make_address_scope(
self.fmt, 4, name='as1', tenant_id='as_proj')['address_scope']
self._test_project_resources(scope['project_id'])
# Test network.
net_resp = self._make_network(
self.fmt, 'net1', True, tenant_id='net_proj')
net = net_resp['network']
self._test_project_resources(net['project_id'])
# Test subnet.
subnet = self._make_subnet(
self.fmt, net_resp, '10.0.1.1', '10.0.1.0/24',
tenant_id='subnet_proj')['subnet']
self._test_project_resources(subnet['project_id'])
# Test port. Since Neutron creates the default SG for the
# port's project even when security_groups=[] is passed, we
# need to delete the default SG to ensure the port is the only
# resource owned by port_proj.
port = self._make_port(
self.fmt, net['id'], security_groups=[],
tenant_id='port_proj')['port']
sgs = self._list(
'security-groups',
query_params='project_id=port_proj')['security_groups']
self.assertEqual(1, len(sgs))
self._delete('security-groups', sgs[0]['id'])
self._test_project_resources(port['project_id'])
# Test security group.
sg = self._make_security_group(
self.fmt, 'sg1', 'desc1', tenant_id='sg_proj')['security_group']
self._test_project_resources(sg['project_id'])
# Test subnetpool.
sp = self._make_subnetpool(
self.fmt, ['10.0.0.0/8'], name='sp1', tenant_id='sp_proj',
default_prefixlen=24)['subnetpool']
self._test_project_resources(sp['project_id'])
# Test router.
router = self._make_router(
self.fmt, 'router_proj', 'router1')['router']
self._test_project_resources(router['project_id'])
# Test floatingip.
kwargs = {'router:external': True}
ext_net_resp = self._make_network(
self.fmt, 'ext_net', True, arg_list=self.extension_attributes,
**kwargs)
ext_net = ext_net_resp['network']
self._make_subnet(
self.fmt, ext_net_resp, '100.100.100.1', '100.100.100.0/24')
fip = self._make_floatingip(
self.fmt, ext_net['id'], tenant_id='fip_proj')['floatingip']
self._test_project_resources(fip['project_id'])
def test_address_scope(self):
# Create address scope.
scope = self._make_address_scope(
self.fmt, 4, name='as1')['address_scope']
scope_id = scope['id']
vrf_dn = scope['apic:distinguished_names']['VRF']
self._validate()
# Delete the address scope's mapping record and test.
(self.db_session.query(db.AddressScopeMapping).
filter_by(scope_id=scope_id).
delete())
self._validate_repair_validate()
# Test AIM VRF.
vrf = aim_resource.VRF.from_dn(vrf_dn)
self._test_aim_resource(vrf)
# REVISIT: Test isomorphic address scopes.
def _test_network_resources(self, net_resp):
net = net_resp['network']
net_id = net['id']
bd_dn = net['apic:distinguished_names']['BridgeDomain']
epg_dn = net['apic:distinguished_names']['EndpointGroup']
# Create unrouted subnet.
subnet = self._make_subnet(
self.fmt, net_resp, '10.0.2.1', '10.0.2.0/24')['subnet']
self._validate()
# Delete the network's mapping record and test.
(self.db_session.query(db.NetworkMapping).
filter_by(network_id=net_id).
delete())
self._validate_repair_validate()
# Corrupt the network's mapping record's BD and test.
with self.db_session.begin():
mapping = (self.db_session.query(db.NetworkMapping).
filter_by(network_id=net_id).
one())
mapping.bd_tenant_name = 'bad_bd_tenant_name'
self._validate_repair_validate()
# Corrupt the network's mapping record's EPG and test.
with self.db_session.begin():
mapping = (self.db_session.query(db.NetworkMapping).
filter_by(network_id=net_id).
one())
mapping.epg_app_profile_name = 'bad_epg_app_profile_name'
self._validate_repair_validate()
# Corrupt the network's mapping record's VRF and test.
with self.db_session.begin():
mapping = (self.db_session.query(db.NetworkMapping).
filter_by(network_id=net_id).
one())
mapping.vrf_name = 'bad_vrf_name'
self._validate_repair_validate()
# Test AIM BridgeDomain.
bd = aim_resource.BridgeDomain.from_dn(bd_dn)
self._test_aim_resource(bd)
# Test AIM EndpointGroup.
epg = aim_resource.EndpointGroup.from_dn(epg_dn)
self._test_aim_resource(epg)
# Test AIM Subnet.
if not net['router:external']:
# Add unexpected AIM Subnet if not external.
sn = self.driver.aim_mech_driver._map_subnet(
subnet, '10.0.2.1', bd)
self.aim_mgr.create(self.aim_ctx, sn)
self._validate_repair_validate()
else:
# Test AIM Subnet if external.
#
# REVISIT: If Subnet DN were included in
# apic:distinguished_names, which it should be, could just
# use _test_routed_subnet().
#
sn = aim_resource.Subnet(
tenant_name=bd.tenant_name,
bd_name=bd.name,
gw_ip_mask='10.0.2.1/24')
self._test_aim_resource(sn, 'gw_ip_mask', '10.0.3.1/24')
def test_unrouted_network(self):
# Create network.
net_resp = self._make_network(self.fmt, 'net1', True)
self._validate()
# Test AIM resources.
self._test_network_resources(net_resp)
def test_external_network(self):
# Create AIM HostDomainMappingV2.
hd_mapping = aim_infra.HostDomainMappingV2(
host_name='*', domain_name='vm2', domain_type='OpenStack')
self.aim_mgr.create(self.aim_ctx, hd_mapping)
# Create external network.
kwargs = {'router:external': True,
'apic:distinguished_names':
{'ExternalNetwork': 'uni/tn-common/out-l1/instP-n1'}}
net_resp = self._make_network(
self.fmt, 'ext_net', True, arg_list=self.extension_attributes,
**kwargs)
self._validate()
# Test standard network AIM resources.
self._test_network_resources(net_resp)
# Test AIM L3Outside.
l3out = aim_resource.L3Outside(tenant_name='common', name='l1')
self._test_aim_resource(l3out)
# Test AIM ExternalNetwork.
en = aim_resource.ExternalNetwork(
tenant_name='common', l3out_name='l1', name='n1')
self._test_aim_resource(en)
# Test AIM ExternalSubnet.
esn = aim_resource.ExternalSubnet(
tenant_name='common', l3out_name='l1', external_network_name='n1',
cidr='0.0.0.0/0')
self._test_aim_resource(esn, 'cidr', '1.2.3.4/0')
# Test AIM VRF.
vrf = aim_resource.VRF(tenant_name='common', name='openstack_EXT-l1')
self._test_aim_resource(vrf)
# Test AIM ApplicationProfile.
ap = aim_resource.ApplicationProfile(
tenant_name='common', name='openstack_OpenStack')
self._test_aim_resource(ap)
# Test AIM Contract.
contract = aim_resource.Contract(
tenant_name='common', name='openstack_EXT-l1')
self._test_aim_resource(contract)
# Test AIM ContractSubject.
subject = aim_resource.ContractSubject(
tenant_name='common', contract_name='openstack_EXT-l1',
name='Allow')
self._test_aim_resource(subject)
# Test AIM Filter.
filter = aim_resource.Filter(
tenant_name='common', name='openstack_EXT-l1')
self._test_aim_resource(filter)
# Test AIM FilterEntry.
entry = aim_resource.FilterEntry(
tenant_name='common', filter_name='openstack_EXT-l1', name='Any')
self._test_aim_resource(entry)
def test_svi_network(self):
# REVISIT: Test validation of actual mapping once implemented.
# Create SVI network.
kwargs = {'apic:svi': 'True'}
self._make_network(
self.fmt, 'net', True, arg_list=self.extension_attributes,
**kwargs)
# Test that validation fails.
self._validate_unrepairable()
def test_router(self):
# Create router.
router = self._make_router(
self.fmt, self._tenant_id, 'router1')['router']
contract_dn = router['apic:distinguished_names']['Contract']
subject_dn = router['apic:distinguished_names']['ContractSubject']
self._validate()
# Test AIM Contract.
contract = aim_resource.Contract.from_dn(contract_dn)
self._test_aim_resource(contract)
# Test AIM ContractSubject.
subject = aim_resource.ContractSubject.from_dn(subject_dn)
self._test_aim_resource(subject)
def test_scoped_routing(self):
# Create shared address scope and subnetpool as tenant_1.
scope = self._make_address_scope(
self.fmt, 4, admin=True, name='as1', tenant_id='tenant_1',
shared=True)['address_scope']
pool = self._make_subnetpool(
self.fmt, ['10.0.0.0/8'], admin=True, name='sp1',
tenant_id='tenant_1', address_scope_id=scope['id'],
default_prefixlen=24, shared=True)['subnetpool']
pool_id = pool['id']
# Create network and subnet as tenant_2.
net_resp = self._make_network(
self.fmt, 'net1', True, tenant_id='tenant_2')
subnet = self._make_subnet(
self.fmt, net_resp, '10.0.1.1', '10.0.1.0/24',
subnetpool_id=pool_id, tenant_id='tenant_2')['subnet']
subnet_id = subnet['id']
# Create extra unrouted subnet.
self._make_subnet(
self.fmt, net_resp, '10.0.2.1', '10.0.2.0/24',
subnetpool_id=pool_id, tenant_id='tenant_2')
# Create external network.
#
kwargs = {'router:external': True,
'apic:distinguished_names':
{'ExternalNetwork': 'uni/tn-common/out-l1/instP-n1'}}
ext_net = self._make_network(
self.fmt, 'ext_net', True, arg_list=self.extension_attributes,
**kwargs)['network']
# Create extra external network to test CloneL3Out record below.
#
kwargs = {'router:external': True,
'apic:distinguished_names':
{'ExternalNetwork': 'uni/tn-common/out-l2/instP-n2'}}
self._make_network(
self.fmt, 'extra_ext_net', True,
arg_list=self.extension_attributes, **kwargs)
# Create router as tenant_2.
kwargs = {'apic:external_provided_contracts': ['p1', 'p2'],
'apic:external_consumed_contracts': ['c1', 'c2'],
'external_gateway_info': {'network_id': ext_net['id']}}
router = self._make_router(
self.fmt, 'tenant_2', 'router1',
arg_list=self.extension_attributes, **kwargs)['router']
router_id = router['id']
# Validate before adding subnet to router.
self._validate()
# Add subnet to router.
self.l3_plugin.add_router_interface(
n_context.get_admin_context(), router_id,
{'subnet_id': subnet_id})
self._validate()
# Test AIM Subnet.
self._test_routed_subnet(subnet_id, '10.0.1.1')
# Determine clone L3Outside identity based on VRF.
vrf_dn = scope['apic:distinguished_names']['VRF']
vrf = aim_resource.VRF.from_dn(vrf_dn)
tenant_name = vrf.tenant_name
l3out_name = 'l1-%s' % vrf.name
# Test AIM L3Outside.
l3out = aim_resource.L3Outside(
tenant_name=tenant_name, name=l3out_name)
self._test_aim_resource(l3out)
# Test AIM ExternalNetwork.
en = aim_resource.ExternalNetwork(
tenant_name=tenant_name, l3out_name=l3out_name, name='n1')
self._test_aim_resource(en)
# Test AIM ExternalSubnet.
esn = aim_resource.ExternalSubnet(
tenant_name=tenant_name, l3out_name=l3out_name,
external_network_name='n1', cidr='0.0.0.0/0')
self._test_aim_resource(esn, 'cidr', '1.2.3.4/0')
# Delete the CloneL3Out record and test.
(self.db_session.query(aim_lib_model.CloneL3Out).
filter_by(tenant_name=tenant_name, name=l3out_name).
delete())
self._validate_repair_validate()
# Corrupt the CloneL3Out record and test.
with self.db_session.begin():
record = (self.db_session.query(aim_lib_model.CloneL3Out).
filter_by(tenant_name=tenant_name, name=l3out_name).
one())
record.source_name = 'l2'
self._validate_repair_validate()
# Add monitored L3Out and unexpected CloneL3Out record and test.
with self.db_session.begin():
unexpected_l3out_name = 'l2-%s' % vrf.name
unexpected_l3out = aim_resource.L3Outside(
tenant_name=tenant_name, name=unexpected_l3out_name,
monitored=True)
self.aim_mgr.create(self.aim_ctx, unexpected_l3out)
record = aim_lib_model.CloneL3Out(
source_tenant_name='common', source_name='l2',
name=unexpected_l3out_name, tenant_name=tenant_name)
self.db_session.add(record)
self._validate_repair_validate()
def test_unscoped_routing(self):
# Create shared network and unscoped subnet as tenant_1.
net_resp = self._make_network(
self.fmt, 'net1', True, tenant_id='tenant_1', shared=True)
subnet = self._make_subnet(
self.fmt, net_resp, '10.0.1.1', '10.0.1.0/24',
tenant_id='tenant_1')['subnet']
subnet1_id = subnet['id']
# Create unshared network and unscoped subnet as tenant_2.
net_resp = self._make_network(
self.fmt, 'net2', True, tenant_id='tenant_2')
subnet = self._make_subnet(
self.fmt, net_resp, '10.0.2.1', '10.0.2.0/24',
tenant_id='tenant_2')['subnet']
subnet2_id = subnet['id']
# Create extra unrouted subnet.
self._make_subnet(
self.fmt, net_resp, '10.0.3.1', '10.0.3.0/24',
tenant_id='tenant_2')
# Create external network.
kwargs = {'router:external': True,
'apic:distinguished_names':
{'ExternalNetwork': 'uni/tn-common/out-l1/instP-n1'}}
ext_net = self._make_network(
self.fmt, 'ext_net', True, arg_list=self.extension_attributes,
**kwargs)['network']
# Create router as tenant_2.
kwargs = {'apic:external_provided_contracts': ['p1', 'p2'],
'apic:external_consumed_contracts': ['c1', 'c2'],
'external_gateway_info': {'network_id': ext_net['id']}}
router = self._make_router(
self.fmt, 'tenant_2', 'router1',
arg_list=self.extension_attributes, **kwargs)['router']
router_id = router['id']
# Validate before adding subnet to router.
self._validate()
# Add unshared subnet to router.
self.l3_plugin.add_router_interface(
n_context.get_admin_context(), router_id,
{'subnet_id': subnet2_id})
self._validate()
# Test AIM Subnet and VRF.
self._test_routed_subnet(subnet2_id, '10.0.2.1')
self._test_unscoped_vrf(router_id)
# Add shared subnet to router.
self.l3_plugin.add_router_interface(
n_context.get_admin_context(), router_id,
{'subnet_id': subnet1_id})
self._validate()
# Test AIM Subnets and VRF.
self._test_routed_subnet(subnet2_id, '10.0.2.1')
self._test_routed_subnet(subnet1_id, '10.0.1.1')
self._test_unscoped_vrf(router_id)
class TestGbpMapping(AimValidationTestCase):
def setUp(self):
super(TestGbpMapping, self).setUp()
def test_l3_policy(self):
# REVISIT: Test validation of actual mapping once implemented.
# Create L3P.
self.create_l3_policy()
# Test that validation fails.
self._validate_unrepairable()
def test_l2_policy(self):
# REVISIT: Test validation of actual mapping once implemented.
# Create L2P.
l2p = self.create_l2_policy()['l2_policy']
# Disassociate and delete the implicitly-created L3P.
self.db_session.query(gpdb.L2Policy).filter_by(id=l2p['id']).update(
{'l3_policy_id': None})
self.delete_l3_policy(l2p['l3_policy_id'])
# Test that validation fails.
self._validate_unrepairable()
def test_policy_target_group(self):
# REVISIT: Test validation of actual mapping once implemented.
# Create PTG.
self.create_policy_target_group()
# Disassociating and deleting the implicitly-created L3P and
# L2P would require removing the router interface that has
# been created, which is not worth the effort for this
# temporary test implementation. Manual inspection of the
# validation output shows that validation is failing due to
# the PTG, as well as the other resources.
# Test that validation fails.
self._validate_unrepairable()
def test_policy_target(self):
# REVISIT: Test validation of actual mapping once implemented.
# Create PTG.
ptg = self.create_policy_target_group()['policy_target_group']
# Create PT.
self.create_policy_target(policy_target_group_id=ptg['id'])
# Disassociating and deleting the PTG, L3P and L2P is not
# worth the effort for this temporary test
# implementation. Manual inspection of the validation output
# shows that validation is failing due to the PT, as well as
# the other resources.
# Test that validation fails.
self._validate_unrepairable()
def test_application_policy_group(self):
# REVISIT: Test validation of actual mapping once implemented.
# Create APG.
self.create_application_policy_group()
# Test that validation fails.
self._validate_unrepairable()
def test_policy_classifier(self):
# REVISIT: Test validation of actual mapping once implemented.
# Create PC.
self.create_policy_classifier()
# Test that validation fails.
self._validate_unrepairable()
def test_policy_rule_set(self):
# REVISIT: Test validation of actual mapping once implemented.
# Create PRS.
self.create_policy_rule_set()
# Test that validation fails.
self._validate_unrepairable()
def test_external_segment(self):
# REVISIT: Test validation of actual mapping once
# implemented. No AIM resources are created directly, but
# external_routes maps to the cisco_apic.EXTERNAL_CIDRS
# network extension.
# Create external network and subnet.
kwargs = {'router:external': True,
'apic:distinguished_names':
{'ExternalNetwork': 'uni/tn-common/out-l1/instP-n1'}}
net_resp = self._make_network(
self.fmt, 'ext_net', True, arg_list=self.extension_attributes,
**kwargs)
subnet = self._make_subnet(
self.fmt, net_resp, '10.0.0.1', '10.0.0.0/24')['subnet']
# Create ES.
self.create_external_segment(
subnet_id=subnet['id'],
external_routes=[{'destination': '129.0.0.0/24', 'nexthop': None}])
# Test that validation fails.
self._validate_unrepairable()
def test_external_policy(self):
# REVISIT: Test validation of actual mapping once implemented.
# Create EP.
self.create_external_policy()
# Test that validation fails.
self._validate_unrepairable()
class TestSfcMapping(test_aim_sfc_driver.TestAIMServiceFunctionChainingBase,
AimValidationTestMixin):
def setUp(self):
super(TestSfcMapping, self).setUp()
self.av_mgr = av.ValidationManager()
self.aim_ctx = aim_context.AimContext(self.db_session)
def test_flow_classifier(self):
# REVISIT: Test validation of actual mapping once
# implemented. This resource is currently not mapped to AIM
# until used in a port chain, but there are plans to map it
# more proactively.
# Create FC.
self._create_simple_flowc()
# Test that validation fails.
self._validate_unrepairable()
def test_port_pair_group(self):
# REVISIT: Test validation of actual mapping once
# implemented. This resource is currently not mapped to AIM
# until used in a port chain, but there are plans to map it
# more proactively.
# Create PPG.
self._create_simple_ppg(pairs=1)
# Test that validation fails.
self._validate_unrepairable()
def test_port_chain(self):
# REVISIT: Test validation of actual mapping once
# implemented.
# Create PC (along with PPG and FC).
self._create_simple_port_chain(ppgs=1)
# Deleting the PPG and FC, if possible, would ensure that the
# PC itself is causing validation to fail, but is not worth
# the effort for this temporary test implementation. Manual
# inspection of the validation output shows that validation is
# failing due to the PC, as well as the other resources.
# Test that validation fails.
self._validate_unrepairable()

View File

@ -0,0 +1,55 @@
#!/usr/bin/env bash
# **neutron_aim.sh**
echo "*********************************************************************"
echo "Begin DevStack Exercise: $0"
echo "*********************************************************************"
# Settings
# ========
# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit
# Keep track of the current directory
EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
# Import common functions
source $TOP_DIR/functions
# Import configuration
source $TOP_DIR/openrc
# Import exercise configuration
source $TOP_DIR/exerciserc
source $TOP_DIR/openrc demo demo
VALIDATE_OPTS=${VALIDATE_OPTS:-"--config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini"}
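# A hedged example, not run in this exercise: appending the --repair flag
# (registered by gbpservice/tools/validate/cli.py) would attempt to fix any
# invalid state rather than just report it, e.g.:
#   gbp-validate $VALIDATE_OPTS --repair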
# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace
# Validate any pre-existing resources
gbp-validate $VALIDATE_OPTS
# Create network
openstack network create net1
# Validate
gbp-validate $VALIDATE_OPTS
# Delete network
openstack network delete net1
# Validate
gbp-validate $VALIDATE_OPTS
set +o xtrace
echo "*********************************************************************"
echo "SUCCESS: End DevStack Exercise: $0"
echo "*********************************************************************"

View File

@ -75,6 +75,7 @@ function prepare_gbp_aim_devstack {
sudo cp $CONTRIB_DIR/devstack/local-aim.conf $TOP_DIR/local.conf
append_to_localconf
sudo cp $CONTRIB_DIR/devstack/exercises-aim/gbp_aim.sh $TOP_DIR/exercises/
sudo cp $CONTRIB_DIR/devstack/exercises-aim/neutron_aim.sh $TOP_DIR/exercises/
# Use the aim version of the shared PRS test
sudo mv $GBP_FUNC_DIR/testcases/tc_gbp_prs_pr_shared_func.py.aim $GBP_FUNC_DIR/testcases/tc_gbp_prs_pr_shared_func.py
sudo mv $GBP_FUNC_DIR/testcases/tc_gbp_prs_func.py.aim $GBP_FUNC_DIR/testcases/tc_gbp_prs_func.py

View File

View File

View File

@ -0,0 +1,55 @@
# Copyright (c) 2018 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_config import cfg
from neutron.common import config
from neutron import manager
from neutron_lib.plugins import directory
# NOTE: the _() translation helper used in main() below is assumed to come
# from the project's standard i18n module.
from gbpservice._i18n import _
from gbpservice.neutron.services.grouppolicy import (
group_policy_driver_api as api)
# Any policy-driver specific CLI options must be included here, since
# the CLI options must be registered before the GBP service plugin and
# the configured policy drivers can be loaded.
cli_opts = [
cfg.BoolOpt('repair', default=False, help='Enable repair of invalid state.')
]
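# A hedged illustration (not part of this change): a policy driver that
# needed its own validation option could extend cli_opts before
# config.init() runs in main() below, for example:
#
#     cli_opts.append(
#         cfg.BoolOpt('check-my-driver', default=False,
#                     help='Hypothetical driver-specific validation option.'))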
def main():
cfg.CONF.register_cli_opts(cli_opts)
config.init(sys.argv[1:])
# REVISIT: Should enable logging but prevent output to stdout.
# config.setup_logging()
if not cfg.CONF.config_file:
sys.exit(_("ERROR: Unable to find configuration file via the default"
" search paths (~/.neutron/, ~/, /etc/neutron/, /etc/) and"
" the '--config-file' option!"))
manager.init()
gbp_plugin = directory.get_plugin('GROUP_POLICY')
if not gbp_plugin:
sys.exit("GBP service plugin not configured.")
result = gbp_plugin.validate_state(cfg.CONF.repair)
if result == api.VALIDATION_FAILED:
sys.exit("Validation failed")
return 0

View File

@ -40,6 +40,7 @@ scripts =
[entry_points]
console_scripts=
gbp-db-manage = gbpservice.neutron.db.migration.cli:main
gbp-validate = gbpservice.tools.validate.cli:main
neutron.core_plugins =
ml2plus = gbpservice.neutron.plugins.ml2plus.plugin:Ml2PlusPlugin
neutron.service_plugins =