Remove leftover MH code and configs

Change-Id: Iadff06ee8ac7a68d120f432e3fa5a599c9d65565
asarfaty 2020-07-29 17:29:39 +02:00
parent 6b8bf4f7c0
commit 9895c39488
5 changed files with 2 additions and 377 deletions

View File

@@ -28,7 +28,6 @@ console_scripts =
neutron.db.alembic_migrations =
vmware-nsx = vmware_nsx.db.migration:alembic_migrations
neutron.core_plugins =
vmware_nsx = vmware_nsx.plugin:NsxPlugin
vmware_nsxv = vmware_nsx.plugin:NsxVPlugin
vmware_nsxv3 = vmware_nsx.plugin:NsxV3Plugin
vmware_nsxp = vmware_nsx.plugin:NsxPolicyPlugin
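
Note: with the vmware_nsx (MH) core-plugin entry point removed above, a deployment's neutron.conf can only select one of the remaining aliases. An illustrative snippet, not part of this change:

[DEFAULT]
core_plugin = vmware_nsxv3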

View File

@@ -123,54 +123,6 @@ base_opts = [
" 1")),
]
sync_opts = [
cfg.IntOpt('state_sync_interval', default=10,
deprecated_group='NVP_SYNC',
help=_("Interval in seconds between runs of the status "
"synchronization task. The plugin will aim at "
"resynchronizing operational status for all resources "
"in this interval, and it should be therefore large "
"enough to ensure the task is feasible. Otherwise the "
"plugin will be constantly synchronizing resource "
"status, ie: a new task is started as soon as the "
"previous is completed. If this value is set to 0, the "
"state synchronization thread for this Neutron instance "
"will be disabled.")),
cfg.IntOpt('max_random_sync_delay', default=0,
deprecated_group='NVP_SYNC',
help=_("Random additional delay between two runs of the state "
"synchronization task. An additional wait time between "
"0 and max_random_sync_delay seconds will be added on "
"top of state_sync_interval.")),
cfg.IntOpt('min_sync_req_delay', default=1,
deprecated_group='NVP_SYNC',
help=_("Minimum delay, in seconds, between two status "
"synchronization requests for NSX. Depending on chunk "
"size, controller load, and other factors, state "
"synchronization requests might be pretty heavy. This "
"means the controller might take time to respond, and "
"its load might be quite increased by them. This "
"parameter allows to specify a minimum interval between "
"two subsequent requests. The value for this parameter "
"must never exceed state_sync_interval. If this does, "
"an error will be raised at startup.")),
cfg.IntOpt('min_chunk_size', default=500,
deprecated_group='NVP_SYNC',
help=_("Minimum number of resources to be retrieved from NSX "
"in a single status synchronization request. The actual "
"size of the chunk will increase if the number of "
"resources is such that using the minimum chunk size "
"will cause the interval between two requests to be "
"less than min_sync_req_delay")),
cfg.BoolOpt('always_read_status', default=False,
deprecated_group='NVP_SYNC',
help=_("Enable this option to allow punctual state "
"synchronization on show operations. In this way, show "
"operations will always fetch the operational status "
"of the resource from the NSX backend, and this might "
"have a considerable impact on overall performance."))
]
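
Note: the removed NSX_SYNC options above describe one scheduling scheme: a resync pass every state_sync_interval seconds plus random jitter, with chunk sizes grown so that consecutive requests stay at least min_sync_req_delay apart. A minimal Python sketch of that arithmetic, using a hypothetical helper that is not part of this tree:

import math
import random

def next_sync_plan(total_resources, state_sync_interval=10,
                   max_random_sync_delay=0, min_sync_req_delay=1,
                   min_chunk_size=500):
    # The removed help text requires min_sync_req_delay <= state_sync_interval.
    if min_sync_req_delay > state_sync_interval:
        raise ValueError("min_sync_req_delay must not exceed "
                         "state_sync_interval")
    # Next run: the base interval plus 0..max_random_sync_delay jitter.
    delay = state_sync_interval + random.randint(0, max_random_sync_delay)
    # Grow the chunk if min_chunk_size would force requests closer
    # together than min_sync_req_delay.
    max_requests = max(1, state_sync_interval // min_sync_req_delay)
    chunk_size = max(min_chunk_size, math.ceil(total_resources / max_requests))
    return delay, chunk_size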
connection_opts = [
cfg.StrOpt('nsx_user',
default='admin',
@@ -1038,7 +990,6 @@ cfg.CONF.register_opts(nsx_v3_opts, group="nsx_v3")
cfg.CONF.register_opts(nsxv_opts, group="nsxv")
cfg.CONF.register_opts(nsx_tvd_opts, group="nsx_tvd")
cfg.CONF.register_opts(base_opts, group="NSX")
cfg.CONF.register_opts(sync_opts, group="NSX_SYNC")
# register l3_ha config opts. This is due to commit
# a7c633dc8e8a67e65e558ecbdf9ea8efc5468251

View File

@@ -14,15 +14,12 @@
# under the License.
#
from neutron_lib import constants
from neutron_lib import exceptions as exception
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from vmware_nsx._i18n import _
from vmware_nsx.api_client import exception as api_exc
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.common import utils
from vmware_nsx.nsxlib import mh as nsxlib
@@ -64,17 +61,6 @@ def _configure_extensions(lport_obj, mac_address, fixed_ips,
'ip_address': address_pair['ip_address']})
def get_lswitch_by_id(cluster, lswitch_id):
try:
lswitch_uri_path = nsxlib._build_uri_path(
LSWITCH_RESOURCE, lswitch_id,
relations="LogicalSwitchStatus")
return nsxlib.do_request(HTTP_GET, lswitch_uri_path, cluster=cluster)
except exception.NotFound:
# FIXME(salv-orlando): this should not raise a neutron exception
raise exception.NetworkNotFound(net_id=lswitch_id)
def get_lswitches(cluster, neutron_net_id):
def lookup_switches_by_tag():
@@ -107,6 +93,7 @@ def get_lswitches(cluster, neutron_net_id):
raise exception.NetworkNotFound(net_id=neutron_net_id)
# This API is currently used only for unit tests
def create_lswitch(cluster, neutron_net_id, tenant_id, display_name,
transport_zones_config,
shared=None,
@@ -132,55 +119,6 @@ def create_lswitch(cluster, neutron_net_id, tenant_id, display_name,
return lswitch
def update_lswitch(cluster, lswitch_id, display_name,
tenant_id=None, **kwargs):
uri = nsxlib._build_uri_path(LSWITCH_RESOURCE, resource_id=lswitch_id)
lswitch_obj = {"display_name": utils.check_and_truncate(display_name)}
# NOTE: tag update will not 'merge' existing tags with new ones.
tags = []
if tenant_id:
tags = utils.get_tags(os_tid=tenant_id)
# The 'tags' kwarg might exist and be None
tags.extend(kwargs.get('tags') or [])
if tags:
lswitch_obj['tags'] = tags
try:
return nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(lswitch_obj),
cluster=cluster)
except exception.NotFound as e:
LOG.error("Network not found, Error: %s", str(e))
raise exception.NetworkNotFound(net_id=lswitch_id)
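
Note: per the NOTE above, update_lswitch replaces the tag set rather than merging it. A hedged usage sketch, identifiers hypothetical:

# Only the os_tid tag derived from tenant_id plus the tags passed here
# survive the update; any previous tags on the lswitch are dropped.
update_lswitch(cluster, 'ls-uuid', 'new-name',
               tenant_id='tenant-1',
               tags=[{'scope': 'new_tag', 'tag': 'xxx'}])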
def delete_network(cluster, net_id, lswitch_id):
delete_networks(cluster, net_id, [lswitch_id])
# TODO(salvatore-orlando): Simplify and harmonize
def delete_networks(cluster, net_id, lswitch_ids):
for ls_id in lswitch_ids:
path = "/ws.v1/lswitch/%s" % ls_id
try:
nsxlib.do_request(HTTP_DELETE, path, cluster=cluster)
except exception.NotFound as e:
LOG.error("Network not found, Error: %s", str(e))
raise exception.NetworkNotFound(net_id=ls_id)
def query_lswitch_lports(cluster, ls_uuid, fields="*",
filters=None, relations=None):
# Fix filter for attachments
if filters and "attachment" in filters:
filters['attachment_vif_uuid'] = filters["attachment"]
del filters['attachment']
uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE,
parent_resource_id=ls_uuid,
fields=fields,
filters=filters,
relations=relations)
return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)['results']
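
Note: because of the filter rewrite at the top of query_lswitch_lports, callers may pass the Neutron-style 'attachment' key. A hedged usage sketch, uuids hypothetical:

# 'attachment' is rewritten to 'attachment_vif_uuid' before the request.
ports = query_lswitch_lports(cluster, 'ls-uuid',
                             fields="uuid,display_name",
                             filters={'attachment': 'vif-uuid'})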
def delete_port(cluster, switch, port):
uri = "/ws.v1/lswitch/" + switch + "/lport/" + port
try:
@@ -193,73 +131,6 @@ def delete_port(cluster, switch, port):
raise exception.NeutronException()
def get_ports(cluster, networks=None, devices=None, tenants=None):
vm_filter_obsolete = ""
vm_filter = ""
tenant_filter = ""
# This is used when calling delete_network. Neutron checks to see if
# the network has any ports.
if networks:
# FIXME (Aaron) If we get more than one network_id this won't work
lswitch = networks[0]
else:
lswitch = "*"
if devices:
for device_id in devices:
vm_filter_obsolete = '&'.join(
["tag_scope=vm_id",
"tag=%s" % utils.device_id_to_vm_id(device_id,
obfuscate=True),
vm_filter_obsolete])
vm_filter = '&'.join(
["tag_scope=vm_id",
"tag=%s" % utils.device_id_to_vm_id(device_id),
vm_filter])
if tenants:
for tenant in tenants:
tenant_filter = '&'.join(
["tag_scope=os_tid",
"tag=%s" % tenant,
tenant_filter])
nsx_lports = {}
lport_fields_str = ("tags,admin_status_enabled,display_name,"
"fabric_status_up")
try:
lport_query_path_obsolete = (
"/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
"&relations=LogicalPortStatus" %
(lswitch, lport_fields_str, vm_filter_obsolete, tenant_filter))
lport_query_path = (
"/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
"&relations=LogicalPortStatus" %
(lswitch, lport_fields_str, vm_filter, tenant_filter))
try:
# NOTE(armando-migliaccio): by querying with obsolete tag first
# current deployments won't take the performance hit of a double
# call. In release L-** or M-**, we might want to swap the calls
# as it's likely that ports with the new tag would outnumber the
# ones with the old tag
ports = nsxlib.get_all_query_pages(lport_query_path_obsolete,
cluster)
if not ports:
ports = nsxlib.get_all_query_pages(lport_query_path, cluster)
except exception.NotFound:
LOG.warning("Lswitch %s not found in NSX", lswitch)
ports = None
if ports:
for port in ports:
for tag in port["tags"]:
if tag["scope"] == "q_port_id":
nsx_lports[tag["tag"]] = port
except Exception:
err_msg = _("Unable to get ports")
LOG.exception(err_msg)
raise nsx_exc.NsxPluginException(err_msg=err_msg)
return nsx_lports
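
Note: the obsolete/new tag handling in get_ports hinges on utils.device_id_to_vm_id. A rough sketch of the assumed mapping, consistent with the sha1-based "obsolete" helper in the tests below; the 40-character limit is an assumption:

import hashlib

def device_id_to_vm_id_sketch(device_id, obfuscate=False):
    # Assumed behavior: hash ids that are obfuscated or too long for a tag;
    # short ids pass through unchanged under the new scheme, which is why
    # get_ports queries the obsolete (always-hashed) tag first.
    if obfuscate or len(device_id) > 40:
        return hashlib.sha1(device_id.encode()).hexdigest()
    return device_id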
def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id):
"""Get port by neutron tag.
@@ -287,6 +158,7 @@ def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id):
return res["results"][0]
# This API is currently used only for unit tests
def get_port(cluster, network, port, relations=None):
LOG.info("get_port() %(network)s %(port)s",
{'network': network, 'port': port})
@@ -301,37 +173,6 @@ def get_port(cluster, network, port, relations=None):
port_id=port, net_id=network)
def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id, tenant_id,
display_name, device_id, admin_status_enabled,
mac_address=None, fixed_ips=None, port_security_enabled=None,
security_profiles=None, queue_id=None,
mac_learning_enabled=None, allowed_address_pairs=None):
lport_obj = dict(
admin_status_enabled=admin_status_enabled,
display_name=utils.check_and_truncate(display_name),
tags=utils.get_tags(os_tid=tenant_id,
q_port_id=neutron_port_id,
vm_id=utils.device_id_to_vm_id(device_id)))
_configure_extensions(lport_obj, mac_address, fixed_ips,
port_security_enabled, security_profiles,
queue_id, mac_learning_enabled,
allowed_address_pairs)
path = "/ws.v1/lswitch/" + lswitch_uuid + "/lport/" + lport_uuid
try:
result = nsxlib.do_request(HTTP_PUT, path, jsonutils.dumps(lport_obj),
cluster=cluster)
LOG.debug("Updated logical port %(result)s "
"on logical switch %(uuid)s",
{'result': result['uuid'], 'uuid': lswitch_uuid})
return result
except exception.NotFound as e:
LOG.error("Port or Network not found, Error: %s", str(e))
raise exception.PortNotFoundOnNetwork(
port_id=lport_uuid, net_id=lswitch_uuid)
def create_lport(cluster, lswitch_uuid, tenant_id, neutron_port_id,
display_name, device_id, admin_status_enabled,
mac_address=None, fixed_ips=None, port_security_enabled=None,
@@ -360,39 +201,3 @@ def create_lport(cluster, lswitch_uuid, tenant_id, neutron_port_id,
LOG.debug("Created logical port %(result)s on logical switch %(uuid)s",
{'result': result['uuid'], 'uuid': lswitch_uuid})
return result
def get_port_status(cluster, lswitch_id, port_id):
"""Retrieve the operational status of the port."""
try:
r = nsxlib.do_request(HTTP_GET,
"/ws.v1/lswitch/%s/lport/%s/status" %
(lswitch_id, port_id), cluster=cluster)
except exception.NotFound as e:
LOG.error("Port not found, Error: %s", str(e))
raise exception.PortNotFoundOnNetwork(
port_id=port_id, net_id=lswitch_id)
if r['link_status_up'] is True:
return constants.PORT_STATUS_ACTIVE
else:
return constants.PORT_STATUS_DOWN
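
Note: a hedged usage sketch; the helper maps the NSX link status onto the two Neutron constants:

status = get_port_status(cluster, 'ls-uuid', 'lp-uuid')
assert status in (constants.PORT_STATUS_ACTIVE, constants.PORT_STATUS_DOWN)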
def plug_interface(cluster, lswitch_id, lport_id, att_obj):
return nsxlib.do_request(HTTP_PUT,
nsxlib._build_uri_path(LSWITCHPORT_RESOURCE,
lport_id, lswitch_id,
is_attachment=True),
jsonutils.dumps(att_obj),
cluster=cluster)
def plug_vif_interface(
cluster, lswitch_id, port_id, port_type, attachment=None):
"""Plug a VIF Attachment object in a logical port."""
lport_obj = {}
if attachment:
lport_obj["vif_uuid"] = attachment
lport_obj["type"] = port_type
return plug_interface(cluster, lswitch_id, port_id, lport_obj)
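
Note: a hedged usage sketch for plug_vif_interface, mirroring the unit test removed below; the attachment uuid is hypothetical:

plug_vif_interface(cluster, lswitch['uuid'], lport['uuid'],
                   'VifAttachment', attachment='fake-vif-uuid')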

View File

@@ -25,7 +25,6 @@ def list_opts():
vmware_nsx.common.config.connection_opts,
vmware_nsx.common.config.nsx_common_opts)),
('NSX', vmware_nsx.common.config.base_opts),
('NSX_SYNC', vmware_nsx.common.config.sync_opts),
('nsxv', vmware_nsx.common.config.nsxv_opts),
('nsx_v3', vmware_nsx.common.config.nsx_v3_opts),
('dvs', vmware_nsx.dvs.dvs_utils.dvs_opts),

View File

@@ -14,14 +14,9 @@
# limitations under the License.
#
import hashlib
from unittest import mock
from neutron.tests.unit.api.v2 import test_base
from neutron_lib import constants
from neutron_lib import exceptions
from vmware_nsx.common import utils
from vmware_nsx.nsxlib.mh import switch as switchlib
from vmware_nsx.tests.unit.nsxlib.mh import base
@@ -95,64 +90,6 @@ class LogicalSwitchesTestCase(base.NsxlibTestCase):
self.assertEqual(second_ls_tags['quantum_net_id'],
network_id)
def _test_update_lswitch(self, tenant_id, name, tags):
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(self.fake_cluster,
_uuid(),
'pippo',
'fake-switch',
transport_zones_config)
switchlib.update_lswitch(self.fake_cluster, lswitch['uuid'],
name, tenant_id=tenant_id, tags=tags)
res_lswitch = switchlib.get_lswitches(self.fake_cluster,
lswitch['uuid'])
self.assertEqual(len(res_lswitch), 1)
self.assertEqual(res_lswitch[0]['display_name'], name)
if not tags:
# no need to validate tags
return
switch_tags = self._build_tag_dict(res_lswitch[0]['tags'])
for tag in tags:
self.assertIn(tag['scope'], switch_tags)
self.assertEqual(tag['tag'], switch_tags[tag['scope']])
def test_update_lswitch(self):
self._test_update_lswitch(None, 'new-name',
[{'scope': 'new_tag', 'tag': 'xxx'}])
def test_update_lswitch_no_tags(self):
self._test_update_lswitch(None, 'new-name', None)
def test_update_lswitch_tenant_id(self):
self._test_update_lswitch('whatever', 'new-name', None)
def test_update_non_existing_lswitch_raises(self):
self.assertRaises(exceptions.NetworkNotFound,
switchlib.update_lswitch,
self.fake_cluster, 'whatever',
'foo', 'bar')
def test_delete_networks(self):
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(self.fake_cluster,
_uuid(),
'pippo',
'fake-switch',
transport_zones_config)
switchlib.delete_networks(self.fake_cluster, lswitch['uuid'],
[lswitch['uuid']])
self.assertRaises(exceptions.NotFound,
switchlib.get_lswitches,
self.fake_cluster,
lswitch['uuid'])
def test_delete_non_existing_lswitch_raises(self):
self.assertRaises(exceptions.NetworkNotFound,
switchlib.delete_networks,
self.fake_cluster, 'whatever', ['whatever'])
class LogicalPortsTestCase(base.NsxlibTestCase):
@@ -180,14 +117,6 @@ class LogicalPortsTestCase(base.NsxlibTestCase):
relations='LogicalPortStatus')
self.assertEqual(lport['uuid'], lport_res['uuid'])
def test_plug_interface(self):
lswitch, lport = self._create_switch_and_port()
switchlib.plug_vif_interface(self.fake_cluster, lswitch['uuid'],
lport['uuid'], 'VifAttachment', 'fake')
lport_res = switchlib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
self.assertEqual(lport['uuid'], lport_res['uuid'])
def test_get_port_by_tag(self):
lswitch, lport = self._create_switch_and_port()
lport2 = switchlib.get_port_by_neutron_tag(self.fake_cluster,
@@ -221,33 +150,12 @@ class LogicalPortsTestCase(base.NsxlibTestCase):
self.fake_cluster, '*', neutron_port_id)
self.assertIsNone(lport)
def test_get_port_status(self):
lswitch, lport = self._create_switch_and_port()
status = switchlib.get_port_status(
self.fake_cluster, lswitch['uuid'], lport['uuid'])
self.assertEqual(constants.PORT_STATUS_ACTIVE, status)
def test_get_port_status_non_existent_raises(self):
self.assertRaises(exceptions.PortNotFoundOnNetwork,
switchlib.get_port_status,
self.fake_cluster,
'boo', 'boo')
def test_update_port(self):
lswitch, lport = self._create_switch_and_port()
switchlib.update_port(
self.fake_cluster, lswitch['uuid'], lport['uuid'],
'neutron_port_id', 'pippo2', 'new_name', 'device_id', False)
lport_res = switchlib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
self.assertEqual(lport['uuid'], lport_res['uuid'])
self.assertEqual('new_name', lport_res['display_name'])
self.assertEqual('False', lport_res['admin_status_enabled'])
port_tags = self._build_tag_dict(lport_res['tags'])
self.assertIn('os_tid', port_tags)
self.assertIn('q_port_id', port_tags)
self.assertIn('vm_id', port_tags)
def test_create_port_device_id_less_than_40_chars(self):
lswitch, lport = self._create_switch_and_port()
lport_res = switchlib.get_port(self.fake_cluster,
@@ -263,29 +171,6 @@ class LogicalPortsTestCase(base.NsxlibTestCase):
port_tags = self._build_tag_dict(lport_res['tags'])
self.assertNotEqual(len(dev_id), len(port_tags['vm_id']))
def test_get_ports_with_obsolete_and_new_vm_id_tag(self):
def obsolete(device_id, obfuscate=False):
return hashlib.sha1(device_id.encode()).hexdigest()
with mock.patch.object(utils, 'device_id_to_vm_id', new=obsolete):
dev_id1 = "short-dev-id-1"
_, lport1 = self._create_switch_and_port(device_id=dev_id1)
dev_id2 = "short-dev-id-2"
_, lport2 = self._create_switch_and_port(device_id=dev_id2)
lports = switchlib.get_ports(self.fake_cluster, None, [dev_id1])
port_tags = self._build_tag_dict(lports['whatever']['tags'])
self.assertNotEqual(dev_id1, port_tags['vm_id'])
lports = switchlib.get_ports(self.fake_cluster, None, [dev_id2])
port_tags = self._build_tag_dict(lports['whatever']['tags'])
self.assertEqual(dev_id2, port_tags['vm_id'])
def test_update_non_existent_port_raises(self):
self.assertRaises(exceptions.PortNotFoundOnNetwork,
switchlib.update_port, self.fake_cluster,
'boo', 'boo', 'boo', 'boo', 'boo', 'boo', False)
def test_delete_port(self):
lswitch, lport = self._create_switch_and_port()
switchlib.delete_port(self.fake_cluster,
@@ -299,17 +184,3 @@ class LogicalPortsTestCase(base.NsxlibTestCase):
self.assertRaises(exceptions.PortNotFoundOnNetwork,
switchlib.delete_port, self.fake_cluster,
lswitch['uuid'], 'bad_port_uuid')
def test_query_lswitch_ports(self):
lswitch, lport = self._create_switch_and_port()
switch_port_uuids = [
switchlib.create_lport(
self.fake_cluster, lswitch['uuid'], 'pippo', 'qportid-%s' % k,
'port-%s' % k, 'deviceid-%s' % k, True)['uuid']
for k in range(2)]
switch_port_uuids.append(lport['uuid'])
ports = switchlib.query_lswitch_lports(
self.fake_cluster, lswitch['uuid'])
self.assertEqual(len(ports), 3)
for res_port in ports:
self.assertIn(res_port['uuid'], switch_port_uuids)