Add nova compute patches

Some important nova compute patches were introduced recently; this change
applies them and records the corresponding Change-Id for each patch:

compute node:
1. live-migration-iscsi.patch
2. support-vif-hotplug.patch
3. fix-rescue-vm.patch
4. live-migration-vifmapping.patch
5. assert_can_migrated.patch

controller node:
1. live-migration-vifmapping-controller.patch

Change-Id: Ib0bfb97fa61323d101ea49faaf410d1576ca111b
Huan Xie committed 2017-02-06 19:31:20 -08:00
parent 4b76575684, commit e4ab4f95bc
9 changed files with 739 additions and 4 deletions


@@ -15,7 +15,6 @@ from utils import HIMN_IP
INT_BRIDGE = 'br-int'
XS_PLUGIN_ISO = 'xenapi-plugins-mitaka.iso'
-DIST_PACKAGES_DIR = '/usr/lib/python2.7/dist-packages/'
CONNTRACK_CONF_SAMPLE =\
    '/usr/share/doc/conntrack-tools-1.4.2/doc/stats/conntrackd.conf'
@@ -38,7 +37,7 @@ def get_endpoints(astute):
def install_xenapi_sdk():
    """Install XenAPI Python SDK"""
-    utils.execute('cp', 'XenAPI.py', DIST_PACKAGES_DIR)
+    utils.execute('cp', 'XenAPI.py', utils.DIST_PACKAGES_DIR)


def create_novacompute_conf(himn, username, password, public_ip, services_ssl):
@@ -314,7 +313,7 @@ def patch_ceilometer():
        'ceilometer-add-purge_inspection_cache.patch',
    ]
    for patch_file in patchfile_list:
-        utils.patch(DIST_PACKAGES_DIR, patch_file, 1)
+        utils.patch(utils.DIST_PACKAGES_DIR, patch_file, 1)


def patch_compute_xenapi():
@@ -325,15 +324,35 @@ def patch_compute_xenapi():
    speed-up-config-drive.patch
    ovs-interim-bridge.patch
    neutron-security-group.patch
+    live-migration-iscsi.patch
+    support-vif-hotplug.patch
+    fix-rescue-vm.patch
+    live-migration-vifmapping.patch
    """
    patchfile_list = [
+        # Change-Id: I5ebff2c1f7534b06233a4d41d7f5f2e5e3b60b5a
        'support-disable-image-cache.patch',
+        # Change-Id: I359e17d6d5838f4028df0bd47e4825de420eb383
        'speed-up-config-drive.patch',
+        # Change-Id: I0cfc0284e1fcd1a6169d31a7ad410716037e5cc2
        'ovs-interim-bridge.patch',
+        # Change-Id: Id9b39aa86558a9f7099caedabd2d517bf8ad3d68
        'neutron-security-group.patch',
+        # Change-Id: I88d1d384ab7587c428e517d184258bb517dfb4ab
+        'live-migration-iscsi.patch',
+        # Change-Id: I22f3fe52d07100592015007653c7f8c47c25d22c
+        'support-vif-hotplug.patch',
+        # Change-Id: I32c66733330bc9877caea7e2a2290c02b3906708
+        'fix-rescue-vm.patch',
+        # Change-Id: If0fb5d764011521916fbbe15224f524a220052f3
+        'live-migration-vifmapping.patch',
+        # TODO(huanxie): below patch isn't merged into upstream yet,
+        # it only affects XS7.1 and later
+        # Change-Id: I31850b25e2f32eb65a00fbb824b08646c9ed340a
+        'assert_can_migrated.patch',
    ]
    for patch_file in patchfile_list:
-        utils.patch(DIST_PACKAGES_DIR, patch_file, 1)
+        utils.patch(utils.DIST_PACKAGES_DIR, patch_file, 1)


def patch_neutron_ovs_agent():

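Both post-deployment scripts apply the patch files through utils.patch(); its
body is not part of this diff, so the following is only a minimal sketch of the
assumed behaviour, shelling out to patch(1) with the given strip level:

    import os
    import subprocess

    def patch(target_dir, patch_file, strip_level):
        # Assumed behaviour of utils.patch: apply patch_file (shipped next
        # to the deployment script) to the tree under target_dir.  -d makes
        # patch(1) chdir first, so hand it an absolute path to the file.
        subprocess.check_call(['patch', '-d', target_dir,
                               '-p%d' % strip_level,
                               '-i', os.path.abspath(patch_file)])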

@@ -36,5 +36,23 @@ def mod_novnc():
        utils.reportError('Cannot set configurations to %s' % filename)


+def patch_nova_conductor():
+    """Add patches which are not merged to upstream
+    Order of patches applied:
+        live-migration-vifmapping-controller.patch
+    """
+    patchfile_list = [
+        # Change-Id: If0fb5d764011521916fbbe15224f524a220052f3
+        'live-migration-vifmapping-controller.patch',
+    ]
+    for patch_file in patchfile_list:
+        utils.patch(utils.DIST_PACKAGES_DIR, patch_file, 1)
+    # Restart related service
+    utils.execute('service', 'nova-conductor', 'restart')
+
+
if __name__ == '__main__':
+    patch_nova_conductor()
    mod_novnc()


@@ -0,0 +1,29 @@
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 82a9aef..d5048cd 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -2278,10 +2278,11 @@ class VMOps(object):
self._call_live_migrate_command(
"VM.assert_can_migrate", vm_ref, dest_check_data)
except self._session.XenAPI.Failure as exc:
- reason = exc.details[0]
- msg = _('assert_can_migrate failed because: %s') % reason
- LOG.debug(msg, exc_info=True)
- raise exception.MigrationPreCheckError(reason=msg)
+ reason = '%s' % exc.details[0]
+ if reason.strip().upper() != "VIF_NOT_IN_MAP":
+ msg = _('assert_can_migrate failed because: %s') % reason
+ LOG.debug(msg, exc_info=True)
+ raise exception.MigrationPreCheckError(reason=msg)
return dest_check_data
def _ensure_pv_driver_info_for_live_migration(self, instance, vm_ref):
@@ -2500,6 +2501,8 @@ class VMOps(object):
def post_live_migration_at_destination(self, context, instance,
network_info, block_migration,
block_device_info):
+ # Hook interim bridge with ovs bridge
+ self._post_start_actions(instance)
# FIXME(johngarbutt): we should block all traffic until we have
# applied security groups, however this requires changes to XenServer
self._prepare_instance_filter(instance, network_info)

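For context, VM.assert_can_migrate is XAPI's pre-flight check for a migration.
A standalone sketch of the relaxed call this patch introduces, assuming the
XenAPI 6.x/7.x call signature; session, refs and maps are placeholders:

    import XenAPI  # the SDK the plugin copies into dist-packages

    def can_live_migrate(session, vm_ref, dest, vdi_map, vif_map):
        try:
            session.xenapi.VM.assert_can_migrate(
                vm_ref, dest, True, vdi_map, vif_map, {})
        except XenAPI.Failure as exc:
            # Tolerate VIF_NOT_IN_MAP, as the patch does: the VIF mapping
            # is only produced later in the live-migration flow on XS 7.1+.
            if exc.details[0].strip().upper() != 'VIF_NOT_IN_MAP':
                raise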

@@ -0,0 +1,71 @@
diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py
index 6b07a62..ac271e3 100644
--- a/nova/virt/xenapi/vif.py
+++ b/nova/virt/xenapi/vif.py
@@ -463,14 +463,15 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
# Create Linux bridge qbrXXX
linux_br_name = self._create_linux_bridge(vif_rec)
- LOG.debug("create veth pair for interim bridge %(interim_bridge)s and "
- "linux bridge %(linux_bridge)s",
- {'interim_bridge': bridge_name,
- 'linux_bridge': linux_br_name})
- self._create_veth_pair(tap_name, patch_port1)
- self._brctl_add_if(linux_br_name, tap_name)
- # Add port to interim bridge
- self._ovs_add_port(bridge_name, patch_port1)
+ if not self._device_exists(tap_name):
+ LOG.debug("create veth pair for interim bridge %(interim_bridge)s "
+ "and linux bridge %(linux_bridge)s",
+ {'interim_bridge': bridge_name,
+ 'linux_bridge': linux_br_name})
+ self._create_veth_pair(tap_name, patch_port1)
+ self._brctl_add_if(linux_br_name, tap_name)
+ # Add port to interim bridge
+ self._ovs_add_port(bridge_name, patch_port1)
def get_vif_interim_net_name(self, vif):
return ("net-" + vif['id'])[:network_model.NIC_NAME_LEN]
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 182873f..e44117e 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -603,15 +603,18 @@ class VMOps(object):
# for neutron event regardless of whether or not it is
# migrated to another host, if unplug VIFs locally, the
# port status may not changed in neutron side and we
- # cannot get the vif plug event from neturon
+ # cannot get the vif plug event from neutron
+ # rescue is True in rescued instance and the port in neutron side
+ # won't change, so we don't wait event from neutron
timeout = CONF.vif_plugging_timeout
- events = self._get_neutron_events(network_info,
- power_on, first_boot)
+ events = self._get_neutron_events(network_info, power_on,
+ first_boot, rescue)
try:
with self._virtapi.wait_for_instance_event(
instance, events, deadline=timeout,
error_callback=self._neutron_failed_callback):
- LOG.debug("wait for instance event:%s", events)
+ LOG.debug("wait for instance event:%s", events,
+ instance=instance)
setup_network_step(undo_mgr, vm_ref)
if rescue:
attach_orig_disks_step(undo_mgr, vm_ref)
@@ -647,11 +650,13 @@ class VMOps(object):
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
- def _get_neutron_events(self, network_info, power_on, first_boot):
+ def _get_neutron_events(self, network_info, power_on, first_boot, rescue):
# Only get network-vif-plugged events with VIF's status is not active.
# With VIF whose status is active, neutron may not notify such event.
+ # Don't get network-vif-plugged events from rescued VM or migrated VM
timeout = CONF.vif_plugging_timeout
- if (utils.is_neutron() and power_on and timeout and first_boot):
+ if (utils.is_neutron() and power_on and timeout and first_boot and
+ not rescue):
return [('network-vif-plugged', vif['id'])
for vif in network_info if vif.get('active', True) is False]
else:

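The effect of the new rescue flag is easiest to see in isolation. A
self-contained restatement of the patched filter, with CONF and the nova
utils helper replaced by plain parameters:

    def get_neutron_events(network_info, power_on, first_boot, rescue,
                           is_neutron=True, timeout=300):
        # Wait for network-vif-plugged only on the first boot of a
        # powered-on, non-rescued instance, and only for VIFs neutron has
        # not yet marked active -- a rescued VM reuses ports whose status
        # never changes, so no event would ever arrive.
        if is_neutron and power_on and timeout and first_boot and not rescue:
            return [('network-vif-plugged', vif['id'])
                    for vif in network_info
                    if vif.get('active', True) is False]
        return []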

@@ -0,0 +1,64 @@
diff --git a/nova/virt/xenapi/client/session.py b/nova/virt/xenapi/client/session.py
index 70e9bec..80bf235 100644
--- a/nova/virt/xenapi/client/session.py
+++ b/nova/virt/xenapi/client/session.py
@@ -102,8 +102,9 @@ class XenAPISession(object):
self.host_ref = self._get_host_ref()
self.product_version, self.product_brand = \
self._get_product_version_and_brand()
-
self._verify_plugin_version()
+ self.platform_version = self._get_platform_version()
+ self._cached_xsm_sr_relaxed = None
apply_session_helpers(self)
@@ -177,6 +178,15 @@ class XenAPISession(object):
return product_version, product_brand
+ def _get_platform_version(self):
+ """Return a tuple of (major, minor, rev) for the host version"""
+ software_version = self._get_software_version()
+ platform_version_str = software_version.get('platform_version',
+ '0.0.0')
+ platform_version = versionutils.convert_version_to_tuple(
+ platform_version_str)
+ return platform_version
+
def _get_software_version(self):
return self.call_xenapi('host.get_software_version', self.host_ref)
@@ -328,3 +338,19 @@ class XenAPISession(object):
"""
return self.call_xenapi('%s.get_all_records' % record_type).items()
+
+ def is_xsm_sr_check_relaxed(self):
+ if self._cached_xsm_sr_relaxed is None:
+ config_value = self.call_plugin('config_file', 'get_val',
+ key='relax-xsm-sr-check')
+ if not config_value:
+ version_str = '.'.join(str(v) for v in self.platform_version)
+ if versionutils.is_compatible('2.1.0', version_str,
+ same_major=False):
+ self._cached_xsm_sr_relaxed = True
+ else:
+ self._cached_xsm_sr_relaxed = False
+ else:
+ self._cached_xsm_sr_relaxed = config_value.lower() == 'true'
+
+ return self._cached_xsm_sr_relaxed
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 51d9627..1c93eac 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -2257,7 +2257,7 @@ class VMOps(object):
if len(self._get_iscsi_srs(ctxt, instance_ref)) > 0:
# XAPI must support the relaxed SR check for live migrating with
# iSCSI VBDs
- if not self._is_xsm_sr_check_relaxed():
+ if not self._session.is_xsm_sr_check_relaxed():
raise exception.MigrationError(reason=_('XAPI supporting '
'relax-xsm-sr-check=true required'))

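The platform-version gate rests on two oslo_utils.versionutils helpers; a
quick illustration (the '2.1.0' example matches what a XenServer 7.0-era host
reports as platform_version):

    from oslo_utils import versionutils

    platform_version = versionutils.convert_version_to_tuple('2.1.0')
    # -> (2, 1, 0), mirroring session.platform_version above
    version_str = '.'.join(str(v) for v in platform_version)
    # same_major=False: any platform >= 2.1.0 passes, even a 3.x one
    assert versionutils.is_compatible('2.1.0', version_str, same_major=False)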

@@ -0,0 +1,49 @@
diff --git a/nova/objects/migrate_data.py b/nova/objects/migrate_data.py
index 44dce4f..07a16ea 100644
--- a/nova/objects/migrate_data.py
+++ b/nova/objects/migrate_data.py
@@ -210,7 +210,9 @@ class LibvirtLiveMigrateData(LiveMigrateData):
@obj_base.NovaObjectRegistry.register
class XenapiLiveMigrateData(LiveMigrateData):
- VERSION = '1.0'
+ # Version 1.0: Initial version
+ # Version 1.1: Added vif_uuid_map
+ VERSION = '1.1'
fields = {
'block_migration': fields.BooleanField(nullable=True),
@@ -219,6 +221,7 @@ class XenapiLiveMigrateData(LiveMigrateData):
'sr_uuid_map': fields.DictOfStringsField(),
'kernel_file': fields.StringField(),
'ramdisk_file': fields.StringField(),
+ 'vif_uuid_map': fields.DictOfStringsField(),
}
def to_legacy_dict(self, pre_migration_result=False):
@@ -233,6 +236,8 @@ class XenapiLiveMigrateData(LiveMigrateData):
live_result = {
'sr_uuid_map': ('sr_uuid_map' in self and self.sr_uuid_map
or {}),
+ 'vif_uuid_map': ('vif_uuid_map' in self and self.vif_uuid_map
+ or {}),
}
if pre_migration_result:
legacy['pre_live_migration_result'] = live_result
@@ -252,6 +257,16 @@ class XenapiLiveMigrateData(LiveMigrateData):
if 'pre_live_migration_result' in legacy:
self.sr_uuid_map = \
legacy['pre_live_migration_result']['sr_uuid_map']
+ self.vif_uuid_map = \
+ legacy['pre_live_migration_result'].get('vif_uuid_map', {})
+
+ def obj_make_compatible(self, primitive, target_version):
+ super(XenapiLiveMigrateData, self).obj_make_compatible(
+ primitive, target_version)
+ target_version = versionutils.convert_version_to_tuple(target_version)
+ if target_version < (1, 1):
+ if 'vif_uuid_map' in primitive:
+ del primitive['vif_uuid_map']
@obj_base.NovaObjectRegistry.register

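obj_make_compatible is what lets the new field cross an RPC boundary to an
older node. A hypothetical round-trip showing the 1.1 -> 1.0 downgrade (the
map values are made up):

    from nova.objects import migrate_data

    data = migrate_data.XenapiLiveMigrateData(
        block_migration=False,
        vif_uuid_map={'neutron-port-uuid': 'OpaqueRef:dest-network'})
    # Serializing for a peer pinned at 1.0 silently drops the new field
    primitive = data.obj_to_primitive(target_version='1.0')
    assert 'vif_uuid_map' not in primitive['nova_object.data']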

@@ -0,0 +1,253 @@
diff --git a/nova/objects/migrate_data.py b/nova/objects/migrate_data.py
index 44dce4f..07a16ea 100644
--- a/nova/objects/migrate_data.py
+++ b/nova/objects/migrate_data.py
@@ -210,7 +210,9 @@ class LibvirtLiveMigrateData(LiveMigrateData):
@obj_base.NovaObjectRegistry.register
class XenapiLiveMigrateData(LiveMigrateData):
- VERSION = '1.0'
+ # Version 1.0: Initial version
+ # Version 1.1: Added vif_uuid_map
+ VERSION = '1.1'
fields = {
'block_migration': fields.BooleanField(nullable=True),
@@ -219,6 +221,7 @@ class XenapiLiveMigrateData(LiveMigrateData):
'sr_uuid_map': fields.DictOfStringsField(),
'kernel_file': fields.StringField(),
'ramdisk_file': fields.StringField(),
+ 'vif_uuid_map': fields.DictOfStringsField(),
}
def to_legacy_dict(self, pre_migration_result=False):
@@ -233,6 +236,8 @@ class XenapiLiveMigrateData(LiveMigrateData):
live_result = {
'sr_uuid_map': ('sr_uuid_map' in self and self.sr_uuid_map
or {}),
+ 'vif_uuid_map': ('vif_uuid_map' in self and self.vif_uuid_map
+ or {}),
}
if pre_migration_result:
legacy['pre_live_migration_result'] = live_result
@@ -252,6 +257,16 @@ class XenapiLiveMigrateData(LiveMigrateData):
if 'pre_live_migration_result' in legacy:
self.sr_uuid_map = \
legacy['pre_live_migration_result']['sr_uuid_map']
+ self.vif_uuid_map = \
+ legacy['pre_live_migration_result'].get('vif_uuid_map', {})
+
+ def obj_make_compatible(self, primitive, target_version):
+ super(XenapiLiveMigrateData, self).obj_make_compatible(
+ primitive, target_version)
+ target_version = versionutils.convert_version_to_tuple(target_version)
+ if target_version < (1, 1):
+ if 'vif_uuid_map' in primitive:
+ del primitive['vif_uuid_map']
@obj_base.NovaObjectRegistry.register
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 899c083..1639861 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -569,6 +569,7 @@ class XenAPIDriver(driver.ComputeDriver):
# any volume that was attached to the destination during
# live migration. XAPI should take care of all other cleanup.
self._vmops.rollback_live_migration_at_destination(instance,
+ network_info,
block_device_info)
def pre_live_migration(self, context, instance, block_device_info,
@@ -594,6 +595,16 @@ class XenAPIDriver(driver.ComputeDriver):
"""
self._vmops.post_live_migration(context, instance, migrate_data)
+ def post_live_migration_at_source(self, context, instance, network_info):
+ """Unplug VIFs from networks at source.
+
+ :param context: security context
+ :param instance: instance object reference
+ :param network_info: instance network information
+ """
+ self._vmops.post_live_migration_at_source(context, instance,
+ network_info)
+
def post_live_migration_at_destination(self, context, instance,
network_info,
block_migration=False,
diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py
index ac271e3..6925426 100644
--- a/nova/virt/xenapi/vif.py
+++ b/nova/virt/xenapi/vif.py
@@ -88,6 +88,9 @@ class XenVIFDriver(object):
raise exception.NovaException(
reason=_("Failed to unplug vif %s") % vif)
+ def get_vif_interim_net_name(self, vif_id):
+ return ("net-" + vif_id)[:network_model.NIC_NAME_LEN]
+
def hot_plug(self, vif, instance, vm_ref, vif_ref):
"""hotplug virtual interface to running instance.
:param nova.network.model.VIF vif:
@@ -126,10 +129,20 @@ class XenVIFDriver(object):
"""
pass
+ def create_vif_interim_network(self, vif):
+ pass
+
+ def delete_network_and_bridge(self, instance, vif):
+ pass
+
class XenAPIBridgeDriver(XenVIFDriver):
"""VIF Driver for XenAPI that uses XenAPI to create Networks."""
+ # NOTE(huanxie): This driver uses linux bridge as backend for XenServer,
+ # it only supports nova network, for using neutron, you should use
+ # XenAPIOpenVswitchDriver
+
def plug(self, instance, vif, vm_ref=None, device=None):
if not vm_ref:
vm_ref = vm_utils.lookup(self._session, instance['name'])
@@ -279,8 +292,7 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
4. delete linux bridge qbr and related ports if exist
"""
super(XenAPIOpenVswitchDriver, self).unplug(instance, vif, vm_ref)
-
- net_name = self.get_vif_interim_net_name(vif)
+ net_name = self.get_vif_interim_net_name(vif['id'])
network = network_utils.find_network_with_name_label(
self._session, net_name)
if network is None:
@@ -292,6 +304,16 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
# source and target VM will be connected to the same
# interim network.
return
+ self.delete_network_and_bridge(instance, vif)
+
+ def delete_network_and_bridge(self, instance, vif):
+ net_name = self.get_vif_interim_net_name(vif['id'])
+ network = network_utils.find_network_with_name_label(
+ self._session, net_name)
+ if network is None:
+ LOG.debug("Didn't find network by name %s", net_name,
+ instance=instance)
+ return
LOG.debug('destroying patch port pair for vif: vif_id=%(vif_id)s',
{'vif_id': vif['id']})
bridge_name = self._session.network.get_bridge(network)
@@ -473,11 +495,8 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
# Add port to interim bridge
self._ovs_add_port(bridge_name, patch_port1)
- def get_vif_interim_net_name(self, vif):
- return ("net-" + vif['id'])[:network_model.NIC_NAME_LEN]
-
def create_vif_interim_network(self, vif):
- net_name = self.get_vif_interim_net_name(vif)
+ net_name = self.get_vif_interim_net_name(vif['id'])
network_rec = {'name_label': net_name,
'name_description': "interim network for vif",
'other_config': {}}
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index e44117e..82a9aef 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -2388,11 +2388,41 @@ class VMOps(object):
self._generate_vdi_map(
sr_uuid_map[sr_uuid], vm_ref, sr_ref))
vif_map = {}
+ vif_uuid_map = None
+ if 'vif_uuid_map' in migrate_data:
+ vif_uuid_map = migrate_data.vif_uuid_map
+ if vif_uuid_map:
+ vif_map = self._generate_vif_network_map(vm_ref, vif_uuid_map)
+ LOG.debug("Generated vif_map for live migration: %s", vif_map)
options = {}
self._session.call_xenapi(command_name, vm_ref,
migrate_send_data, True,
vdi_map, vif_map, options)
+ def _generate_vif_network_map(self, vm_ref, vif_uuid_map):
+ # Generate a mapping dictionary of src_vif_ref: dest_network_ref
+ vif_map = {}
+ # vif_uuid_map is dictionary of neutron_vif_uuid: dest_network_ref
+ vifs = self._session.VM.get_VIFs(vm_ref)
+ for vif in vifs:
+ other_config = self._session.VIF.get_other_config(vif)
+ neutron_id = other_config.get('nicira-iface-id')
+ if neutron_id is None or neutron_id not in vif_uuid_map.keys():
+ raise exception.MigrationError(
+ reason=_('No mapping for source network %s') % (
+ neutron_id))
+ network_ref = vif_uuid_map[neutron_id]
+ vif_map[vif] = network_ref
+ return vif_map
+
+ def create_interim_networks(self, network_info):
+ # Creating an interim bridge in destination host before live_migration
+ vif_map = {}
+ for vif in network_info:
+ network_ref = self.vif_driver.create_vif_interim_network(vif)
+ vif_map.update({vif['id']: network_ref})
+ return vif_map
+
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data):
if migrate_data is None:
@@ -2401,6 +2431,11 @@ class VMOps(object):
migrate_data.sr_uuid_map = self.connect_block_device_volumes(
block_device_info)
+ migrate_data.vif_uuid_map = self.create_interim_networks(network_info)
+ LOG.debug("pre_live_migration, vif_uuid_map: %(vif_map)s, "
+ "sr_uuid_map: %(sr_map)s",
+ {'vif_map': migrate_data.vif_uuid_map,
+ 'sr_map': migrate_data.sr_uuid_map}, instance=instance)
return migrate_data
def live_migrate(self, context, instance, destination_hostname,
@@ -2457,6 +2492,11 @@ class VMOps(object):
migrate_data.kernel_file,
migrate_data.ramdisk_file)
+ def post_live_migration_at_source(self, context, instance, network_info):
+ LOG.debug('post_live_migration_at_source, delete networks and bridges',
+ instance=instance)
+ self._delete_networks_and_bridges(instance, network_info)
+
def post_live_migration_at_destination(self, context, instance,
network_info, block_migration,
block_device_info):
@@ -2471,7 +2511,7 @@ class VMOps(object):
vm_ref = self._get_vm_opaque_ref(instance)
vm_utils.strip_base_mirror_from_vdis(self._session, vm_ref)
- def rollback_live_migration_at_destination(self, instance,
+ def rollback_live_migration_at_destination(self, instance, network_info,
block_device_info):
bdms = block_device_info['block_device_mapping'] or []
@@ -2488,6 +2528,20 @@ class VMOps(object):
LOG.exception(_LE('Failed to forget the SR for volume %s'),
params['id'], instance=instance)
+ # delete VIF and network in destination host
+ LOG.debug('rollback_live_migration_at_destination, delete networks '
+ 'and bridges', instance=instance)
+ self._delete_networks_and_bridges(instance, network_info)
+
+ def _delete_networks_and_bridges(self, instance, network_info):
+ # Unplug VIFs and delete networks
+ for vif in network_info:
+ try:
+ self.vif_driver.delete_network_and_bridge(instance, vif)
+ except Exception:
+ LOG.exception(_LE('Failed to delete networks and bridges with '
+ 'VIF %s'), vif['id'], instance=instance)
+
def get_per_instance_usage(self):
"""Get usage info about each active instance."""
usage = {}

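The two mapping dictionaries in this patch are easy to conflate; with
hypothetical refs, the handshake between the hosts looks like this:

    # Built on the DESTINATION in pre_live_migration(): neutron port uuid
    # -> interim network ref from create_vif_interim_network()
    vif_uuid_map = {'neutron-port-uuid': 'OpaqueRef:net-on-dest'}

    # Built on the SOURCE in _generate_vif_network_map(): each VIF's
    # other_config['nicira-iface-id'] recovers the neutron port uuid,
    # yielding source VIF ref -> destination network ref
    vif_map = {'OpaqueRef:vif-on-source': 'OpaqueRef:net-on-dest'}

    # vif_map is the form VM.migrate_send()/VM.assert_can_migrate() accept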

@@ -0,0 +1,231 @@
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 77483fa..899c083 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -104,6 +104,13 @@ OVERHEAD_PER_VCPU = 1.5
class XenAPIDriver(driver.ComputeDriver):
"""A connection to XenServer or Xen Cloud Platform."""
+ capabilities = {
+ "has_imagecache": False,
+ "supports_recreate": False,
+ "supports_migrate_to_same_host": False,
+ "supports_attach_interface": True,
+ "supports_device_tagging": False,
+ }
def __init__(self, virtapi, read_only=False):
super(XenAPIDriver, self).__init__(virtapi)
@@ -681,3 +688,39 @@ class XenAPIDriver(driver.ComputeDriver):
:returns: dict of nova uuid => dict of usage info
"""
return self._vmops.get_per_instance_usage()
+
+ def attach_interface(self, context, instance, image_meta, vif):
+ """Use hotplug to add a network interface to a running instance.
+
+ The counter action to this is :func:`detach_interface`.
+
+ :param context: The request context.
+ :param nova.objects.instance.Instance instance:
+ The instance which will get an additional network interface.
+ :param nova.objects.ImageMeta image_meta:
+ The metadata of the image of the instance.
+ :param nova.network.model.VIF vif:
+ The object which has the information about the interface to attach.
+
+ :raise nova.exception.NovaException: If the attach fails.
+
+ :return: None
+ """
+ self._vmops.attach_interface(instance, vif)
+
+ def detach_interface(self, context, instance, vif):
+ """Use hotunplug to remove a network interface from a running instance.
+
+ The counter action to this is :func:`attach_interface`.
+
+ :param context: The request context.
+ :param nova.objects.instance.Instance instance:
+ The instance which gets a network interface removed.
+ :param nova.network.model.VIF vif:
+ The object which has the information about the interface to detach.
+
+ :raise nova.exception.NovaException: If the detach fails.
+
+ :return: None
+ """
+ self._vmops.detach_interface(instance, vif)
diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py
index a474d23..6b07a62 100644
--- a/nova/virt/xenapi/vif.py
+++ b/nova/virt/xenapi/vif.py
@@ -20,6 +20,7 @@
from oslo_config import cfg
from oslo_log import log as logging
+from nova.compute import power_state
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
@@ -77,6 +78,8 @@ class XenVIFDriver(object):
LOG.debug("vif didn't exist, no need to unplug vif %s",
vif, instance=instance)
return
+ # hot unplug the VIF first
+ self.hot_unplug(vif, instance, vm_ref, vif_ref)
self._session.call_xenapi('VIF.destroy', vif_ref)
except Exception as e:
LOG.warn(
@@ -85,6 +88,44 @@ class XenVIFDriver(object):
raise exception.NovaException(
reason=_("Failed to unplug vif %s") % vif)
+ def hot_plug(self, vif, instance, vm_ref, vif_ref):
+ """hotplug virtual interface to running instance.
+ :param nova.network.model.VIF vif:
+ The object which has the information about the interface to attach.
+ :param nova.objects.instance.Instance instance:
+ The instance which will get an additional network interface.
+ :param string vm_ref:
+ The instance's reference from hypervisor's point of view.
+ :param string vif_ref:
+ The interface's reference from hypervisor's point of view.
+ :return: None
+ """
+ pass
+
+ def hot_unplug(self, vif, instance, vm_ref, vif_ref):
+ """hot unplug virtual interface from running instance.
+ :param nova.network.model.VIF vif:
+ The object which has the information about the interface to detach.
+ :param nova.objects.instance.Instance instance:
+ The instance which will remove additional network interface.
+ :param string vm_ref:
+ The instance's reference from hypervisor's point of view.
+ :param string vif_ref:
+ The interface's reference from hypervisor's point of view.
+ :return: None
+ """
+ pass
+
+ def post_start_actions(self, instance, vif_ref):
+ """post actions when the instance is power on.
+ :param nova.objects.instance.Instance instance:
+ The instance which will execute extra actions after power on
+ :param string vif_ref:
+ The interface's reference from hypervisor's point of view.
+ :return: None
+ """
+ pass
+
class XenAPIBridgeDriver(XenVIFDriver):
"""VIF Driver for XenAPI that uses XenAPI to create Networks."""
@@ -186,10 +227,6 @@ class XenAPIBridgeDriver(XenVIFDriver):
def unplug(self, instance, vif, vm_ref):
super(XenAPIBridgeDriver, self).unplug(instance, vif, vm_ref)
- def post_start_actions(self, instance, vif_ref):
- """no further actions needed for this driver type"""
- pass
-
class XenAPIOpenVswitchDriver(XenVIFDriver):
"""VIF driver for Open vSwitch with XenAPI."""
@@ -226,7 +263,12 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
# OVS on the hypervisor monitors this key and uses it to
# set the iface-id attribute
vif_rec['other_config'] = {'nicira-iface-id': vif['id']}
- return self._create_vif(vif, vif_rec, vm_ref)
+ vif_ref = self._create_vif(vif, vif_rec, vm_ref)
+
+ # call XenAPI to plug vif
+ self.hot_plug(vif, instance, vm_ref, vif_ref)
+
+ return vif_ref
def unplug(self, instance, vif, vm_ref):
"""unplug vif:
@@ -294,6 +336,29 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
raise exception.VirtualInterfaceUnplugException(
reason=_("Failed to delete bridge"))
+ def hot_plug(self, vif, instance, vm_ref, vif_ref):
+ # hot plug vif only when VM's power state is running
+ LOG.debug("Hot plug vif, vif: %s", vif, instance=instance)
+ state = vm_utils.get_power_state(self._session, vm_ref)
+ if state != power_state.RUNNING:
+ LOG.debug("Skip hot plug VIF, VM is not running, vif: %s", vif,
+ instance=instance)
+ return
+
+ self._session.VIF.plug(vif_ref)
+ self.post_start_actions(instance, vif_ref)
+
+ def hot_unplug(self, vif, instance, vm_ref, vif_ref):
+ # hot unplug vif only when VM's power state is running
+ LOG.debug("Hot unplug vif, vif: %s", vif, instance=instance)
+ state = vm_utils.get_power_state(self._session, vm_ref)
+ if state != power_state.RUNNING:
+ LOG.debug("Skip hot unplug VIF, VM is not running, vif: %s", vif,
+ instance=instance)
+ return
+
+ self._session.VIF.unplug(vif_ref)
+
def _get_qbr_name(self, iface_id):
return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN]
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 1c93eac..182873f 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -2522,3 +2522,47 @@ class VMOps(object):
volume_utils.forget_sr(self._session, sr_uuid_map[sr_ref])
return sr_uuid_map
+
+ def attach_interface(self, instance, vif):
+ LOG.debug("Attach interface, vif info: %s", vif, instance=instance)
+ vm_ref = self._get_vm_opaque_ref(instance)
+
+ @utils.synchronized('xenapi-vif-' + vm_ref)
+ def _attach_interface(instance, vm_ref, vif):
+ # find device for use with XenAPI
+ allowed_devices = self._session.VM.get_allowed_VIF_devices(vm_ref)
+ if allowed_devices is None or len(allowed_devices) == 0:
+ raise exception.InterfaceAttachFailed(
+ _('attach network interface %(vif_id)s to instance '
+ '%(instance_uuid)s failed, no allowed devices.'),
+ vif_id=vif['id'], instance_uuid=instance.uuid)
+ device = allowed_devices[0]
+ try:
+ # plug VIF
+ self.vif_driver.plug(instance, vif, vm_ref=vm_ref,
+ device=device)
+ # set firewall filtering
+ self.firewall_driver.setup_basic_filtering(instance, [vif])
+ except exception.NovaException:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_LE('attach network interface %s failed.'),
+ vif['id'], instance=instance)
+ try:
+ self.vif_driver.unplug(instance, vif, vm_ref)
+ except exception.NovaException:
+ # if unplug failed, no need to raise exception
+ LOG.warning(_LW('Unplug VIF %s failed.'),
+ vif['id'], instance=instance)
+
+ _attach_interface(instance, vm_ref, vif)
+
+ def detach_interface(self, instance, vif):
+ LOG.debug("Detach interface, vif info: %s", vif, instance=instance)
+
+ try:
+ vm_ref = self._get_vm_opaque_ref(instance)
+ self.vif_driver.unplug(instance, vif, vm_ref)
+ except exception.NovaException:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_LE('detach network interface %s failed.'),
+ vif['id'], instance=instance)

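With supports_attach_interface enabled, the regular compute API reaches the
new hooks. A hypothetical client-side exercise, assuming a Mitaka-era
python-novaclient; the endpoint, credentials and IDs are placeholders:

    from novaclient import client

    nova = client.Client('2', 'admin', 'secret', 'demo',
                         'http://controller:5000/v2.0')
    server = nova.servers.get('instance-uuid')
    # attach_interface() -> plug() -> hot_plug() on a running VM
    server.interface_attach(port_id='neutron-port-uuid',
                            net_id=None, fixed_ip=None)
    # detach_interface() -> unplug() -> hot_unplug()
    server.interface_detach('neutron-port-uuid')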

@@ -13,6 +13,7 @@ ASTUTE_SECTION = '@PLUGIN_NAME@'
PLUGIN_NAME = '@PLUGIN_NAME@'
LOG_ROOT = '/var/log/@PLUGIN_NAME@'
HIMN_IP = '169.254.0.1'
+DIST_PACKAGES_DIR = '/usr/lib/python2.7/dist-packages/'

LOG = logging.getLogger('@PLUGIN_NAME@')