From b068b0437263d6c158d7f2031389a32e41f8c0e9 Mon Sep 17 00:00:00 2001
From: Ghanshyam Mann
Date: Thu, 25 Jan 2024 00:45:11 -0800
Subject: [PATCH] Remove the Hyper-V driver

The Nova Hyper-V driver is not tested in OpenStack upstream and has no
maintainers. It was marked as deprecated in the Antelope release and
depends on the OpenStack Winstacker project, which has been retired[1].

As discussed in the vPTG[2], this change removes the Hyper-V driver,
its tests, and its configuration options.

[1] https://review.opendev.org/c/openstack/governance/+/886880
[2] https://etherpad.opendev.org/p/nova-caracal-ptg#L301

Change-Id: I568c79bae9b9736a20c367096d748c730ed59f0e
---
 doc/source/admin/config-drive.rst | 13 +-
 nova/conf/__init__.py | 2 -
 nova/conf/compute.py | 1 -
 nova/conf/configdrive.py | 17 -
 nova/conf/hyperv.py | 337 ---
 nova/tests/unit/virt/hyperv/__init__.py | 20 -
 nova/tests/unit/virt/hyperv/test_base.py | 40 -
 .../virt/hyperv/test_block_device_manager.py | 438 ----
 nova/tests/unit/virt/hyperv/test_driver.py | 493 -----
 .../unit/virt/hyperv/test_eventhandler.py | 143 --
 nova/tests/unit/virt/hyperv/test_hostops.py | 317 ---
 .../tests/unit/virt/hyperv/test_imagecache.py | 304 ---
 .../unit/virt/hyperv/test_livemigrationops.py | 239 ---
 .../unit/virt/hyperv/test_migrationops.py | 546 -----
 nova/tests/unit/virt/hyperv/test_pathutils.py | 226 --
 .../unit/virt/hyperv/test_rdpconsoleops.py | 47 -
 .../virt/hyperv/test_serialconsolehandler.py | 249 ---
 .../unit/virt/hyperv/test_serialconsoleops.py | 115 -
 .../unit/virt/hyperv/test_serialproxy.py | 130 --
 .../unit/virt/hyperv/test_snapshotops.py | 123 --
 nova/tests/unit/virt/hyperv/test_vif.py | 87 -
 nova/tests/unit/virt/hyperv/test_vmops.py | 1844 -----------------
 nova/tests/unit/virt/hyperv/test_volumeops.py | 645 ------
 nova/virt/hyperv/README.rst | 44 -
 nova/virt/hyperv/__init__.py | 17 -
 nova/virt/hyperv/block_device_manager.py | 270 ---
 nova/virt/hyperv/constants.py | 93 -
 nova/virt/hyperv/driver.py | 385 ----
 nova/virt/hyperv/eventhandler.py | 96 -
 nova/virt/hyperv/hostops.py | 291 ---
 nova/virt/hyperv/imagecache.py | 249 ---
 nova/virt/hyperv/livemigrationops.py | 154 --
 nova/virt/hyperv/migrationops.py | 346 ----
 nova/virt/hyperv/pathutils.py | 201 --
 nova/virt/hyperv/rdpconsoleops.py | 41 -
 nova/virt/hyperv/serialconsolehandler.py | 164 --
 nova/virt/hyperv/serialconsoleops.py | 112 -
 nova/virt/hyperv/serialproxy.py | 129 --
 nova/virt/hyperv/snapshotops.py | 117 --
 nova/virt/hyperv/vif.py | 63 -
 nova/virt/hyperv/vmops.py | 1127 ----------
 nova/virt/hyperv/volumeops.py | 378 ----
 .../notes/remove-hyperv-94d5bfd8a539fe9f.yaml | 29 +
 setup.cfg | 2 -
 tox.ini | 1 -
 45 files changed, 30 insertions(+), 10655 deletions(-)
 delete mode 100644 nova/conf/hyperv.py
 delete mode 100644 nova/tests/unit/virt/hyperv/__init__.py
 delete mode 100644 nova/tests/unit/virt/hyperv/test_base.py
 delete mode 100644 nova/tests/unit/virt/hyperv/test_block_device_manager.py
 delete mode 100644 nova/tests/unit/virt/hyperv/test_driver.py
 delete mode 100644 nova/tests/unit/virt/hyperv/test_eventhandler.py
 delete mode 100644 nova/tests/unit/virt/hyperv/test_hostops.py
 delete mode 100644 nova/tests/unit/virt/hyperv/test_imagecache.py
 delete mode 100644 nova/tests/unit/virt/hyperv/test_livemigrationops.py
 delete mode 100644 nova/tests/unit/virt/hyperv/test_migrationops.py
 delete mode 100644 nova/tests/unit/virt/hyperv/test_pathutils.py
 delete mode 100644 nova/tests/unit/virt/hyperv/test_rdpconsoleops.py
 delete mode 100644 nova/tests/unit/virt/hyperv/test_serialconsolehandler.py
 delete mode 100644
nova/tests/unit/virt/hyperv/test_serialconsoleops.py delete mode 100644 nova/tests/unit/virt/hyperv/test_serialproxy.py delete mode 100644 nova/tests/unit/virt/hyperv/test_snapshotops.py delete mode 100644 nova/tests/unit/virt/hyperv/test_vif.py delete mode 100644 nova/tests/unit/virt/hyperv/test_vmops.py delete mode 100644 nova/tests/unit/virt/hyperv/test_volumeops.py delete mode 100644 nova/virt/hyperv/README.rst delete mode 100644 nova/virt/hyperv/__init__.py delete mode 100644 nova/virt/hyperv/block_device_manager.py delete mode 100644 nova/virt/hyperv/constants.py delete mode 100644 nova/virt/hyperv/driver.py delete mode 100644 nova/virt/hyperv/eventhandler.py delete mode 100644 nova/virt/hyperv/hostops.py delete mode 100644 nova/virt/hyperv/imagecache.py delete mode 100644 nova/virt/hyperv/livemigrationops.py delete mode 100644 nova/virt/hyperv/migrationops.py delete mode 100644 nova/virt/hyperv/pathutils.py delete mode 100644 nova/virt/hyperv/rdpconsoleops.py delete mode 100644 nova/virt/hyperv/serialconsolehandler.py delete mode 100644 nova/virt/hyperv/serialconsoleops.py delete mode 100644 nova/virt/hyperv/serialproxy.py delete mode 100644 nova/virt/hyperv/snapshotops.py delete mode 100644 nova/virt/hyperv/vif.py delete mode 100644 nova/virt/hyperv/vmops.py delete mode 100644 nova/virt/hyperv/volumeops.py create mode 100644 releasenotes/notes/remove-hyperv-94d5bfd8a539fe9f.yaml diff --git a/doc/source/admin/config-drive.rst b/doc/source/admin/config-drive.rst index 7111a2407be6..62e4c5f5dffb 100644 --- a/doc/source/admin/config-drive.rst +++ b/doc/source/admin/config-drive.rst @@ -39,7 +39,7 @@ compute host and image. .. rubric:: Compute host requirements -The following virt drivers support the config drive: libvirt, Hyper-V and +The following virt drivers support the config drive: libvirt and VMware. The Bare Metal service also supports the config drive. - To use config drives with libvirt or VMware, you must first @@ -49,12 +49,6 @@ VMware. The Bare Metal service also supports the config drive. the same path as the :program:`nova-compute` service, you do not need to set this flag. -- To use config drives with Hyper-V, you must set the - :oslo.config:option:`mkisofs_cmd` config option to the full path to an - :command:`mkisofs.exe` installation. Additionally, you must set the - :oslo.config:option:`hyperv.qemu_img_cmd` config option to the full path to an - :command:`qemu-img` command installation. - - To use config drives with the Bare Metal service, you do not need to prepare anything. 
@@ -81,11 +75,6 @@ options: - :oslo.config:option:`force_config_drive` - :oslo.config:option:`config_drive_format` -If using the HyperV compute driver, the following additional options are -supported: - -- :oslo.config:option:`hyperv.config_drive_cdrom` - For example, to ensure nova always provides a config drive to instances but versions ``2018-08-27`` (Rocky) and ``2017-02-22`` (Ocata) are skipped, add the following to :file:`nova.conf`: diff --git a/nova/conf/__init__.py b/nova/conf/__init__.py index 9e5a57afba7a..169c5946a652 100644 --- a/nova/conf/__init__.py +++ b/nova/conf/__init__.py @@ -35,7 +35,6 @@ from nova.conf import devices from nova.conf import ephemeral_storage from nova.conf import glance from nova.conf import guestfs -from nova.conf import hyperv from nova.conf import imagecache from nova.conf import ironic from nova.conf import key_manager @@ -84,7 +83,6 @@ devices.register_opts(CONF) ephemeral_storage.register_opts(CONF) glance.register_opts(CONF) guestfs.register_opts(CONF) -hyperv.register_opts(CONF) mks.register_opts(CONF) imagecache.register_opts(CONF) ironic.register_opts(CONF) diff --git a/nova/conf/compute.py b/nova/conf/compute.py index dde79fc86ff5..3d0db555f53e 100644 --- a/nova/conf/compute.py +++ b/nova/conf/compute.py @@ -40,7 +40,6 @@ Possible values: * ``fake.FakeDriver`` * ``ironic.IronicDriver`` * ``vmwareapi.VMwareVCDriver`` -* ``hyperv.HyperVDriver`` * ``zvm.ZVMDriver`` """), cfg.BoolOpt('allow_resize_to_same_host', diff --git a/nova/conf/configdrive.py b/nova/conf/configdrive.py index 618ec759acc1..b1765f28ee73 100644 --- a/nova/conf/configdrive.py +++ b/nova/conf/configdrive.py @@ -44,10 +44,6 @@ Related options: config drive option 3. the image used to create the instance requires a config drive, this is defined by ``img_config_drive`` property for that image. - -* A compute node running Hyper-V hypervisor can be configured to attach - config drive as a CD drive. To attach the config drive as a CD drive, set the - ``[hyperv] config_drive_cdrom`` option to true. """), cfg.BoolOpt('force_config_drive', default=False, @@ -71,11 +67,6 @@ Related options: * Use the 'mkisofs_cmd' flag to set the path where you install the genisoimage program. If genisoimage is in same path as the nova-compute service, you do not need to set this flag. -* To use a config drive with Hyper-V, you must set the - 'mkisofs_cmd' value to the full path to an mkisofs.exe installation. - Additionally, you must set the qemu_img_cmd value in the hyperv - configuration section to the full path to an qemu-img command - installation. """), cfg.StrOpt('mkisofs_cmd', default='genisoimage', @@ -86,11 +77,6 @@ Use the ``mkisofs_cmd`` flag to set the path where you install the ``genisoimage`` program. If ``genisoimage`` is on the system path, you do not need to change the default value. -To use a config drive with Hyper-V, you must set the ``mkisofs_cmd`` value to -the full path to an ``mkisofs.exe`` installation. Additionally, you must set -the ``qemu_img_cmd`` value in the hyperv configuration section to the full path -to an ``qemu-img`` command installation. - Possible values: * Name of the ISO image creator program, in case it is in the same directory @@ -100,9 +86,6 @@ Possible values: Related options: * This option is meaningful when config drives are enabled. -* To use config drive with Hyper-V, you must set the ``qemu_img_cmd`` - value in the hyperv configuration section to the full path to an ``qemu-img`` - command installation. 
"""), ] diff --git a/nova/conf/hyperv.py b/nova/conf/hyperv.py deleted file mode 100644 index cce3cdc3e2dc..000000000000 --- a/nova/conf/hyperv.py +++ /dev/null @@ -1,337 +0,0 @@ -# Copyright (c) 2016 TUBITAK BILGEM -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -hyperv_opt_group = cfg.OptGroup("hyperv", - title='The Hyper-V feature', - help=""" -The hyperv feature allows you to configure the Hyper-V hypervisor -driver to be used within an OpenStack deployment. -""") - -hyperv_opts = [ - cfg.FloatOpt('dynamic_memory_ratio', - default=1.0, - help=""" -Dynamic memory ratio - -Enables dynamic memory allocation (ballooning) when set to a value -greater than 1. The value expresses the ratio between the total RAM -assigned to an instance and its startup RAM amount. For example a -ratio of 2.0 for an instance with 1024MB of RAM implies 512MB of -RAM allocated at startup. - -Possible values: - -* 1.0: Disables dynamic memory allocation (Default). -* Float values greater than 1.0: Enables allocation of total implied - RAM divided by this value for startup. -"""), - cfg.BoolOpt('enable_instance_metrics_collection', - default=False, - help=""" -Enable instance metrics collection - -Enables metrics collections for an instance by using Hyper-V's -metric APIs. Collected data can be retrieved by other apps and -services, e.g.: Ceilometer. -"""), - cfg.StrOpt('instances_path_share', - default="", - help=""" -Instances path share - -The name of a Windows share mapped to the "instances_path" dir -and used by the resize feature to copy files to the target host. -If left blank, an administrative share (hidden network share) will -be used, looking for the same "instances_path" used locally. - -Possible values: - -* "": An administrative share will be used (Default). -* Name of a Windows share. - -Related options: - -* "instances_path": The directory which will be used if this option - here is left blank. -"""), - cfg.BoolOpt('limit_cpu_features', - default=False, - help=""" -Limit CPU features - -This flag is needed to support live migration to hosts with -different CPU features and checked during instance creation -in order to limit the CPU features used by the instance. -"""), - cfg.IntOpt('mounted_disk_query_retry_count', - default=10, - min=0, - help=""" -Mounted disk query retry count - -The number of times to retry checking for a mounted disk. -The query runs until the device can be found or the retry -count is reached. - -Possible values: - -* Positive integer values. Values greater than 1 is recommended - (Default: 10). - -Related options: - -* Time interval between disk mount retries is declared with - "mounted_disk_query_retry_interval" option. -"""), - cfg.IntOpt('mounted_disk_query_retry_interval', - default=5, - min=0, - help=""" -Mounted disk query retry interval - -Interval between checks for a mounted disk, in seconds. - -Possible values: - -* Time in seconds (Default: 5). 
- -Related options: - -* This option is meaningful when the mounted_disk_query_retry_count - is greater than 1. -* The retry loop runs with mounted_disk_query_retry_count and - mounted_disk_query_retry_interval configuration options. -"""), - cfg.IntOpt('power_state_check_timeframe', - default=60, - min=0, - help=""" -Power state check timeframe - -The timeframe to be checked for instance power state changes. -This option is used to fetch the state of the instance from Hyper-V -through the WMI interface, within the specified timeframe. - -Possible values: - -* Timeframe in seconds (Default: 60). -"""), - cfg.IntOpt('power_state_event_polling_interval', - default=2, - min=0, - help=""" -Power state event polling interval - -Instance power state change event polling frequency. Sets the -listener interval for power state events to the given value. -This option enhances the internal lifecycle notifications of -instances that reboot themselves. It is unlikely that an operator -has to change this value. - -Possible values: - -* Time in seconds (Default: 2). -"""), - cfg.StrOpt('qemu_img_cmd', - default="qemu-img.exe", - help=r""" -qemu-img command - -qemu-img is required for some of the image related operations -like converting between different image types. You can get it -from here: (http://qemu.weilnetz.de/) or you can install the -Cloudbase OpenStack Hyper-V Compute Driver -(https://cloudbase.it/openstack-hyperv-driver/) which automatically -sets the proper path for this config option. You can either give the -full path of qemu-img.exe or set its path in the PATH environment -variable and leave this option to the default value. - -Possible values: - -* Name of the qemu-img executable, in case it is in the same - directory as the nova-compute service or its path is in the - PATH environment variable (Default). -* Path of qemu-img command (DRIVELETTER:\PATH\TO\QEMU-IMG\COMMAND). - -Related options: - -* If the config_drive_cdrom option is False, qemu-img will be used to - convert the ISO to a VHD, otherwise the config drive will - remain an ISO. To use config drive with Hyper-V, you must - set the ``mkisofs_cmd`` value to the full path to an ``mkisofs.exe`` - installation. -"""), - cfg.StrOpt('vswitch_name', - help=""" -External virtual switch name - -The Hyper-V Virtual Switch is a software-based layer-2 Ethernet -network switch that is available with the installation of the -Hyper-V server role. The switch includes programmatically managed -and extensible capabilities to connect virtual machines to both -virtual networks and the physical network. In addition, Hyper-V -Virtual Switch provides policy enforcement for security, isolation, -and service levels. The vSwitch represented by this config option -must be an external one (not internal or private). - -Possible values: - -* If not provided, the first of a list of available vswitches - is used. This list is queried using WQL. -* Virtual switch name. -"""), - cfg.IntOpt('wait_soft_reboot_seconds', - default=60, - min=0, - help=""" -Wait soft reboot seconds - -Number of seconds to wait for instance to shut down after soft -reboot request is made. We fall back to hard reboot if instance -does not shutdown within this window. - -Possible values: - -* Time in seconds (Default: 60). -"""), - cfg.BoolOpt('config_drive_cdrom', - default=False, - help=""" -Mount config drive as a CD drive. - -OpenStack can be configured to write instance metadata to a config drive, which -is then attached to the instance before it boots. 
The config drive can be -attached as a disk drive (default) or as a CD drive. - -Related options: - -* This option is meaningful with ``force_config_drive`` option set to ``True`` - or when the REST API call to create an instance will have - ``--config-drive=True`` flag. -* ``config_drive_format`` option must be set to ``iso9660`` in order to use - CD drive as the config drive image. -* To use config drive with Hyper-V, you must set the - ``mkisofs_cmd`` value to the full path to an ``mkisofs.exe`` installation. - Additionally, you must set the ``qemu_img_cmd`` value to the full path - to an ``qemu-img`` command installation. -* You can configure the Compute service to always create a configuration - drive by setting the ``force_config_drive`` option to ``True``. -"""), - cfg.BoolOpt('config_drive_inject_password', - default=False, - help=""" -Inject password to config drive. - -When enabled, the admin password will be available from the config drive image. - -Related options: - -* This option is meaningful when used with other options that enable - config drive usage with Hyper-V, such as ``force_config_drive``. -"""), - cfg.IntOpt('volume_attach_retry_count', - default=10, - min=0, - help=""" -Volume attach retry count - -The number of times to retry attaching a volume. Volume attachment -is retried until success or the given retry count is reached. - -Possible values: - -* Positive integer values (Default: 10). - -Related options: - -* Time interval between attachment attempts is declared with - volume_attach_retry_interval option. -"""), - cfg.IntOpt('volume_attach_retry_interval', - default=5, - min=0, - help=""" -Volume attach retry interval - -Interval between volume attachment attempts, in seconds. - -Possible values: - -* Time in seconds (Default: 5). - -Related options: - -* This options is meaningful when volume_attach_retry_count - is greater than 1. -* The retry loop runs with volume_attach_retry_count and - volume_attach_retry_interval configuration options. -"""), - cfg.BoolOpt('enable_remotefx', - default=False, - help=""" -Enable RemoteFX feature - -This requires at least one DirectX 11 capable graphics adapter for -Windows / Hyper-V Server 2012 R2 or newer and RDS-Virtualization -feature has to be enabled. - -Instances with RemoteFX can be requested with the following flavor -extra specs: - -**os:resolution**. Guest VM screen resolution size. Acceptable values:: - - 1024x768, 1280x1024, 1600x1200, 1920x1200, 2560x1600, 3840x2160 - -``3840x2160`` is only available on Windows / Hyper-V Server 2016. - -**os:monitors**. Guest VM number of monitors. Acceptable values:: - - [1, 4] - Windows / Hyper-V Server 2012 R2 - [1, 8] - Windows / Hyper-V Server 2016 - -**os:vram**. Guest VM VRAM amount. Only available on -Windows / Hyper-V Server 2016. Acceptable values:: - - 64, 128, 256, 512, 1024 -"""), - cfg.BoolOpt('use_multipath_io', - default=False, - help=""" -Use multipath connections when attaching iSCSI or FC disks. - -This requires the Multipath IO Windows feature to be enabled. MPIO must be -configured to claim such devices. -"""), - cfg.ListOpt('iscsi_initiator_list', - default=[], - help=""" -List of iSCSI initiators that will be used for establishing iSCSI sessions. - -If none are specified, the Microsoft iSCSI initiator service will choose the -initiator. 
-""") -] - - -def register_opts(conf): - conf.register_group(hyperv_opt_group) - conf.register_opts(hyperv_opts, group=hyperv_opt_group) - - -def list_opts(): - return {hyperv_opt_group: hyperv_opts} diff --git a/nova/tests/unit/virt/hyperv/__init__.py b/nova/tests/unit/virt/hyperv/__init__.py deleted file mode 100644 index 2190f0570ff8..000000000000 --- a/nova/tests/unit/virt/hyperv/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import unittest - -try: - import os_win # noqa: F401 -except ImportError: - raise unittest.SkipTest( - "The 'os-win' dependency is not installed." - ) diff --git a/nova/tests/unit/virt/hyperv/test_base.py b/nova/tests/unit/virt/hyperv/test_base.py deleted file mode 100644 index 1dd7db367bff..000000000000 --- a/nova/tests/unit/virt/hyperv/test_base.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from os_win import utilsfactory - -from nova import test - - -class HyperVBaseTestCase(test.NoDBTestCase): - def setUp(self): - super(HyperVBaseTestCase, self).setUp() - - self._mock_wmi = mock.MagicMock() - wmi_patcher = mock.patch('builtins.wmi', create=True, - new=self._mock_wmi) - platform_patcher = mock.patch('sys.platform', 'win32') - utilsfactory_patcher = mock.patch.object(utilsfactory, '_get_class') - - platform_patcher.start() - wmi_patcher.start() - utilsfactory_patcher.start() - - self.addCleanup(wmi_patcher.stop) - self.addCleanup(platform_patcher.stop) - self.addCleanup(utilsfactory_patcher.stop) diff --git a/nova/tests/unit/virt/hyperv/test_block_device_manager.py b/nova/tests/unit/virt/hyperv/test_block_device_manager.py deleted file mode 100644 index 0d914a55a58f..000000000000 --- a/nova/tests/unit/virt/hyperv/test_block_device_manager.py +++ /dev/null @@ -1,438 +0,0 @@ -# Copyright (c) 2016 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from os_win import constants as os_win_const -from unittest import mock - -from nova import exception -from nova.tests.unit.virt.hyperv import test_base -from nova.virt.hyperv import block_device_manager -from nova.virt.hyperv import constants - - -class BlockDeviceManagerTestCase(test_base.HyperVBaseTestCase): - """Unit tests for the Hyper-V BlockDeviceInfoManager class.""" - - def setUp(self): - super(BlockDeviceManagerTestCase, self).setUp() - self._bdman = block_device_manager.BlockDeviceInfoManager() - - def test_get_device_bus_scsi(self): - bdm = {'disk_bus': constants.CTRL_TYPE_SCSI, - 'drive_addr': 0, 'ctrl_disk_addr': 2} - - bus = self._bdman._get_device_bus(bdm) - self.assertEqual('0:0:0:2', bus.address) - - def test_get_device_bus_ide(self): - bdm = {'disk_bus': constants.CTRL_TYPE_IDE, - 'drive_addr': 0, 'ctrl_disk_addr': 1} - - bus = self._bdman._get_device_bus(bdm) - self.assertEqual('0:1', bus.address) - - @staticmethod - def _bdm_mock(**kwargs): - bdm = mock.MagicMock(**kwargs) - bdm.__contains__.side_effect = ( - lambda attr: getattr(bdm, attr, None) is not None) - return bdm - - @mock.patch.object(block_device_manager.objects, 'DiskMetadata') - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - '_get_device_bus') - @mock.patch.object(block_device_manager.objects.BlockDeviceMappingList, - 'get_by_instance_uuid') - def test_get_bdm_metadata(self, mock_get_by_inst_uuid, mock_get_device_bus, - mock_DiskMetadata): - mock_instance = mock.MagicMock() - root_disk = {'mount_device': mock.sentinel.dev0} - ephemeral = {'device_name': mock.sentinel.dev1} - block_device_info = { - 'root_disk': root_disk, - 'block_device_mapping': [ - {'mount_device': mock.sentinel.dev2}, - {'mount_device': mock.sentinel.dev3}, - ], - 'ephemerals': [ephemeral], - } - - bdm = self._bdm_mock(device_name=mock.sentinel.dev0, tag='taggy', - volume_id=mock.sentinel.uuid1) - eph = self._bdm_mock(device_name=mock.sentinel.dev1, tag='ephy', - volume_id=mock.sentinel.uuid2) - mock_get_by_inst_uuid.return_value = [ - bdm, eph, self._bdm_mock(device_name=mock.sentinel.dev2, tag=None), - ] - - bdm_metadata = self._bdman.get_bdm_metadata(mock.sentinel.context, - mock_instance, - block_device_info) - - mock_get_by_inst_uuid.assert_called_once_with(mock.sentinel.context, - mock_instance.uuid) - mock_get_device_bus.assert_has_calls( - [mock.call(root_disk), mock.call(ephemeral)], any_order=True) - mock_DiskMetadata.assert_has_calls( - [mock.call(bus=mock_get_device_bus.return_value, - serial=bdm.volume_id, tags=[bdm.tag]), - mock.call(bus=mock_get_device_bus.return_value, - serial=eph.volume_id, tags=[eph.tag])], - any_order=True) - self.assertEqual([mock_DiskMetadata.return_value] * 2, bdm_metadata) - - @mock.patch('nova.virt.configdrive.required_by') - def test_init_controller_slot_counter_gen1_no_configdrive( - self, mock_cfg_drive_req): - mock_cfg_drive_req.return_value = False - slot_map = self._bdman._initialize_controller_slot_counter( - mock.sentinel.FAKE_INSTANCE, constants.VM_GEN_1) - - self.assertEqual(slot_map[constants.CTRL_TYPE_IDE][0], - os_win_const.IDE_CONTROLLER_SLOTS_NUMBER) - self.assertEqual(slot_map[constants.CTRL_TYPE_IDE][1], - os_win_const.IDE_CONTROLLER_SLOTS_NUMBER) - self.assertEqual(slot_map[constants.CTRL_TYPE_SCSI][0], - os_win_const.SCSI_CONTROLLER_SLOTS_NUMBER) - - @mock.patch('nova.virt.configdrive.required_by') - def test_init_controller_slot_counter_gen1(self, mock_cfg_drive_req): - slot_map = self._bdman._initialize_controller_slot_counter( - 
mock.sentinel.FAKE_INSTANCE, constants.VM_GEN_1) - - self.assertEqual(slot_map[constants.CTRL_TYPE_IDE][1], - os_win_const.IDE_CONTROLLER_SLOTS_NUMBER - 1) - - @mock.patch.object(block_device_manager.configdrive, 'required_by') - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - '_initialize_controller_slot_counter') - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - '_check_and_update_root_device') - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - '_check_and_update_ephemerals') - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - '_check_and_update_volumes') - def _check_validate_and_update_bdi(self, mock_check_and_update_vol, - mock_check_and_update_eph, - mock_check_and_update_root, - mock_init_ctrl_cntr, - mock_required_by, available_slots=1): - mock_required_by.return_value = True - slot_map = {constants.CTRL_TYPE_SCSI: [available_slots]} - mock_init_ctrl_cntr.return_value = slot_map - - if available_slots: - self._bdman.validate_and_update_bdi(mock.sentinel.FAKE_INSTANCE, - mock.sentinel.IMAGE_META, - constants.VM_GEN_2, - mock.sentinel.BLOCK_DEV_INFO) - else: - self.assertRaises(exception.InvalidBDMFormat, - self._bdman.validate_and_update_bdi, - mock.sentinel.FAKE_INSTANCE, - mock.sentinel.IMAGE_META, - constants.VM_GEN_2, - mock.sentinel.BLOCK_DEV_INFO) - - mock_init_ctrl_cntr.assert_called_once_with( - mock.sentinel.FAKE_INSTANCE, constants.VM_GEN_2) - mock_check_and_update_root.assert_called_once_with( - constants.VM_GEN_2, mock.sentinel.IMAGE_META, - mock.sentinel.BLOCK_DEV_INFO, slot_map) - mock_check_and_update_eph.assert_called_once_with( - constants.VM_GEN_2, mock.sentinel.BLOCK_DEV_INFO, slot_map) - mock_check_and_update_vol.assert_called_once_with( - constants.VM_GEN_2, mock.sentinel.BLOCK_DEV_INFO, slot_map) - mock_required_by.assert_called_once_with(mock.sentinel.FAKE_INSTANCE) - - def test_validate_and_update_bdi(self): - self._check_validate_and_update_bdi() - - def test_validate_and_update_bdi_insufficient_slots(self): - self._check_validate_and_update_bdi(available_slots=0) - - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - '_get_available_controller_slot') - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - 'is_boot_from_volume') - def _test_check_and_update_root_device(self, mock_is_boot_from_vol, - mock_get_avail_ctrl_slot, - disk_format, - vm_gen=constants.VM_GEN_1, - boot_from_volume=False): - image_meta = mock.MagicMock(disk_format=disk_format) - bdi = {'root_device': '/dev/sda', - 'block_device_mapping': [ - {'mount_device': '/dev/sda', - 'connection_info': mock.sentinel.FAKE_CONN_INFO}]} - - mock_is_boot_from_vol.return_value = boot_from_volume - mock_get_avail_ctrl_slot.return_value = (0, 0) - - self._bdman._check_and_update_root_device(vm_gen, image_meta, bdi, - mock.sentinel.SLOT_MAP) - - root_disk = bdi['root_disk'] - if boot_from_volume: - self.assertEqual(root_disk['type'], constants.VOLUME) - self.assertIsNone(root_disk['path']) - self.assertEqual(root_disk['connection_info'], - mock.sentinel.FAKE_CONN_INFO) - else: - image_type = self._bdman._TYPE_FOR_DISK_FORMAT.get( - image_meta.disk_format) - self.assertEqual(root_disk['type'], image_type) - self.assertIsNone(root_disk['path']) - self.assertIsNone(root_disk['connection_info']) - - disk_bus = (constants.CTRL_TYPE_IDE if - vm_gen == constants.VM_GEN_1 else constants.CTRL_TYPE_SCSI) - self.assertEqual(root_disk['disk_bus'], disk_bus) - self.assertEqual(root_disk['drive_addr'], 0) - 
self.assertEqual(root_disk['ctrl_disk_addr'], 0) - self.assertEqual(root_disk['boot_index'], 0) - self.assertEqual(root_disk['mount_device'], bdi['root_device']) - mock_get_avail_ctrl_slot.assert_called_once_with( - root_disk['disk_bus'], mock.sentinel.SLOT_MAP) - - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - 'is_boot_from_volume', return_value=False) - def test_check_and_update_root_device_exception(self, mock_is_boot_vol): - bdi = {} - image_meta = mock.MagicMock(disk_format=mock.sentinel.fake_format) - - self.assertRaises(exception.InvalidImageFormat, - self._bdman._check_and_update_root_device, - constants.VM_GEN_1, image_meta, bdi, - mock.sentinel.SLOT_MAP) - - def test_check_and_update_root_device_gen1(self): - self._test_check_and_update_root_device(disk_format='vhd') - - def test_check_and_update_root_device_gen1_vhdx(self): - self._test_check_and_update_root_device(disk_format='vhdx') - - def test_check_and_update_root_device_gen1_iso(self): - self._test_check_and_update_root_device(disk_format='iso') - - def test_check_and_update_root_device_gen2(self): - self._test_check_and_update_root_device(disk_format='vhd', - vm_gen=constants.VM_GEN_2) - - def test_check_and_update_root_device_boot_from_vol_gen1(self): - self._test_check_and_update_root_device(disk_format='vhd', - boot_from_volume=True) - - def test_check_and_update_root_device_boot_from_vol_gen2(self): - self._test_check_and_update_root_device(disk_format='vhd', - vm_gen=constants.VM_GEN_2, - boot_from_volume=True) - - @mock.patch('nova.virt.configdrive.required_by', return_value=True) - def _test_get_available_controller_slot(self, mock_config_drive_req, - bus=constants.CTRL_TYPE_IDE, - fail=False): - - slot_map = self._bdman._initialize_controller_slot_counter( - mock.sentinel.FAKE_VM, constants.VM_GEN_1) - - if fail: - slot_map[constants.CTRL_TYPE_IDE][0] = 0 - slot_map[constants.CTRL_TYPE_IDE][1] = 0 - self.assertRaises(exception.InvalidBDMFormat, - self._bdman._get_available_controller_slot, - constants.CTRL_TYPE_IDE, - slot_map) - else: - (disk_addr, - ctrl_disk_addr) = self._bdman._get_available_controller_slot( - bus, slot_map) - - self.assertEqual(0, disk_addr) - self.assertEqual(0, ctrl_disk_addr) - - def test_get_available_controller_slot(self): - self._test_get_available_controller_slot() - - def test_get_available_controller_slot_scsi_ctrl(self): - self._test_get_available_controller_slot(bus=constants.CTRL_TYPE_SCSI) - - def test_get_available_controller_slot_exception(self): - self._test_get_available_controller_slot(fail=True) - - def test_is_boot_from_volume_true(self): - vol = {'mount_device': self._bdman._DEFAULT_ROOT_DEVICE} - block_device_info = {'block_device_mapping': [vol]} - ret = self._bdman.is_boot_from_volume(block_device_info) - - self.assertTrue(ret) - - def test_is_boot_from_volume_false(self): - block_device_info = {'block_device_mapping': []} - ret = self._bdman.is_boot_from_volume(block_device_info) - - self.assertFalse(ret) - - def test_get_root_device_bdm(self): - mount_device = '/dev/sda' - bdm1 = {'mount_device': None} - bdm2 = {'mount_device': mount_device} - bdi = {'block_device_mapping': [bdm1, bdm2]} - - ret = self._bdman._get_root_device_bdm(bdi, mount_device) - - self.assertEqual(bdm2, ret) - - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - '_check_and_update_bdm') - def test_check_and_update_ephemerals(self, mock_check_and_update_bdm): - fake_ephemerals = [mock.sentinel.eph1, mock.sentinel.eph2, - mock.sentinel.eph3] - fake_bdi = 
{'ephemerals': fake_ephemerals} - expected_calls = [] - for eph in fake_ephemerals: - expected_calls.append(mock.call(mock.sentinel.fake_slot_map, - mock.sentinel.fake_vm_gen, - eph)) - self._bdman._check_and_update_ephemerals(mock.sentinel.fake_vm_gen, - fake_bdi, - mock.sentinel.fake_slot_map) - mock_check_and_update_bdm.assert_has_calls(expected_calls) - - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - '_check_and_update_bdm') - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - '_get_root_device_bdm') - def test_check_and_update_volumes(self, mock_get_root_dev_bdm, - mock_check_and_update_bdm): - fake_vol1 = {'mount_device': '/dev/sda'} - fake_vol2 = {'mount_device': '/dev/sdb'} - fake_volumes = [fake_vol1, fake_vol2] - fake_bdi = {'block_device_mapping': fake_volumes, - 'root_disk': {'mount_device': '/dev/sda'}} - mock_get_root_dev_bdm.return_value = fake_vol1 - - self._bdman._check_and_update_volumes(mock.sentinel.fake_vm_gen, - fake_bdi, - mock.sentinel.fake_slot_map) - - mock_get_root_dev_bdm.assert_called_once_with(fake_bdi, '/dev/sda') - mock_check_and_update_bdm.assert_called_once_with( - mock.sentinel.fake_slot_map, mock.sentinel.fake_vm_gen, fake_vol2) - self.assertNotIn(fake_vol1, fake_bdi) - - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - '_get_available_controller_slot') - def test_check_and_update_bdm_with_defaults(self, mock_get_ctrl_slot): - mock_get_ctrl_slot.return_value = ((mock.sentinel.DRIVE_ADDR, - mock.sentinel.CTRL_DISK_ADDR)) - bdm = {'device_type': None, - 'disk_bus': None, - 'boot_index': None} - - self._bdman._check_and_update_bdm(mock.sentinel.FAKE_SLOT_MAP, - constants.VM_GEN_1, bdm) - - mock_get_ctrl_slot.assert_called_once_with( - bdm['disk_bus'], mock.sentinel.FAKE_SLOT_MAP) - self.assertEqual(mock.sentinel.DRIVE_ADDR, bdm['drive_addr']) - self.assertEqual(mock.sentinel.CTRL_DISK_ADDR, bdm['ctrl_disk_addr']) - self.assertEqual('disk', bdm['device_type']) - self.assertEqual(self._bdman._DEFAULT_BUS, bdm['disk_bus']) - self.assertIsNone(bdm['boot_index']) - - def test_check_and_update_bdm_exception_device_type(self): - bdm = {'device_type': 'cdrom', - 'disk_bus': 'IDE'} - - self.assertRaises(exception.InvalidDiskInfo, - self._bdman._check_and_update_bdm, - mock.sentinel.FAKE_SLOT_MAP, constants.VM_GEN_1, bdm) - - def test_check_and_update_bdm_exception_disk_bus(self): - bdm = {'device_type': 'disk', - 'disk_bus': 'fake_bus'} - - self.assertRaises(exception.InvalidDiskInfo, - self._bdman._check_and_update_bdm, - mock.sentinel.FAKE_SLOT_MAP, constants.VM_GEN_1, bdm) - - def test_sort_by_boot_order(self): - original = [{'boot_index': 2}, {'boot_index': None}, {'boot_index': 1}] - expected = [original[2], original[0], original[1]] - - self._bdman._sort_by_boot_order(original) - self.assertEqual(expected, original) - - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - '_get_boot_order_gen1') - def test_get_boot_order_gen1_vm(self, mock_get_boot_order): - self._bdman.get_boot_order(constants.VM_GEN_1, - mock.sentinel.BLOCK_DEV_INFO) - mock_get_boot_order.assert_called_once_with( - mock.sentinel.BLOCK_DEV_INFO) - - @mock.patch.object(block_device_manager.BlockDeviceInfoManager, - '_get_boot_order_gen2') - def test_get_boot_order_gen2_vm(self, mock_get_boot_order): - self._bdman.get_boot_order(constants.VM_GEN_2, - mock.sentinel.BLOCK_DEV_INFO) - mock_get_boot_order.assert_called_once_with( - mock.sentinel.BLOCK_DEV_INFO) - - def test_get_boot_order_gen1_iso(self): - fake_bdi = {'root_disk': 
{'type': 'iso'}} - expected = [os_win_const.BOOT_DEVICE_CDROM, - os_win_const.BOOT_DEVICE_HARDDISK, - os_win_const.BOOT_DEVICE_NETWORK, - os_win_const.BOOT_DEVICE_FLOPPY] - - res = self._bdman._get_boot_order_gen1(fake_bdi) - self.assertEqual(expected, res) - - def test_get_boot_order_gen1_vhd(self): - fake_bdi = {'root_disk': {'type': 'vhd'}} - expected = [os_win_const.BOOT_DEVICE_HARDDISK, - os_win_const.BOOT_DEVICE_CDROM, - os_win_const.BOOT_DEVICE_NETWORK, - os_win_const.BOOT_DEVICE_FLOPPY] - - res = self._bdman._get_boot_order_gen1(fake_bdi) - self.assertEqual(expected, res) - - @mock.patch('nova.virt.hyperv.volumeops.VolumeOps.get_disk_resource_path') - def test_get_boot_order_gen2(self, mock_get_disk_path): - fake_root_disk = {'boot_index': 0, - 'path': mock.sentinel.FAKE_ROOT_PATH} - fake_eph1 = {'boot_index': 2, - 'path': mock.sentinel.FAKE_EPH_PATH1} - fake_eph2 = {'boot_index': 3, - 'path': mock.sentinel.FAKE_EPH_PATH2} - fake_bdm = {'boot_index': 1, - 'connection_info': mock.sentinel.FAKE_CONN_INFO} - fake_bdi = {'root_disk': fake_root_disk, - 'ephemerals': [fake_eph1, - fake_eph2], - 'block_device_mapping': [fake_bdm]} - - mock_get_disk_path.return_value = fake_bdm['connection_info'] - - expected_res = [mock.sentinel.FAKE_ROOT_PATH, - mock.sentinel.FAKE_CONN_INFO, - mock.sentinel.FAKE_EPH_PATH1, - mock.sentinel.FAKE_EPH_PATH2] - - res = self._bdman._get_boot_order_gen2(fake_bdi) - - self.assertEqual(expected_res, res) diff --git a/nova/tests/unit/virt/hyperv/test_driver.py b/nova/tests/unit/virt/hyperv/test_driver.py deleted file mode 100644 index c9ccc6e8f1b6..000000000000 --- a/nova/tests/unit/virt/hyperv/test_driver.py +++ /dev/null @@ -1,493 +0,0 @@ -# Copyright 2015 Cloudbase Solutions SRL -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Unit tests for the Hyper-V Driver. 
-""" - -import platform -import sys -from unittest import mock - -from os_win import exceptions as os_win_exc - -from nova import exception -from nova import safe_utils -from nova.tests.unit import fake_instance -from nova.tests.unit.virt.hyperv import test_base -from nova.virt import driver as base_driver -from nova.virt.hyperv import driver - - -class HyperVDriverTestCase(test_base.HyperVBaseTestCase): - - FAKE_WIN_2008R2_VERSION = '6.0.0' - - @mock.patch.object(driver.HyperVDriver, '_check_minimum_windows_version') - def setUp(self, mock_check_minimum_windows_version): - super(HyperVDriverTestCase, self).setUp() - - self.context = 'context' - self.driver = driver.HyperVDriver(mock.sentinel.virtapi) - self.driver._hostops = mock.MagicMock() - self.driver._volumeops = mock.MagicMock() - self.driver._vmops = mock.MagicMock() - self.driver._snapshotops = mock.MagicMock() - self.driver._livemigrationops = mock.MagicMock() - self.driver._migrationops = mock.MagicMock() - self.driver._rdpconsoleops = mock.MagicMock() - self.driver._serialconsoleops = mock.MagicMock() - self.driver._imagecache = mock.MagicMock() - - @mock.patch.object(driver.LOG, 'warning') - @mock.patch.object(driver.utilsfactory, 'get_hostutils') - def test_check_minimum_windows_version(self, mock_get_hostutils, - mock_warning): - mock_hostutils = mock_get_hostutils.return_value - mock_hostutils.check_min_windows_version.return_value = False - - self.assertRaises(exception.HypervisorTooOld, - self.driver._check_minimum_windows_version) - - mock_hostutils.check_min_windows_version.side_effect = [True, False] - - self.driver._check_minimum_windows_version() - self.assertTrue(mock_warning.called) - - def test_public_api_signatures(self): - # NOTE(claudiub): wrapped functions do not keep the same signature in - # Python 2.7, which causes this test to fail. Instead, we should - # compare the public API signatures of the unwrapped methods. - - for attr in driver.HyperVDriver.__dict__: - class_member = getattr(driver.HyperVDriver, attr) - if callable(class_member): - mocked_method = mock.patch.object( - driver.HyperVDriver, attr, - safe_utils.get_wrapped_function(class_member)) - mocked_method.start() - self.addCleanup(mocked_method.stop) - - self.assertPublicAPISignatures(base_driver.ComputeDriver, - driver.HyperVDriver) - - def test_converted_exception(self): - self.driver._vmops.get_info.side_effect = ( - os_win_exc.OSWinException) - self.assertRaises(exception.NovaException, - self.driver.get_info, mock.sentinel.instance) - - self.driver._vmops.get_info.side_effect = os_win_exc.HyperVException - self.assertRaises(exception.NovaException, - self.driver.get_info, mock.sentinel.instance) - - self.driver._vmops.get_info.side_effect = ( - os_win_exc.HyperVVMNotFoundException(vm_name='foofoo')) - self.assertRaises(exception.InstanceNotFound, - self.driver.get_info, mock.sentinel.instance) - - def test_assert_original_traceback_maintained(self): - def bar(self): - foo = "foofoo" - raise os_win_exc.HyperVVMNotFoundException(vm_name=foo) - - self.driver._vmops.get_info.side_effect = bar - try: - self.driver.get_info(mock.sentinel.instance) - self.fail("Test expected exception, but it was not raised.") - except exception.InstanceNotFound: - # exception has been raised as expected. - _, _, trace = sys.exc_info() - while trace.tb_next: - # iterate until the original exception source, bar. - trace = trace.tb_next - - # original frame will contain the 'foo' variable. 
- self.assertEqual('foofoo', trace.tb_frame.f_locals['foo']) - - @mock.patch.object(driver.eventhandler, 'InstanceEventHandler') - def test_init_host(self, mock_InstanceEventHandler): - self.driver.init_host(mock.sentinel.host) - - mock_start_console_handlers = ( - self.driver._serialconsoleops.start_console_handlers) - mock_start_console_handlers.assert_called_once_with() - mock_InstanceEventHandler.assert_called_once_with( - state_change_callback=self.driver.emit_event) - fake_event_handler = mock_InstanceEventHandler.return_value - fake_event_handler.start_listener.assert_called_once_with() - - def test_list_instance_uuids(self): - self.driver.list_instance_uuids() - self.driver._vmops.list_instance_uuids.assert_called_once_with() - - def test_list_instances(self): - self.driver.list_instances() - self.driver._vmops.list_instances.assert_called_once_with() - - def test_spawn(self): - self.driver.spawn( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.image_meta, mock.sentinel.injected_files, - mock.sentinel.admin_password, mock.sentinel.allocations, - mock.sentinel.network_info, mock.sentinel.block_device_info) - - self.driver._vmops.spawn.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.image_meta, mock.sentinel.injected_files, - mock.sentinel.admin_password, mock.sentinel.network_info, - mock.sentinel.block_device_info) - - def test_reboot(self): - self.driver.reboot( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.reboot_type, - mock.sentinel.block_device_info, mock.sentinel.bad_vol_callback) - - self.driver._vmops.reboot.assert_called_once_with( - mock.sentinel.instance, mock.sentinel.network_info, - mock.sentinel.reboot_type) - - def test_destroy(self): - self.driver.destroy( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_device_info, - mock.sentinel.destroy_disks) - - self.driver._vmops.destroy.assert_called_once_with( - mock.sentinel.instance, mock.sentinel.network_info, - mock.sentinel.block_device_info, mock.sentinel.destroy_disks) - - def test_cleanup(self): - self.driver.cleanup( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_device_info, - mock.sentinel.destroy_disks, mock.sentinel.migrate_data, - mock.sentinel.destroy_vifs) - - self.driver._vmops.unplug_vifs.assert_called_once_with( - mock.sentinel.instance, mock.sentinel.network_info) - - def test_get_info(self): - self.driver.get_info(mock.sentinel.instance) - self.driver._vmops.get_info.assert_called_once_with( - mock.sentinel.instance) - - def test_attach_volume(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - self.driver.attach_volume( - mock.sentinel.context, mock.sentinel.connection_info, - mock_instance, mock.sentinel.mountpoint, mock.sentinel.disk_bus, - mock.sentinel.device_type, mock.sentinel.encryption) - - self.driver._volumeops.attach_volume.assert_called_once_with( - mock.sentinel.connection_info, - mock_instance.name) - - def test_detach_volume(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - self.driver.detach_volume( - mock.sentinel.context, mock.sentinel.connection_info, - mock_instance, mock.sentinel.mountpoint, mock.sentinel.encryption) - - self.driver._volumeops.detach_volume.assert_called_once_with( - mock.sentinel.connection_info, - mock_instance.name) - - def test_get_volume_connector(self): - self.driver.get_volume_connector(mock.sentinel.instance) 
- self.driver._volumeops.get_volume_connector.assert_called_once_with() - - def test_get_available_resource(self): - self.driver.get_available_resource(mock.sentinel.nodename) - self.driver._hostops.get_available_resource.assert_called_once_with() - - def test_get_available_nodes(self): - response = self.driver.get_available_nodes(mock.sentinel.refresh) - self.assertEqual([platform.node()], response) - - def test_host_power_action(self): - self.driver.host_power_action(mock.sentinel.action) - self.driver._hostops.host_power_action.assert_called_once_with( - mock.sentinel.action) - - def test_snapshot(self): - self.driver.snapshot( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.image_id, mock.sentinel.update_task_state) - - self.driver._snapshotops.snapshot.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.image_id, mock.sentinel.update_task_state) - - def test_pause(self): - self.driver.pause(mock.sentinel.instance) - self.driver._vmops.pause.assert_called_once_with( - mock.sentinel.instance) - - def test_unpause(self): - self.driver.unpause(mock.sentinel.instance) - self.driver._vmops.unpause.assert_called_once_with( - mock.sentinel.instance) - - def test_suspend(self): - self.driver.suspend(mock.sentinel.context, mock.sentinel.instance) - self.driver._vmops.suspend.assert_called_once_with( - mock.sentinel.instance) - - def test_resume(self): - self.driver.resume( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_device_info) - - self.driver._vmops.resume.assert_called_once_with( - mock.sentinel.instance) - - def test_power_off(self): - self.driver.power_off( - mock.sentinel.instance, mock.sentinel.timeout, - mock.sentinel.retry_interval) - - self.driver._vmops.power_off.assert_called_once_with( - mock.sentinel.instance, mock.sentinel.timeout, - mock.sentinel.retry_interval) - - def test_power_on(self): - self.driver.power_on( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_device_info) - - self.driver._vmops.power_on.assert_called_once_with( - mock.sentinel.instance, mock.sentinel.block_device_info, - mock.sentinel.network_info) - - def test_resume_state_on_host_boot(self): - self.driver.resume_state_on_host_boot( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_device_info) - - self.driver._vmops.resume_state_on_host_boot.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_device_info) - - def test_live_migration(self): - self.driver.live_migration( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.dest, mock.sentinel.post_method, - mock.sentinel.recover_method, mock.sentinel.block_migration, - mock.sentinel.migrate_data) - - self.driver._livemigrationops.live_migration.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.dest, mock.sentinel.post_method, - mock.sentinel.recover_method, mock.sentinel.block_migration, - mock.sentinel.migrate_data) - - @mock.patch.object(driver.HyperVDriver, 'destroy') - def test_rollback_live_migration_at_destination(self, mock_destroy): - self.driver.rollback_live_migration_at_destination( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_device_info, - mock.sentinel.destroy_disks, mock.sentinel.migrate_data) - - mock_destroy.assert_called_once_with( - mock.sentinel.context, 
mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_device_info, - destroy_disks=mock.sentinel.destroy_disks) - - def test_pre_live_migration(self): - migrate_data = self.driver.pre_live_migration( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.block_device_info, mock.sentinel.network_info, - mock.sentinel.disk_info, mock.sentinel.migrate_data) - - self.assertEqual(mock.sentinel.migrate_data, migrate_data) - pre_live_migration = self.driver._livemigrationops.pre_live_migration - pre_live_migration.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.block_device_info, mock.sentinel.network_info) - - def test_post_live_migration(self): - self.driver.post_live_migration( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.block_device_info, mock.sentinel.migrate_data) - - post_live_migration = self.driver._livemigrationops.post_live_migration - post_live_migration.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.block_device_info, - mock.sentinel.migrate_data) - - def test_post_live_migration_at_destination(self): - self.driver.post_live_migration_at_destination( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_migration, - mock.sentinel.block_device_info) - - mtd = self.driver._livemigrationops.post_live_migration_at_destination - mtd.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_migration) - - def test_check_can_live_migrate_destination(self): - self.driver.check_can_live_migrate_destination( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.src_compute_info, mock.sentinel.dst_compute_info, - mock.sentinel.block_migration, mock.sentinel.disk_over_commit) - - mtd = self.driver._livemigrationops.check_can_live_migrate_destination - mtd.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.src_compute_info, mock.sentinel.dst_compute_info, - mock.sentinel.block_migration, mock.sentinel.disk_over_commit) - - def test_cleanup_live_migration_destination_check(self): - self.driver.cleanup_live_migration_destination_check( - mock.sentinel.context, mock.sentinel.dest_check_data) - - _livemigrops = self.driver._livemigrationops - method = _livemigrops.cleanup_live_migration_destination_check - method.assert_called_once_with( - mock.sentinel.context, mock.sentinel.dest_check_data) - - def test_check_can_live_migrate_source(self): - self.driver.check_can_live_migrate_source( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.dest_check_data, mock.sentinel.block_device_info) - - method = self.driver._livemigrationops.check_can_live_migrate_source - method.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.dest_check_data) - - def test_plug_vifs(self): - self.driver.plug_vifs( - mock.sentinel.instance, mock.sentinel.network_info) - - self.driver._vmops.plug_vifs.assert_called_once_with( - mock.sentinel.instance, mock.sentinel.network_info) - - def test_unplug_vifs(self): - self.driver.unplug_vifs( - mock.sentinel.instance, mock.sentinel.network_info) - - self.driver._vmops.unplug_vifs.assert_called_once_with( - mock.sentinel.instance, mock.sentinel.network_info) - - def test_migrate_disk_and_power_off(self): - self.driver.migrate_disk_and_power_off( - mock.sentinel.context, mock.sentinel.instance, mock.sentinel.dest, - mock.sentinel.flavor, 
mock.sentinel.network_info, - mock.sentinel.block_device_info, mock.sentinel.timeout, - mock.sentinel.retry_interval) - - migr_power_off = self.driver._migrationops.migrate_disk_and_power_off - migr_power_off.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, mock.sentinel.dest, - mock.sentinel.flavor, mock.sentinel.network_info, - mock.sentinel.block_device_info, mock.sentinel.timeout, - mock.sentinel.retry_interval) - - def test_confirm_migration(self): - self.driver.confirm_migration( - mock.sentinel.context, - mock.sentinel.migration, mock.sentinel.instance, - mock.sentinel.network_info) - - self.driver._migrationops.confirm_migration.assert_called_once_with( - mock.sentinel.context, - mock.sentinel.migration, mock.sentinel.instance, - mock.sentinel.network_info) - - def test_finish_revert_migration(self): - self.driver.finish_revert_migration( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.migration, - mock.sentinel.block_device_info, mock.sentinel.power_on) - - finish_revert_migr = self.driver._migrationops.finish_revert_migration - finish_revert_migr.assert_called_once_with( - mock.sentinel.context, mock.sentinel.instance, - mock.sentinel.network_info, mock.sentinel.block_device_info, - mock.sentinel.power_on) - - def test_finish_migration(self): - self.driver.finish_migration( - mock.sentinel.context, mock.sentinel.migration, - mock.sentinel.instance, mock.sentinel.disk_info, - mock.sentinel.network_info, mock.sentinel.image_meta, - mock.sentinel.resize_instance, mock.sentinel.allocations, - mock.sentinel.block_device_info, - mock.sentinel.power_on) - - self.driver._migrationops.finish_migration.assert_called_once_with( - mock.sentinel.context, mock.sentinel.migration, - mock.sentinel.instance, mock.sentinel.disk_info, - mock.sentinel.network_info, mock.sentinel.image_meta, - mock.sentinel.resize_instance, mock.sentinel.block_device_info, - mock.sentinel.power_on) - - def test_get_host_ip_addr(self): - self.driver.get_host_ip_addr() - - self.driver._hostops.get_host_ip_addr.assert_called_once_with() - - def test_get_host_uptime(self): - self.driver.get_host_uptime() - self.driver._hostops.get_host_uptime.assert_called_once_with() - - def test_get_rdp_console(self): - self.driver.get_rdp_console( - mock.sentinel.context, mock.sentinel.instance) - self.driver._rdpconsoleops.get_rdp_console.assert_called_once_with( - mock.sentinel.instance) - - def test_get_console_output(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - self.driver.get_console_output(self.context, mock_instance) - - mock_get_console_output = ( - self.driver._serialconsoleops.get_console_output) - mock_get_console_output.assert_called_once_with( - mock_instance.name) - - def test_get_serial_console(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - self.driver.get_console_output(self.context, mock_instance) - - mock_get_serial_console = ( - self.driver._serialconsoleops.get_console_output) - mock_get_serial_console.assert_called_once_with( - mock_instance.name) - - def test_manage_image_cache(self): - self.driver.manage_image_cache(mock.sentinel.context, - mock.sentinel.all_instances) - self.driver._imagecache.update.assert_called_once_with( - mock.sentinel.context, mock.sentinel.all_instances) - - @mock.patch.object(driver.HyperVDriver, '_get_allocation_ratios') - def test_update_provider_tree(self, mock_get_alloc_ratios): - mock_ptree = mock.Mock() - mock_inventory = 
mock_ptree.data.return_value.inventory - - self.driver.update_provider_tree( - mock_ptree, mock.sentinel.nodename, mock.sentinel.allocations) - - mock_get_alloc_ratios.assert_called_once_with(mock_inventory) - self.driver._hostops.update_provider_tree.assert_called_once_with( - mock_ptree, mock.sentinel.nodename, - mock_get_alloc_ratios.return_value, - mock.sentinel.allocations) diff --git a/nova/tests/unit/virt/hyperv/test_eventhandler.py b/nova/tests/unit/virt/hyperv/test_eventhandler.py deleted file mode 100644 index 9825bc9141a0..000000000000 --- a/nova/tests/unit/virt/hyperv/test_eventhandler.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from os_win import constants -from os_win import exceptions as os_win_exc -from os_win import utilsfactory -from unittest import mock - -from nova.tests.unit.virt.hyperv import test_base -from nova import utils -from nova.virt.hyperv import eventhandler - - -class EventHandlerTestCase(test_base.HyperVBaseTestCase): - _FAKE_POLLING_INTERVAL = 3 - _FAKE_EVENT_CHECK_TIMEFRAME = 15 - - @mock.patch.object(utilsfactory, 'get_vmutils') - def setUp(self, mock_get_vmutils): - super(EventHandlerTestCase, self).setUp() - - self._state_change_callback = mock.Mock() - self.flags( - power_state_check_timeframe=self._FAKE_EVENT_CHECK_TIMEFRAME, - group='hyperv') - self.flags( - power_state_event_polling_interval=self._FAKE_POLLING_INTERVAL, - group='hyperv') - - self._event_handler = eventhandler.InstanceEventHandler( - self._state_change_callback) - self._event_handler._serial_console_ops = mock.Mock() - - @mock.patch.object(eventhandler.InstanceEventHandler, - '_get_instance_uuid') - @mock.patch.object(eventhandler.InstanceEventHandler, '_emit_event') - def _test_event_callback(self, mock_emit_event, mock_get_uuid, - missing_uuid=False): - mock_get_uuid.return_value = ( - mock.sentinel.instance_uuid if not missing_uuid else None) - self._event_handler._vmutils.get_vm_power_state.return_value = ( - mock.sentinel.power_state) - - self._event_handler._event_callback(mock.sentinel.instance_name, - mock.sentinel.power_state) - - if not missing_uuid: - mock_emit_event.assert_called_once_with( - mock.sentinel.instance_name, - mock.sentinel.instance_uuid, - mock.sentinel.power_state) - else: - self.assertFalse(mock_emit_event.called) - - def test_event_callback_uuid_present(self): - self._test_event_callback() - - def test_event_callback_missing_uuid(self): - self._test_event_callback(missing_uuid=True) - - @mock.patch.object(eventhandler.InstanceEventHandler, '_get_virt_event') - @mock.patch.object(utils, 'spawn_n') - def test_emit_event(self, mock_spawn, mock_get_event): - self._event_handler._emit_event(mock.sentinel.instance_name, - mock.sentinel.instance_uuid, - mock.sentinel.instance_state) - - virt_event = mock_get_event.return_value - mock_spawn.assert_has_calls( - [mock.call(self._state_change_callback, virt_event), - 
mock.call(self._event_handler._handle_serial_console_workers, - mock.sentinel.instance_name, - mock.sentinel.instance_state)]) - - def test_handle_serial_console_instance_running(self): - self._event_handler._handle_serial_console_workers( - mock.sentinel.instance_name, - constants.HYPERV_VM_STATE_ENABLED) - serialops = self._event_handler._serial_console_ops - serialops.start_console_handler.assert_called_once_with( - mock.sentinel.instance_name) - - def test_handle_serial_console_instance_stopped(self): - self._event_handler._handle_serial_console_workers( - mock.sentinel.instance_name, - constants.HYPERV_VM_STATE_DISABLED) - serialops = self._event_handler._serial_console_ops - serialops.stop_console_handler.assert_called_once_with( - mock.sentinel.instance_name) - - def _test_get_instance_uuid(self, instance_found=True, - missing_uuid=False): - if instance_found: - side_effect = (mock.sentinel.instance_uuid - if not missing_uuid else None, ) - else: - side_effect = os_win_exc.HyperVVMNotFoundException( - vm_name=mock.sentinel.instance_name) - mock_get_uuid = self._event_handler._vmutils.get_instance_uuid - mock_get_uuid.side_effect = side_effect - - instance_uuid = self._event_handler._get_instance_uuid( - mock.sentinel.instance_name) - - expected_uuid = (mock.sentinel.instance_uuid - if instance_found and not missing_uuid else None) - self.assertEqual(expected_uuid, instance_uuid) - - def test_get_nova_created_instance_uuid(self): - self._test_get_instance_uuid() - - def test_get_deleted_instance_uuid(self): - self._test_get_instance_uuid(instance_found=False) - - def test_get_instance_uuid_missing_notes(self): - self._test_get_instance_uuid(missing_uuid=True) - - @mock.patch('nova.virt.event.LifecycleEvent') - def test_get_virt_event(self, mock_lifecycle_event): - instance_state = constants.HYPERV_VM_STATE_ENABLED - expected_transition = self._event_handler._TRANSITION_MAP[ - instance_state] - - virt_event = self._event_handler._get_virt_event( - mock.sentinel.instance_uuid, instance_state) - - self.assertEqual(mock_lifecycle_event.return_value, - virt_event) - mock_lifecycle_event.assert_called_once_with( - uuid=mock.sentinel.instance_uuid, - transition=expected_transition) diff --git a/nova/tests/unit/virt/hyperv/test_hostops.py b/nova/tests/unit/virt/hyperv/test_hostops.py deleted file mode 100644 index 04434dd37e67..000000000000 --- a/nova/tests/unit/virt/hyperv/test_hostops.py +++ /dev/null @@ -1,317 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
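# A minimal illustrative sketch (the helper name and signature are
# assumptions, not the removed driver's API): test_get_hypervisor_version
# below expects the Windows version string "major.minor.build" to be
# flattened into a single integer as major * 1000 + minor, so '6.3.9600'
# maps to 6003 and '10.1.0' to 10001.
def pack_hypervisor_version(windows_version: str) -> int:
    major, minor = windows_version.split('.')[:2]
    return int(major) * 1000 + int(minor)


assert pack_hypervisor_version('6.3.9600') == 6003
assert pack_hypervisor_version('10.1.0') == 10001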
- -import datetime -from unittest import mock - -import os_resource_classes as orc -from os_win import constants as os_win_const -from oslo_config import cfg -from oslo_serialization import jsonutils -from oslo_utils import units - -from nova.objects import fields as obj_fields -from nova.tests.unit.virt.hyperv import test_base -from nova.virt.hyperv import constants -from nova.virt.hyperv import hostops - -CONF = cfg.CONF - - -class HostOpsTestCase(test_base.HyperVBaseTestCase): - """Unit tests for the Hyper-V HostOps class.""" - - FAKE_ARCHITECTURE = 0 - FAKE_NAME = 'fake_name' - FAKE_MANUFACTURER = 'FAKE_MANUFACTURER' - FAKE_NUM_CPUS = 1 - FAKE_INSTANCE_DIR = "C:/fake/dir" - FAKE_LOCAL_IP = '10.11.12.13' - FAKE_TICK_COUNT = 1000000 - - def setUp(self): - super(HostOpsTestCase, self).setUp() - self._hostops = hostops.HostOps() - self._hostops._hostutils = mock.MagicMock() - self._hostops._pathutils = mock.MagicMock() - self._hostops._diskutils = mock.MagicMock() - - def test_get_cpu_info(self): - mock_processors = mock.MagicMock() - info = {'Architecture': self.FAKE_ARCHITECTURE, - 'Name': self.FAKE_NAME, - 'Manufacturer': self.FAKE_MANUFACTURER, - 'NumberOfCores': self.FAKE_NUM_CPUS, - 'NumberOfLogicalProcessors': self.FAKE_NUM_CPUS} - - def getitem(key): - return info[key] - mock_processors.__getitem__.side_effect = getitem - self._hostops._hostutils.get_cpus_info.return_value = [mock_processors] - - response = self._hostops._get_cpu_info() - - self._hostops._hostutils.get_cpus_info.assert_called_once_with() - - expected = [mock.call(fkey) - for fkey in os_win_const.PROCESSOR_FEATURE.keys()] - self._hostops._hostutils.is_cpu_feature_present.assert_has_calls( - expected, any_order=True) - expected_response = self._get_mock_cpu_info() - self.assertEqual(expected_response, response) - - def _get_mock_cpu_info(self): - return {'vendor': self.FAKE_MANUFACTURER, - 'model': self.FAKE_NAME, - 'arch': constants.WMI_WIN32_PROCESSOR_ARCHITECTURE[ - self.FAKE_ARCHITECTURE], - 'features': list(os_win_const.PROCESSOR_FEATURE.values()), - 'topology': {'cores': self.FAKE_NUM_CPUS, - 'threads': self.FAKE_NUM_CPUS, - 'sockets': self.FAKE_NUM_CPUS}} - - def _get_mock_gpu_info(self): - return {'remotefx_total_video_ram': 4096, - 'remotefx_available_video_ram': 2048, - 'remotefx_gpu_info': mock.sentinel.FAKE_GPU_INFO} - - def test_get_memory_info(self): - self._hostops._hostutils.get_memory_info.return_value = (2 * units.Ki, - 1 * units.Ki) - response = self._hostops._get_memory_info() - self._hostops._hostutils.get_memory_info.assert_called_once_with() - self.assertEqual((2, 1, 1), response) - - def test_get_storage_info_gb(self): - self._hostops._pathutils.get_instances_dir.return_value = '' - self._hostops._diskutils.get_disk_capacity.return_value = ( - 2 * units.Gi, 1 * units.Gi) - - response = self._hostops._get_storage_info_gb() - self._hostops._pathutils.get_instances_dir.assert_called_once_with() - self._hostops._diskutils.get_disk_capacity.assert_called_once_with('') - self.assertEqual((2, 1, 1), response) - - def test_get_hypervisor_version(self): - self._hostops._hostutils.get_windows_version.return_value = '6.3.9600' - response_lower = self._hostops._get_hypervisor_version() - - self._hostops._hostutils.get_windows_version.return_value = '10.1.0' - response_higher = self._hostops._get_hypervisor_version() - - self.assertEqual(6003, response_lower) - self.assertEqual(10001, response_higher) - - def test_get_remotefx_gpu_info(self): - self.flags(enable_remotefx=True, group='hyperv') - fake_gpus = 
[{'total_video_ram': '2048', - 'available_video_ram': '1024'}, - {'total_video_ram': '1024', - 'available_video_ram': '1024'}] - self._hostops._hostutils.get_remotefx_gpu_info.return_value = fake_gpus - - ret_val = self._hostops._get_remotefx_gpu_info() - - self.assertEqual(3072, ret_val['total_video_ram']) - self.assertEqual(1024, ret_val['used_video_ram']) - - def test_get_remotefx_gpu_info_disabled(self): - self.flags(enable_remotefx=False, group='hyperv') - - ret_val = self._hostops._get_remotefx_gpu_info() - - self.assertEqual(0, ret_val['total_video_ram']) - self.assertEqual(0, ret_val['used_video_ram']) - self._hostops._hostutils.get_remotefx_gpu_info.assert_not_called() - - @mock.patch.object(hostops.objects, 'NUMACell') - @mock.patch.object(hostops.objects, 'NUMATopology') - def test_get_host_numa_topology(self, mock_NUMATopology, mock_NUMACell): - numa_node = {'id': mock.sentinel.id, 'memory': mock.sentinel.memory, - 'memory_usage': mock.sentinel.memory_usage, - 'cpuset': mock.sentinel.cpuset, - 'cpu_usage': mock.sentinel.cpu_usage} - self._hostops._hostutils.get_numa_nodes.return_value = [ - numa_node.copy()] - - result = self._hostops._get_host_numa_topology() - - self.assertEqual(mock_NUMATopology.return_value, result) - mock_NUMACell.assert_called_once_with( - pinned_cpus=set([]), mempages=[], siblings=[], **numa_node) - mock_NUMATopology.assert_called_once_with( - cells=[mock_NUMACell.return_value]) - - @mock.patch.object(hostops.HostOps, '_get_pci_passthrough_devices') - @mock.patch.object(hostops.HostOps, '_get_host_numa_topology') - @mock.patch.object(hostops.HostOps, '_get_remotefx_gpu_info') - @mock.patch.object(hostops.HostOps, '_get_cpu_info') - @mock.patch.object(hostops.HostOps, '_get_memory_info') - @mock.patch.object(hostops.HostOps, '_get_hypervisor_version') - @mock.patch.object(hostops.HostOps, '_get_storage_info_gb') - @mock.patch('platform.node') - def test_get_available_resource(self, mock_node, - mock_get_storage_info_gb, - mock_get_hypervisor_version, - mock_get_memory_info, mock_get_cpu_info, - mock_get_gpu_info, mock_get_numa_topology, - mock_get_pci_devices): - mock_get_storage_info_gb.return_value = (mock.sentinel.LOCAL_GB, - mock.sentinel.LOCAL_GB_FREE, - mock.sentinel.LOCAL_GB_USED) - mock_get_memory_info.return_value = (mock.sentinel.MEMORY_MB, - mock.sentinel.MEMORY_MB_FREE, - mock.sentinel.MEMORY_MB_USED) - mock_cpu_info = self._get_mock_cpu_info() - mock_get_cpu_info.return_value = mock_cpu_info - mock_get_hypervisor_version.return_value = mock.sentinel.VERSION - mock_get_numa_topology.return_value._to_json.return_value = ( - mock.sentinel.numa_topology_json) - mock_get_pci_devices.return_value = mock.sentinel.pcis - - mock_gpu_info = self._get_mock_gpu_info() - mock_get_gpu_info.return_value = mock_gpu_info - - response = self._hostops.get_available_resource() - - mock_get_memory_info.assert_called_once_with() - mock_get_cpu_info.assert_called_once_with() - mock_get_hypervisor_version.assert_called_once_with() - mock_get_pci_devices.assert_called_once_with() - expected = {'supported_instances': [("i686", "hyperv", "hvm"), - ("x86_64", "hyperv", "hvm")], - 'hypervisor_hostname': mock_node(), - 'cpu_info': jsonutils.dumps(mock_cpu_info), - 'hypervisor_version': mock.sentinel.VERSION, - 'memory_mb': mock.sentinel.MEMORY_MB, - 'memory_mb_used': mock.sentinel.MEMORY_MB_USED, - 'local_gb': mock.sentinel.LOCAL_GB, - 'local_gb_used': mock.sentinel.LOCAL_GB_USED, - 'disk_available_least': mock.sentinel.LOCAL_GB_FREE, - 'vcpus': self.FAKE_NUM_CPUS, - 
'vcpus_used': 0, - 'hypervisor_type': 'hyperv', - 'numa_topology': mock.sentinel.numa_topology_json, - 'remotefx_available_video_ram': 2048, - 'remotefx_gpu_info': mock.sentinel.FAKE_GPU_INFO, - 'remotefx_total_video_ram': 4096, - 'pci_passthrough_devices': mock.sentinel.pcis, - } - self.assertEqual(expected, response) - - @mock.patch.object(hostops.jsonutils, 'dumps') - def test_get_pci_passthrough_devices(self, mock_jsonutils_dumps): - mock_pci_dev = {'vendor_id': 'fake_vendor_id', - 'product_id': 'fake_product_id', - 'dev_id': 'fake_dev_id', - 'address': 'fake_address'} - mock_get_pcis = self._hostops._hostutils.get_pci_passthrough_devices - mock_get_pcis.return_value = [mock_pci_dev] - - expected_label = 'label_%(vendor_id)s_%(product_id)s' % { - 'vendor_id': mock_pci_dev['vendor_id'], - 'product_id': mock_pci_dev['product_id']} - expected_pci_dev = mock_pci_dev.copy() - expected_pci_dev.update(dev_type=obj_fields.PciDeviceType.STANDARD, - label= expected_label, - numa_node=None) - - result = self._hostops._get_pci_passthrough_devices() - - self.assertEqual(mock_jsonutils_dumps.return_value, result) - mock_jsonutils_dumps.assert_called_once_with([expected_pci_dev]) - - def _test_host_power_action(self, action): - self._hostops._hostutils.host_power_action = mock.Mock() - - self._hostops.host_power_action(action) - self._hostops._hostutils.host_power_action.assert_called_with( - action) - - def test_host_power_action_shutdown(self): - self._test_host_power_action(constants.HOST_POWER_ACTION_SHUTDOWN) - - def test_host_power_action_reboot(self): - self._test_host_power_action(constants.HOST_POWER_ACTION_REBOOT) - - def test_host_power_action_exception(self): - self.assertRaises(NotImplementedError, - self._hostops.host_power_action, - constants.HOST_POWER_ACTION_STARTUP) - - def test_get_host_ip_addr(self): - CONF.set_override('my_ip', None) - self._hostops._hostutils.get_local_ips.return_value = [ - self.FAKE_LOCAL_IP] - response = self._hostops.get_host_ip_addr() - self._hostops._hostutils.get_local_ips.assert_called_once_with() - self.assertEqual(self.FAKE_LOCAL_IP, response) - - @mock.patch('time.strftime') - def test_get_host_uptime(self, mock_time): - self._hostops._hostutils.get_host_tick_count64.return_value = ( - self.FAKE_TICK_COUNT) - - response = self._hostops.get_host_uptime() - tdelta = datetime.timedelta(milliseconds=int(self.FAKE_TICK_COUNT)) - expected = "%s up %s, 0 users, load average: 0, 0, 0" % ( - str(mock_time()), str(tdelta)) - - self.assertEqual(expected, response) - - @mock.patch.object(hostops.HostOps, 'get_available_resource') - def test_update_provider_tree(self, mock_get_avail_res): - resources = mock.MagicMock() - allocation_ratios = mock.MagicMock() - provider_tree = mock.Mock() - - mock_get_avail_res.return_value = resources - - self.flags(reserved_host_disk_mb=1) - - exp_inventory = { - orc.VCPU: { - 'total': resources['vcpus'], - 'min_unit': 1, - 'max_unit': resources['vcpus'], - 'step_size': 1, - 'allocation_ratio': allocation_ratios[orc.VCPU], - 'reserved': CONF.reserved_host_cpus, - }, - orc.MEMORY_MB: { - 'total': resources['memory_mb'], - 'min_unit': 1, - 'max_unit': resources['memory_mb'], - 'step_size': 1, - 'allocation_ratio': allocation_ratios[orc.MEMORY_MB], - 'reserved': CONF.reserved_host_memory_mb, - }, - orc.DISK_GB: { - 'total': resources['local_gb'], - 'min_unit': 1, - 'max_unit': resources['local_gb'], - 'step_size': 1, - 'allocation_ratio': allocation_ratios[orc.DISK_GB], - 'reserved': 1, - }, - } - - 
self._hostops.update_provider_tree( - provider_tree, mock.sentinel.node_name, allocation_ratios, - mock.sentinel.allocations) - - provider_tree.update_inventory.assert_called_once_with( - mock.sentinel.node_name, - exp_inventory) diff --git a/nova/tests/unit/virt/hyperv/test_imagecache.py b/nova/tests/unit/virt/hyperv/test_imagecache.py deleted file mode 100644 index 827d52133df0..000000000000 --- a/nova/tests/unit/virt/hyperv/test_imagecache.py +++ /dev/null @@ -1,304 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -from unittest import mock - -import ddt -import fixtures -from oslo_config import cfg -from oslo_utils.fixture import uuidsentinel as uuids -from oslo_utils import units - -from nova import exception -from nova import objects -from nova.tests.unit import fake_instance -from nova.tests.unit.objects import test_flavor -from nova.tests.unit.virt.hyperv import test_base -from nova.virt.hyperv import constants -from nova.virt.hyperv import imagecache - -CONF = cfg.CONF - - -@ddt.ddt -class ImageCacheTestCase(test_base.HyperVBaseTestCase): - """Unit tests for the Hyper-V ImageCache class.""" - - FAKE_FORMAT = 'fake_format' - FAKE_IMAGE_REF = 'fake_image_ref' - FAKE_VHD_SIZE_GB = 1 - - def setUp(self): - super(ImageCacheTestCase, self).setUp() - - self.context = 'fake-context' - self.instance = fake_instance.fake_instance_obj(self.context) - - # utilsfactory will check the host OS version via get_hostutils, - # in order to return the proper Utils Class, so it must be mocked. 
- patched_get_hostutils = mock.patch.object(imagecache.utilsfactory, - "get_hostutils") - patched_get_vhdutils = mock.patch.object(imagecache.utilsfactory, - "get_vhdutils") - patched_get_hostutils.start() - patched_get_vhdutils.start() - - self.addCleanup(patched_get_hostutils.stop) - self.addCleanup(patched_get_vhdutils.stop) - - self.imagecache = imagecache.ImageCache() - self.imagecache._pathutils = mock.MagicMock() - self.imagecache._vhdutils = mock.MagicMock() - - self.tmpdir = self.useFixture(fixtures.TempDir()).path - - def _test_get_root_vhd_size_gb(self, old_flavor=True): - if old_flavor: - mock_flavor = objects.Flavor(**test_flavor.fake_flavor) - self.instance.old_flavor = mock_flavor - else: - self.instance.old_flavor = None - return self.imagecache._get_root_vhd_size_gb(self.instance) - - def test_get_root_vhd_size_gb_old_flavor(self): - ret_val = self._test_get_root_vhd_size_gb() - self.assertEqual(test_flavor.fake_flavor['root_gb'], ret_val) - - def test_get_root_vhd_size_gb(self): - ret_val = self._test_get_root_vhd_size_gb(old_flavor=False) - self.assertEqual(self.instance.flavor.root_gb, ret_val) - - @mock.patch.object(imagecache.ImageCache, '_get_root_vhd_size_gb') - def test_resize_and_cache_vhd_smaller(self, mock_get_vhd_size_gb): - self.imagecache._vhdutils.get_vhd_size.return_value = { - 'VirtualSize': (self.FAKE_VHD_SIZE_GB + 1) * units.Gi - } - mock_get_vhd_size_gb.return_value = self.FAKE_VHD_SIZE_GB - mock_internal_vhd_size = ( - self.imagecache._vhdutils.get_internal_vhd_size_by_file_size) - mock_internal_vhd_size.return_value = self.FAKE_VHD_SIZE_GB * units.Gi - - self.assertRaises(exception.FlavorDiskSmallerThanImage, - self.imagecache._resize_and_cache_vhd, - mock.sentinel.instance, - mock.sentinel.vhd_path) - - self.imagecache._vhdutils.get_vhd_size.assert_called_once_with( - mock.sentinel.vhd_path) - mock_get_vhd_size_gb.assert_called_once_with(mock.sentinel.instance) - mock_internal_vhd_size.assert_called_once_with( - mock.sentinel.vhd_path, self.FAKE_VHD_SIZE_GB * units.Gi) - - def _prepare_get_cached_image(self, path_exists=False, use_cow=False, - rescue_image_id=None): - self.instance.image_ref = self.FAKE_IMAGE_REF - self.imagecache._pathutils.get_base_vhd_dir.return_value = ( - self.tmpdir) - self.imagecache._pathutils.exists.return_value = path_exists - self.imagecache._vhdutils.get_vhd_format.return_value = ( - constants.DISK_FORMAT_VHD) - - CONF.set_override('use_cow_images', use_cow) - - image_file_name = rescue_image_id or self.FAKE_IMAGE_REF - expected_path = os.path.join(self.tmpdir, - image_file_name) - expected_vhd_path = "%s.%s" % (expected_path, - constants.DISK_FORMAT_VHD.lower()) - return (expected_path, expected_vhd_path) - - @mock.patch.object(imagecache.images, 'fetch') - def test_get_cached_image_with_fetch(self, mock_fetch): - (expected_path, - expected_vhd_path) = self._prepare_get_cached_image(False, False) - - result = self.imagecache.get_cached_image(self.context, self.instance) - self.assertEqual(expected_vhd_path, result) - - mock_fetch.assert_called_once_with(self.context, self.FAKE_IMAGE_REF, - expected_path) - self.imagecache._vhdutils.get_vhd_format.assert_called_once_with( - expected_path) - self.imagecache._pathutils.rename.assert_called_once_with( - expected_path, expected_vhd_path) - - @mock.patch.object(imagecache.images, 'fetch') - def test_get_cached_image_with_fetch_exception(self, mock_fetch): - (expected_path, - expected_vhd_path) = self._prepare_get_cached_image(False, False) - - # path doesn't exist until 
fetched. - self.imagecache._pathutils.exists.side_effect = [False, False, True] - mock_fetch.side_effect = exception.InvalidImageRef( - image_href=self.FAKE_IMAGE_REF) - - self.assertRaises(exception.InvalidImageRef, - self.imagecache.get_cached_image, - self.context, self.instance) - - self.imagecache._pathutils.remove.assert_called_once_with( - expected_path) - - @mock.patch.object(imagecache.ImageCache, '_resize_and_cache_vhd') - def test_get_cached_image_use_cow(self, mock_resize): - (expected_path, - expected_vhd_path) = self._prepare_get_cached_image(True, True) - - expected_resized_vhd_path = expected_vhd_path + 'x' - mock_resize.return_value = expected_resized_vhd_path - - result = self.imagecache.get_cached_image(self.context, self.instance) - self.assertEqual(expected_resized_vhd_path, result) - - mock_resize.assert_called_once_with(self.instance, expected_vhd_path) - - @mock.patch.object(imagecache.images, 'fetch') - def test_cache_rescue_image_bigger_than_flavor(self, mock_fetch): - fake_rescue_image_id = 'fake_rescue_image_id' - - self.imagecache._vhdutils.get_vhd_info.return_value = { - 'VirtualSize': (self.instance.flavor.root_gb + 1) * units.Gi} - (expected_path, - expected_vhd_path) = self._prepare_get_cached_image( - rescue_image_id=fake_rescue_image_id) - - self.assertRaises(exception.ImageUnacceptable, - self.imagecache.get_cached_image, - self.context, self.instance, - fake_rescue_image_id) - - mock_fetch.assert_called_once_with(self.context, - fake_rescue_image_id, - expected_path) - self.imagecache._vhdutils.get_vhd_info.assert_called_once_with( - expected_vhd_path) - - @ddt.data(True, False) - def test_age_and_verify_cached_images(self, remove_unused_base_images): - self.flags(remove_unused_base_images=remove_unused_base_images, - group='image_cache') - - fake_images = [mock.sentinel.FAKE_IMG1, mock.sentinel.FAKE_IMG2] - fake_used_images = [mock.sentinel.FAKE_IMG1] - - self.imagecache.originals = fake_images - self.imagecache.used_images = fake_used_images - - self.imagecache._update_image_timestamp = mock.Mock() - self.imagecache._remove_if_old_image = mock.Mock() - - self.imagecache._age_and_verify_cached_images( - mock.sentinel.FAKE_CONTEXT, - mock.sentinel.all_instances, - mock.sentinel.tmpdir) - - self.imagecache._update_image_timestamp.assert_called_once_with( - mock.sentinel.FAKE_IMG1) - - if remove_unused_base_images: - self.imagecache._remove_if_old_image.assert_called_once_with( - mock.sentinel.FAKE_IMG2) - else: - self.imagecache._remove_if_old_image.assert_not_called() - - @mock.patch.object(imagecache.os, 'utime') - @mock.patch.object(imagecache.ImageCache, '_get_image_backing_files') - def test_update_image_timestamp(self, mock_get_backing_files, mock_utime): - mock_get_backing_files.return_value = [mock.sentinel.backing_file, - mock.sentinel.resized_file] - - self.imagecache._update_image_timestamp(mock.sentinel.image) - - mock_get_backing_files.assert_called_once_with(mock.sentinel.image) - mock_utime.assert_has_calls([ - mock.call(mock.sentinel.backing_file, None), - mock.call(mock.sentinel.resized_file, None)]) - - def test_get_image_backing_files(self): - image = 'fake-img' - self.imagecache.unexplained_images = ['%s_42' % image, - 'unexplained-img'] - self.imagecache._pathutils.get_image_path.side_effect = [ - mock.sentinel.base_file, mock.sentinel.resized_file] - - backing_files = self.imagecache._get_image_backing_files(image) - - self.assertEqual([mock.sentinel.base_file, mock.sentinel.resized_file], - backing_files) - 
self.imagecache._pathutils.get_image_path.assert_has_calls( - [mock.call(image), mock.call('%s_42' % image)]) - - @mock.patch.object(imagecache.ImageCache, '_get_image_backing_files') - def test_remove_if_old_image(self, mock_get_backing_files): - mock_get_backing_files.return_value = [mock.sentinel.backing_file, - mock.sentinel.resized_file] - self.imagecache._pathutils.get_age_of_file.return_value = 3600 - - self.imagecache._remove_if_old_image(mock.sentinel.image) - - calls = [mock.call(mock.sentinel.backing_file), - mock.call(mock.sentinel.resized_file)] - self.imagecache._pathutils.get_age_of_file.assert_has_calls(calls) - mock_get_backing_files.assert_called_once_with(mock.sentinel.image) - - def test_remove_old_image(self): - fake_img_path = os.path.join(self.tmpdir, - self.FAKE_IMAGE_REF) - self.imagecache._remove_old_image(fake_img_path) - self.imagecache._pathutils.remove.assert_called_once_with( - fake_img_path) - - @mock.patch.object(imagecache.ImageCache, '_age_and_verify_cached_images') - @mock.patch.object(imagecache.ImageCache, '_list_base_images') - @mock.patch.object(imagecache.ImageCache, '_list_running_instances') - def test_update(self, mock_list_instances, mock_list_images, - mock_age_cached_images): - base_vhd_dir = self.imagecache._pathutils.get_base_vhd_dir.return_value - mock_list_instances.return_value = { - 'used_images': {mock.sentinel.image: mock.sentinel.instances}} - mock_list_images.return_value = { - 'originals': [mock.sentinel.original_image], - 'unexplained_images': [mock.sentinel.unexplained_image]} - - self.imagecache.update(mock.sentinel.context, - mock.sentinel.all_instances) - - self.assertEqual([mock.sentinel.image], - list(self.imagecache.used_images)) - self.assertEqual([mock.sentinel.original_image], - self.imagecache.originals) - self.assertEqual([mock.sentinel.unexplained_image], - self.imagecache.unexplained_images) - mock_list_instances.assert_called_once_with( - mock.sentinel.context, mock.sentinel.all_instances) - mock_list_images.assert_called_once_with(base_vhd_dir) - mock_age_cached_images.assert_called_once_with( - mock.sentinel.context, mock.sentinel.all_instances, base_vhd_dir) - - @mock.patch.object(imagecache.os, 'listdir') - def test_list_base_images(self, mock_listdir): - original_image = uuids.fake - unexplained_image = 'just-an-image' - ignored_file = 'foo.bar' - mock_listdir.return_value = ['%s.VHD' % original_image, - '%s.vhdx' % unexplained_image, - ignored_file] - - images = self.imagecache._list_base_images(mock.sentinel.base_dir) - - self.assertEqual([original_image], images['originals']) - self.assertEqual([unexplained_image], images['unexplained_images']) - mock_listdir.assert_called_once_with(mock.sentinel.base_dir) diff --git a/nova/tests/unit/virt/hyperv/test_livemigrationops.py b/nova/tests/unit/virt/hyperv/test_livemigrationops.py deleted file mode 100644 index 79cb4318c58e..000000000000 --- a/nova/tests/unit/virt/hyperv/test_livemigrationops.py +++ /dev/null @@ -1,239 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from os_win import exceptions as os_win_exc -from unittest import mock - -from oslo_config import cfg - -from nova import exception -from nova.objects import migrate_data as migrate_data_obj -from nova.tests.unit import fake_instance -from nova.tests.unit.virt.hyperv import test_base -from nova.virt.hyperv import livemigrationops -from nova.virt.hyperv import serialconsoleops - -CONF = cfg.CONF - - -class LiveMigrationOpsTestCase(test_base.HyperVBaseTestCase): - """Unit tests for the Hyper-V LiveMigrationOps class.""" - - def setUp(self): - super(LiveMigrationOpsTestCase, self).setUp() - self.context = 'fake_context' - self._livemigrops = livemigrationops.LiveMigrationOps() - self._livemigrops._livemigrutils = mock.MagicMock() - self._livemigrops._pathutils = mock.MagicMock() - self._livemigrops._block_dev_man = mock.MagicMock() - self._pathutils = self._livemigrops._pathutils - - @mock.patch.object(serialconsoleops.SerialConsoleOps, - 'stop_console_handler') - @mock.patch('nova.virt.hyperv.vmops.VMOps.copy_vm_dvd_disks') - def _test_live_migration(self, mock_copy_dvd_disk, - mock_stop_console_handler, - side_effect=None, - shared_storage=False, - migrate_data_received=True, - migrate_data_version='1.1'): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_post = mock.MagicMock() - mock_recover = mock.MagicMock() - mock_copy_logs = self._livemigrops._pathutils.copy_vm_console_logs - fake_dest = mock.sentinel.DESTINATION - mock_check_shared_inst_dir = ( - self._pathutils.check_remote_instances_dir_shared) - mock_check_shared_inst_dir.return_value = shared_storage - self._livemigrops._livemigrutils.live_migrate_vm.side_effect = [ - side_effect] - - if migrate_data_received: - migrate_data = migrate_data_obj.HyperVLiveMigrateData() - if migrate_data_version != '1.0': - migrate_data.is_shared_instance_path = shared_storage - else: - migrate_data = None - - self._livemigrops.live_migration(context=self.context, - instance_ref=mock_instance, - dest=fake_dest, - post_method=mock_post, - recover_method=mock_recover, - block_migration=( - mock.sentinel.block_migr), - migrate_data=migrate_data) - - if side_effect is os_win_exc.HyperVException: - mock_recover.assert_called_once_with(self.context, mock_instance, - fake_dest, - migrate_data) - mock_post.assert_not_called() - else: - post_call_args = mock_post.call_args_list - self.assertEqual(1, len(post_call_args)) - - post_call_args_list = post_call_args[0][0] - self.assertEqual((self.context, mock_instance, - fake_dest, mock.sentinel.block_migr), - post_call_args_list[:-1]) - # The last argument, the migrate_data object, should be created - # by the callee if not received. 
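# A minimal sketch of the behaviour the assertions below verify, with an
# assumed helper name and simplified signature (this is not the removed
# driver code): when the source compute sends no migrate_data, or an old
# object without the flag set, the callee builds/completes the object
# before handing it to post_method.
from nova.objects import migrate_data as migrate_data_obj


def ensure_migrate_data(migrate_data, pathutils, dest):
    if migrate_data is None:
        migrate_data = migrate_data_obj.HyperVLiveMigrateData()
    if not migrate_data.obj_attr_is_set('is_shared_instance_path'):
        # Probe the destination only when the field was not provided.
        migrate_data.is_shared_instance_path = (
            pathutils.check_remote_instances_dir_shared(dest))
    return migrate_data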
- migrate_data_arg = post_call_args_list[-1] - self.assertIsInstance( - migrate_data_arg, - migrate_data_obj.HyperVLiveMigrateData) - self.assertEqual(shared_storage, - migrate_data_arg.is_shared_instance_path) - - if not migrate_data_received or migrate_data_version == '1.0': - mock_check_shared_inst_dir.assert_called_once_with(fake_dest) - else: - self.assertFalse(mock_check_shared_inst_dir.called) - - mock_stop_console_handler.assert_called_once_with(mock_instance.name) - - if not shared_storage: - mock_copy_logs.assert_called_once_with(mock_instance.name, - fake_dest) - mock_copy_dvd_disk.assert_called_once_with(mock_instance.name, - fake_dest) - else: - self.assertFalse(mock_copy_logs.called) - self.assertFalse(mock_copy_dvd_disk.called) - - mock_live_migr = self._livemigrops._livemigrutils.live_migrate_vm - mock_live_migr.assert_called_once_with( - mock_instance.name, - fake_dest, - migrate_disks=not shared_storage) - - def test_live_migration(self): - self._test_live_migration(migrate_data_received=False) - - def test_live_migration_old_migrate_data_version(self): - self._test_live_migration(migrate_data_version='1.0') - - def test_live_migration_exception(self): - self._test_live_migration(side_effect=os_win_exc.HyperVException) - - def test_live_migration_shared_storage(self): - self._test_live_migration(shared_storage=True) - - @mock.patch('nova.virt.hyperv.volumeops.VolumeOps.get_disk_path_mapping') - @mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image') - @mock.patch('nova.virt.hyperv.volumeops.VolumeOps.connect_volumes') - def _test_pre_live_migration(self, mock_initialize_connection, - mock_get_cached_image, - mock_get_disk_path_mapping, - phys_disks_attached=True): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.image_ref = "fake_image_ref" - mock_get_disk_path_mapping.return_value = ( - mock.sentinel.disk_path_mapping if phys_disks_attached - else None) - bdman = self._livemigrops._block_dev_man - mock_is_boot_from_vol = bdman.is_boot_from_volume - mock_is_boot_from_vol.return_value = None - CONF.set_override('use_cow_images', True) - self._livemigrops.pre_live_migration( - self.context, mock_instance, - block_device_info=mock.sentinel.BLOCK_INFO, - network_info=mock.sentinel.NET_INFO) - - check_config = ( - self._livemigrops._livemigrutils.check_live_migration_config) - check_config.assert_called_once_with() - mock_is_boot_from_vol.assert_called_once_with( - mock.sentinel.BLOCK_INFO) - mock_get_cached_image.assert_called_once_with(self.context, - mock_instance) - mock_initialize_connection.assert_called_once_with( - mock.sentinel.BLOCK_INFO) - mock_get_disk_path_mapping.assert_called_once_with( - mock.sentinel.BLOCK_INFO) - if phys_disks_attached: - livemigrutils = self._livemigrops._livemigrutils - livemigrutils.create_planned_vm.assert_called_once_with( - mock_instance.name, - mock_instance.host, - mock.sentinel.disk_path_mapping) - - def test_pre_live_migration(self): - self._test_pre_live_migration() - - def test_pre_live_migration_invalid_disk_mapping(self): - self._test_pre_live_migration(phys_disks_attached=False) - - @mock.patch('nova.virt.hyperv.volumeops.VolumeOps.disconnect_volumes') - def _test_post_live_migration(self, mock_disconnect_volumes, - shared_storage=False): - migrate_data = migrate_data_obj.HyperVLiveMigrateData( - is_shared_instance_path=shared_storage) - - self._livemigrops.post_live_migration( - self.context, mock.sentinel.instance, - mock.sentinel.block_device_info, - migrate_data) - 
mock_disconnect_volumes.assert_called_once_with( - mock.sentinel.block_device_info) - mock_get_inst_dir = self._pathutils.get_instance_dir - - if not shared_storage: - mock_get_inst_dir.assert_called_once_with( - mock.sentinel.instance.name, - create_dir=False, remove_dir=True) - else: - self.assertFalse(mock_get_inst_dir.called) - - def test_post_block_migration(self): - self._test_post_live_migration() - - def test_post_live_migration_shared_storage(self): - self._test_post_live_migration(shared_storage=True) - - @mock.patch.object(migrate_data_obj, 'HyperVLiveMigrateData') - def test_check_can_live_migrate_destination(self, mock_migr_data_cls): - mock_instance = fake_instance.fake_instance_obj(self.context) - migr_data = self._livemigrops.check_can_live_migrate_destination( - mock.sentinel.context, mock_instance, mock.sentinel.src_comp_info, - mock.sentinel.dest_comp_info) - - mock_check_shared_inst_dir = ( - self._pathutils.check_remote_instances_dir_shared) - mock_check_shared_inst_dir.assert_called_once_with(mock_instance.host) - - self.assertEqual(mock_migr_data_cls.return_value, migr_data) - self.assertEqual(mock_check_shared_inst_dir.return_value, - migr_data.is_shared_instance_path) - - @mock.patch('nova.virt.hyperv.vmops.VMOps.plug_vifs') - def test_post_live_migration_at_destination(self, mock_plug_vifs): - self._livemigrops.post_live_migration_at_destination( - self.context, mock.sentinel.instance, - network_info=mock.sentinel.NET_INFO, - block_migration=mock.sentinel.BLOCK_INFO) - mock_plug_vifs.assert_called_once_with(mock.sentinel.instance, - mock.sentinel.NET_INFO) - - def test_check_can_live_migrate_destination_exception(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_check = self._pathutils.check_remote_instances_dir_shared - mock_check.side_effect = exception.FileNotFound(file_path='C:\\baddir') - self.assertRaises( - exception.MigrationPreCheckError, - self._livemigrops.check_can_live_migrate_destination, - mock.sentinel.context, mock_instance, mock.sentinel.src_comp_info, - mock.sentinel.dest_comp_info) diff --git a/nova/tests/unit/virt/hyperv/test_migrationops.py b/nova/tests/unit/virt/hyperv/test_migrationops.py deleted file mode 100644 index d0b7ff32fd6f..000000000000 --- a/nova/tests/unit/virt/hyperv/test_migrationops.py +++ /dev/null @@ -1,546 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
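# A small illustrative sketch of the resize guard that test_check_resize_vhd
# and test_check_resize_vhd_exception below exercise (the names here are
# assumptions, not the removed module's internals): a VHD may only grow, so
# a requested size below the current virtual size is rejected rather than
# silently shrinking the disk.
class CannotResizeDisk(Exception):
    """Stand-in for the nova exception used by the real code."""


def check_resize_vhd(vhd_path, vhd_info, new_size, resize_cb):
    if new_size < vhd_info['VirtualSize']:
        raise CannotResizeDisk(vhd_path)
    if new_size > vhd_info['VirtualSize']:
        resize_cb(vhd_path, new_size)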
- -import os -from unittest import mock - -from os_win import exceptions as os_win_exc -from oslo_utils import units - -from nova import exception -from nova import objects -from nova import test -from nova.tests.unit import fake_instance -from nova.tests.unit.virt.hyperv import test_base -from nova.virt.hyperv import constants -from nova.virt.hyperv import migrationops - - -class MigrationOpsTestCase(test_base.HyperVBaseTestCase): - """Unit tests for the Hyper-V MigrationOps class.""" - - _FAKE_DISK = 'fake_disk' - _FAKE_TIMEOUT = 10 - _FAKE_RETRY_INTERVAL = 5 - - def setUp(self): - super(MigrationOpsTestCase, self).setUp() - self.context = 'fake-context' - - self._migrationops = migrationops.MigrationOps() - self._migrationops._hostutils = mock.MagicMock() - self._migrationops._vmops = mock.MagicMock() - self._migrationops._vmutils = mock.MagicMock() - self._migrationops._pathutils = mock.Mock() - self._migrationops._vhdutils = mock.MagicMock() - self._migrationops._pathutils = mock.MagicMock() - self._migrationops._volumeops = mock.MagicMock() - self._migrationops._imagecache = mock.MagicMock() - self._migrationops._block_dev_man = mock.MagicMock() - - def _check_migrate_disk_files(self, shared_storage=False): - instance_path = 'fake/instance/path' - dest_instance_path = 'remote/instance/path' - self._migrationops._pathutils.get_instance_dir.side_effect = ( - instance_path, dest_instance_path) - get_revert_dir = ( - self._migrationops._pathutils.get_instance_migr_revert_dir) - check_shared_storage = ( - self._migrationops._pathutils.check_dirs_shared_storage) - check_shared_storage.return_value = shared_storage - self._migrationops._pathutils.exists.return_value = True - - fake_disk_files = [os.path.join(instance_path, disk_name) - for disk_name in - ['root.vhd', 'configdrive.vhd', 'configdrive.iso', - 'eph0.vhd', 'eph1.vhdx']] - - expected_get_dir = [mock.call(mock.sentinel.instance_name), - mock.call(mock.sentinel.instance_name, - mock.sentinel.dest_path)] - expected_move_calls = [mock.call(instance_path, - get_revert_dir.return_value)] - - self._migrationops._migrate_disk_files( - instance_name=mock.sentinel.instance_name, - disk_files=fake_disk_files, - dest=mock.sentinel.dest_path) - - self._migrationops._pathutils.exists.assert_called_once_with( - dest_instance_path) - check_shared_storage.assert_called_once_with( - instance_path, dest_instance_path) - get_revert_dir.assert_called_with(mock.sentinel.instance_name, - remove_dir=True, create_dir=True) - if shared_storage: - fake_dest_path = '%s_tmp' % instance_path - expected_move_calls.append(mock.call(fake_dest_path, - instance_path)) - self._migrationops._pathutils.rmtree.assert_called_once_with( - fake_dest_path) - else: - fake_dest_path = dest_instance_path - - self._migrationops._pathutils.makedirs.assert_called_once_with( - fake_dest_path) - check_remove_dir = self._migrationops._pathutils.check_remove_dir - check_remove_dir.assert_called_once_with(fake_dest_path) - - self._migrationops._pathutils.get_instance_dir.assert_has_calls( - expected_get_dir) - self._migrationops._pathutils.copy.assert_has_calls( - mock.call(fake_disk_file, fake_dest_path) - for fake_disk_file in fake_disk_files) - self.assertEqual(len(fake_disk_files), - self._migrationops._pathutils.copy.call_count) - self._migrationops._pathutils.move_folder_files.assert_has_calls( - expected_move_calls) - - def test_migrate_disk_files(self): - self._check_migrate_disk_files() - - def test_migrate_disk_files_same_host(self): - 
self._check_migrate_disk_files(shared_storage=True) - - @mock.patch.object(migrationops.MigrationOps, - '_cleanup_failed_disk_migration') - def test_migrate_disk_files_exception(self, mock_cleanup): - instance_path = 'fake/instance/path' - fake_dest_path = '%s_tmp' % instance_path - self._migrationops._pathutils.get_instance_dir.return_value = ( - instance_path) - get_revert_dir = ( - self._migrationops._pathutils.get_instance_migr_revert_dir) - self._migrationops._hostutils.get_local_ips.return_value = [ - mock.sentinel.dest_path] - self._migrationops._pathutils.copy.side_effect = IOError( - "Expected exception.") - - self.assertRaises(IOError, self._migrationops._migrate_disk_files, - instance_name=mock.sentinel.instance_name, - disk_files=[self._FAKE_DISK], - dest=mock.sentinel.dest_path) - mock_cleanup.assert_called_once_with(instance_path, - get_revert_dir.return_value, - fake_dest_path) - - def test_cleanup_failed_disk_migration(self): - self._migrationops._pathutils.exists.return_value = True - - self._migrationops._cleanup_failed_disk_migration( - instance_path=mock.sentinel.instance_path, - revert_path=mock.sentinel.revert_path, - dest_path=mock.sentinel.dest_path) - - expected = [mock.call(mock.sentinel.dest_path), - mock.call(mock.sentinel.revert_path)] - self._migrationops._pathutils.exists.assert_has_calls(expected) - move_folder_files = self._migrationops._pathutils.move_folder_files - move_folder_files.assert_called_once_with( - mock.sentinel.revert_path, mock.sentinel.instance_path) - self._migrationops._pathutils.rmtree.assert_has_calls([ - mock.call(mock.sentinel.dest_path), - mock.call(mock.sentinel.revert_path)]) - - def test_check_target_flavor(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.flavor.root_gb = 1 - mock_flavor = mock.MagicMock(root_gb=0) - self.assertRaises(exception.InstanceFaultRollback, - self._migrationops._check_target_flavor, - mock_instance, mock_flavor) - - def test_check_and_attach_config_drive(self): - mock_instance = fake_instance.fake_instance_obj( - self.context, expected_attrs=['system_metadata']) - mock_instance.config_drive = 'True' - - self._migrationops._check_and_attach_config_drive( - mock_instance, mock.sentinel.vm_gen) - - self._migrationops._vmops.attach_config_drive.assert_called_once_with( - mock_instance, - self._migrationops._pathutils.lookup_configdrive_path.return_value, - mock.sentinel.vm_gen) - - def test_check_and_attach_config_drive_unknown_path(self): - instance = fake_instance.fake_instance_obj( - self.context, expected_attrs=['system_metadata']) - instance.config_drive = 'True' - self._migrationops._pathutils.lookup_configdrive_path.return_value = ( - None) - self.assertRaises(exception.ConfigDriveNotFound, - self._migrationops._check_and_attach_config_drive, - instance, - mock.sentinel.FAKE_VM_GEN) - - @mock.patch.object(migrationops.MigrationOps, '_migrate_disk_files') - @mock.patch.object(migrationops.MigrationOps, '_check_target_flavor') - def test_migrate_disk_and_power_off(self, mock_check_flavor, - mock_migrate_disk_files): - instance = fake_instance.fake_instance_obj(self.context) - flavor = mock.MagicMock() - network_info = mock.MagicMock() - - disk_files = [mock.MagicMock()] - volume_drives = [mock.MagicMock()] - - mock_get_vm_st_path = self._migrationops._vmutils.get_vm_storage_paths - mock_get_vm_st_path.return_value = (disk_files, volume_drives) - - self._migrationops.migrate_disk_and_power_off( - self.context, instance, mock.sentinel.FAKE_DEST, flavor, - network_info, 
mock.sentinel.bdi, - self._FAKE_TIMEOUT, self._FAKE_RETRY_INTERVAL) - - mock_check_flavor.assert_called_once_with(instance, flavor) - self._migrationops._vmops.power_off.assert_called_once_with( - instance, self._FAKE_TIMEOUT, self._FAKE_RETRY_INTERVAL) - mock_get_vm_st_path.assert_called_once_with(instance.name) - mock_migrate_disk_files.assert_called_once_with( - instance.name, disk_files, mock.sentinel.FAKE_DEST) - self._migrationops._vmops.destroy.assert_called_once_with( - instance, network_info, mock.sentinel.bdi, destroy_disks=False) - - def test_confirm_migration(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - self._migrationops.confirm_migration( - context=self.context, - migration=mock.sentinel.migration, instance=mock_instance, - network_info=mock.sentinel.network_info) - get_instance_migr_revert_dir = ( - self._migrationops._pathutils.get_instance_migr_revert_dir) - get_instance_migr_revert_dir.assert_called_with(mock_instance.name, - remove_dir=True) - - def test_revert_migration_files(self): - instance_path = ( - self._migrationops._pathutils.get_instance_dir.return_value) - get_revert_dir = ( - self._migrationops._pathutils.get_instance_migr_revert_dir) - - self._migrationops._revert_migration_files( - instance_name=mock.sentinel.instance_name) - - self._migrationops._pathutils.get_instance_dir.assert_called_once_with( - mock.sentinel.instance_name, create_dir=False, remove_dir=True) - get_revert_dir.assert_called_with(mock.sentinel.instance_name) - self._migrationops._pathutils.rename.assert_called_once_with( - get_revert_dir.return_value, instance_path) - - @mock.patch.object(migrationops.MigrationOps, - '_check_and_attach_config_drive') - @mock.patch.object(migrationops.MigrationOps, '_revert_migration_files') - @mock.patch.object(migrationops.MigrationOps, '_check_ephemeral_disks') - @mock.patch.object(objects.ImageMeta, "from_instance") - def _check_finish_revert_migration(self, mock_image, - mock_check_eph_disks, - mock_revert_migration_files, - mock_check_attach_config_drive, - disk_type=constants.DISK): - mock_image.return_value = objects.ImageMeta.from_dict({}) - mock_instance = fake_instance.fake_instance_obj(self.context) - root_device = {'type': disk_type} - block_device_info = {'root_disk': root_device, 'ephemerals': []} - - self._migrationops.finish_revert_migration( - context=self.context, instance=mock_instance, - network_info=mock.sentinel.network_info, - block_device_info=block_device_info, - power_on=True) - - mock_revert_migration_files.assert_called_once_with( - mock_instance.name) - if root_device['type'] == constants.DISK: - lookup_root_vhd = ( - self._migrationops._pathutils.lookup_root_vhd_path) - lookup_root_vhd.assert_called_once_with(mock_instance.name) - self.assertEqual(lookup_root_vhd.return_value, - root_device['path']) - - get_image_vm_gen = self._migrationops._vmops.get_image_vm_generation - get_image_vm_gen.assert_called_once_with( - mock_instance.uuid, test.MatchType(objects.ImageMeta)) - self._migrationops._vmops.create_instance.assert_called_once_with( - mock_instance, mock.sentinel.network_info, root_device, - block_device_info, get_image_vm_gen.return_value, - mock_image.return_value) - mock_check_attach_config_drive.assert_called_once_with( - mock_instance, get_image_vm_gen.return_value) - self._migrationops._vmops.set_boot_order.assert_called_once_with( - mock_instance.name, get_image_vm_gen.return_value, - block_device_info) - self._migrationops._vmops.power_on.assert_called_once_with( - mock_instance, 
network_info=mock.sentinel.network_info) - - def test_finish_revert_migration_boot_from_volume(self): - self._check_finish_revert_migration(disk_type=constants.VOLUME) - - def test_finish_revert_migration_boot_from_disk(self): - self._check_finish_revert_migration(disk_type=constants.DISK) - - @mock.patch.object(objects.ImageMeta, "from_instance") - def test_finish_revert_migration_no_root_vhd(self, mock_image): - mock_instance = fake_instance.fake_instance_obj(self.context) - self._migrationops._pathutils.lookup_root_vhd_path.return_value = None - bdi = {'root_disk': {'type': constants.DISK}, - 'ephemerals': []} - - self.assertRaises( - exception.DiskNotFound, - self._migrationops.finish_revert_migration, self.context, - mock_instance, mock.sentinel.network_info, bdi, True) - - def test_merge_base_vhd(self): - fake_diff_vhd_path = 'fake/diff/path' - fake_base_vhd_path = 'fake/base/path' - base_vhd_copy_path = os.path.join( - os.path.dirname(fake_diff_vhd_path), - os.path.basename(fake_base_vhd_path)) - - self._migrationops._merge_base_vhd(diff_vhd_path=fake_diff_vhd_path, - base_vhd_path=fake_base_vhd_path) - - self._migrationops._pathutils.copyfile.assert_called_once_with( - fake_base_vhd_path, base_vhd_copy_path) - recon_parent_vhd = self._migrationops._vhdutils.reconnect_parent_vhd - recon_parent_vhd.assert_called_once_with(fake_diff_vhd_path, - base_vhd_copy_path) - self._migrationops._vhdutils.merge_vhd.assert_called_once_with( - fake_diff_vhd_path) - self._migrationops._pathutils.rename.assert_called_once_with( - base_vhd_copy_path, fake_diff_vhd_path) - - def test_merge_base_vhd_exception(self): - fake_diff_vhd_path = 'fake/diff/path' - fake_base_vhd_path = 'fake/base/path' - base_vhd_copy_path = os.path.join( - os.path.dirname(fake_diff_vhd_path), - os.path.basename(fake_base_vhd_path)) - - self._migrationops._vhdutils.reconnect_parent_vhd.side_effect = ( - os_win_exc.HyperVException) - self._migrationops._pathutils.exists.return_value = True - - self.assertRaises(os_win_exc.HyperVException, - self._migrationops._merge_base_vhd, - fake_diff_vhd_path, fake_base_vhd_path) - self._migrationops._pathutils.exists.assert_called_once_with( - base_vhd_copy_path) - self._migrationops._pathutils.remove.assert_called_once_with( - base_vhd_copy_path) - - @mock.patch.object(migrationops.MigrationOps, '_resize_vhd') - def test_check_resize_vhd(self, mock_resize_vhd): - self._migrationops._check_resize_vhd( - vhd_path=mock.sentinel.vhd_path, vhd_info={'VirtualSize': 1}, - new_size=2) - mock_resize_vhd.assert_called_once_with(mock.sentinel.vhd_path, 2) - - def test_check_resize_vhd_exception(self): - self.assertRaises(exception.CannotResizeDisk, - self._migrationops._check_resize_vhd, - mock.sentinel.vhd_path, - {'VirtualSize': 1}, 0) - - @mock.patch.object(migrationops.MigrationOps, '_merge_base_vhd') - def test_resize_vhd(self, mock_merge_base_vhd): - fake_vhd_path = 'fake/path.vhd' - new_vhd_size = 2 - self._migrationops._resize_vhd(vhd_path=fake_vhd_path, - new_size=new_vhd_size) - - get_vhd_parent_path = self._migrationops._vhdutils.get_vhd_parent_path - get_vhd_parent_path.assert_called_once_with(fake_vhd_path) - mock_merge_base_vhd.assert_called_once_with( - fake_vhd_path, - self._migrationops._vhdutils.get_vhd_parent_path.return_value) - self._migrationops._vhdutils.resize_vhd.assert_called_once_with( - fake_vhd_path, new_vhd_size) - - def test_check_base_disk(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - fake_src_vhd_path = 'fake/src/path' - fake_base_vhd = 
'fake/vhd' - get_cached_image = self._migrationops._imagecache.get_cached_image - get_cached_image.return_value = fake_base_vhd - - self._migrationops._check_base_disk( - context=self.context, instance=mock_instance, - diff_vhd_path=mock.sentinel.diff_vhd_path, - src_base_disk_path=fake_src_vhd_path) - - get_cached_image.assert_called_once_with(self.context, mock_instance) - recon_parent_vhd = self._migrationops._vhdutils.reconnect_parent_vhd - recon_parent_vhd.assert_called_once_with( - mock.sentinel.diff_vhd_path, fake_base_vhd) - - @mock.patch.object(migrationops.MigrationOps, - '_check_and_attach_config_drive') - @mock.patch.object(migrationops.MigrationOps, '_check_base_disk') - @mock.patch.object(migrationops.MigrationOps, '_check_resize_vhd') - @mock.patch.object(migrationops.MigrationOps, '_check_ephemeral_disks') - def _check_finish_migration(self, mock_check_eph_disks, - mock_check_resize_vhd, - mock_check_base_disk, - mock_check_attach_config_drive, - disk_type=constants.DISK): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.flavor.ephemeral_gb = 1 - root_device = {'type': disk_type} - block_device_info = {'root_disk': root_device, 'ephemerals': []} - - lookup_root_vhd = self._migrationops._pathutils.lookup_root_vhd_path - get_vhd_info = self._migrationops._vhdutils.get_vhd_info - mock_vhd_info = get_vhd_info.return_value - - expected_check_resize = [] - expected_get_info = [] - - self._migrationops.finish_migration( - context=self.context, migration=mock.sentinel.migration, - instance=mock_instance, disk_info=mock.sentinel.disk_info, - network_info=mock.sentinel.network_info, - image_meta=mock.sentinel.image_meta, resize_instance=True, - block_device_info=block_device_info) - - if root_device['type'] == constants.DISK: - root_device_path = lookup_root_vhd.return_value - lookup_root_vhd.assert_called_with(mock_instance.name) - expected_get_info = [mock.call(root_device_path)] - mock_vhd_info.get.assert_called_once_with("ParentPath") - mock_check_base_disk.assert_called_once_with( - self.context, mock_instance, root_device_path, - mock_vhd_info.get.return_value) - expected_check_resize.append( - mock.call(root_device_path, mock_vhd_info, - mock_instance.flavor.root_gb * units.Gi)) - - ephemerals = block_device_info['ephemerals'] - mock_check_eph_disks.assert_called_once_with( - mock_instance, ephemerals, True) - - mock_check_resize_vhd.assert_has_calls(expected_check_resize) - self._migrationops._vhdutils.get_vhd_info.assert_has_calls( - expected_get_info) - get_image_vm_gen = self._migrationops._vmops.get_image_vm_generation - get_image_vm_gen.assert_called_once_with(mock_instance.uuid, - mock.sentinel.image_meta) - self._migrationops._vmops.create_instance.assert_called_once_with( - mock_instance, mock.sentinel.network_info, root_device, - block_device_info, get_image_vm_gen.return_value, - mock.sentinel.image_meta) - mock_check_attach_config_drive.assert_called_once_with( - mock_instance, get_image_vm_gen.return_value) - self._migrationops._vmops.set_boot_order.assert_called_once_with( - mock_instance.name, get_image_vm_gen.return_value, - block_device_info) - self._migrationops._vmops.power_on.assert_called_once_with( - mock_instance, network_info=mock.sentinel.network_info) - - def test_finish_migration(self): - self._check_finish_migration(disk_type=constants.DISK) - - def test_finish_migration_boot_from_volume(self): - self._check_finish_migration(disk_type=constants.VOLUME) - - def test_finish_migration_no_root(self): - mock_instance = 
fake_instance.fake_instance_obj(self.context) - self._migrationops._pathutils.lookup_root_vhd_path.return_value = None - bdi = {'root_disk': {'type': constants.DISK}, - 'ephemerals': []} - - self.assertRaises(exception.DiskNotFound, - self._migrationops.finish_migration, - self.context, mock.sentinel.migration, - mock_instance, mock.sentinel.disk_info, - mock.sentinel.network_info, - mock.sentinel.image_meta, True, bdi, True) - - @mock.patch.object(migrationops.MigrationOps, '_check_resize_vhd') - @mock.patch.object(migrationops.LOG, 'warning') - def test_check_ephemeral_disks_multiple_eph_warn(self, mock_warn, - mock_check_resize_vhd): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.ephemeral_gb = 3 - mock_ephemerals = [{'size': 1}, {'size': 1}] - - self._migrationops._check_ephemeral_disks(mock_instance, - mock_ephemerals, - True) - - mock_warn.assert_called_once_with( - "Cannot resize multiple ephemeral disks for instance.", - instance=mock_instance) - - def test_check_ephemeral_disks_exception(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_ephemerals = [dict()] - - lookup_eph_path = ( - self._migrationops._pathutils.lookup_ephemeral_vhd_path) - lookup_eph_path.return_value = None - - self.assertRaises(exception.DiskNotFound, - self._migrationops._check_ephemeral_disks, - mock_instance, mock_ephemerals) - - @mock.patch.object(migrationops.MigrationOps, '_check_resize_vhd') - def _test_check_ephemeral_disks(self, mock_check_resize_vhd, - existing_eph_path=None, new_eph_size=42): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.ephemeral_gb = new_eph_size - eph = {} - mock_ephemerals = [eph] - - mock_pathutils = self._migrationops._pathutils - lookup_eph_path = mock_pathutils.lookup_ephemeral_vhd_path - lookup_eph_path.return_value = existing_eph_path - mock_get_eph_vhd_path = mock_pathutils.get_ephemeral_vhd_path - mock_get_eph_vhd_path.return_value = mock.sentinel.get_path - - mock_vhdutils = self._migrationops._vhdutils - mock_get_vhd_format = mock_vhdutils.get_best_supported_vhd_format - mock_get_vhd_format.return_value = mock.sentinel.vhd_format - - self._migrationops._check_ephemeral_disks(mock_instance, - mock_ephemerals, - True) - - self.assertEqual(mock_instance.ephemeral_gb, eph['size']) - if not existing_eph_path: - mock_vmops = self._migrationops._vmops - mock_vmops.create_ephemeral_disk.assert_called_once_with( - mock_instance.name, eph) - self.assertEqual(mock.sentinel.vhd_format, eph['format']) - self.assertEqual(mock.sentinel.get_path, eph['path']) - elif new_eph_size: - mock_check_resize_vhd.assert_called_once_with( - existing_eph_path, - self._migrationops._vhdutils.get_vhd_info.return_value, - mock_instance.ephemeral_gb * units.Gi) - self.assertEqual(existing_eph_path, eph['path']) - else: - self._migrationops._pathutils.remove.assert_called_once_with( - existing_eph_path) - - def test_check_ephemeral_disks_create(self): - self._test_check_ephemeral_disks() - - def test_check_ephemeral_disks_resize(self): - self._test_check_ephemeral_disks(existing_eph_path=mock.sentinel.path) - - def test_check_ephemeral_disks_remove(self): - self._test_check_ephemeral_disks(existing_eph_path=mock.sentinel.path, - new_eph_size=0) diff --git a/nova/tests/unit/virt/hyperv/test_pathutils.py b/nova/tests/unit/virt/hyperv/test_pathutils.py deleted file mode 100644 index 7bd9e91e3f26..000000000000 --- a/nova/tests/unit/virt/hyperv/test_pathutils.py +++ /dev/null @@ -1,226 +0,0 @@ -# Copyright 2014 
IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import time -from unittest import mock - -from nova import exception -from nova.tests.unit.virt.hyperv import test_base -from nova.virt.hyperv import constants -from nova.virt.hyperv import pathutils - - -class PathUtilsTestCase(test_base.HyperVBaseTestCase): - """Unit tests for the Hyper-V PathUtils class.""" - - def setUp(self): - super(PathUtilsTestCase, self).setUp() - self.fake_instance_dir = os.path.join('C:', 'fake_instance_dir') - self.fake_instance_name = 'fake_instance_name' - - self._pathutils = pathutils.PathUtils() - - def _mock_lookup_configdrive_path(self, ext, rescue=False): - self._pathutils.get_instance_dir = mock.MagicMock( - return_value=self.fake_instance_dir) - - def mock_exists(*args, **kwargs): - path = args[0] - return True if path[(path.rfind('.') + 1):] == ext else False - self._pathutils.exists = mock_exists - configdrive_path = self._pathutils.lookup_configdrive_path( - self.fake_instance_name, rescue) - return configdrive_path - - def _test_lookup_configdrive_path(self, rescue=False): - configdrive_name = 'configdrive' - if rescue: - configdrive_name += '-rescue' - - for format_ext in constants.DISK_FORMAT_MAP: - configdrive_path = self._mock_lookup_configdrive_path(format_ext, - rescue) - expected_path = os.path.join(self.fake_instance_dir, - configdrive_name + '.' + format_ext) - self.assertEqual(expected_path, configdrive_path) - - def test_lookup_configdrive_path(self): - self._test_lookup_configdrive_path() - - def test_lookup_rescue_configdrive_path(self): - self._test_lookup_configdrive_path(rescue=True) - - def test_lookup_configdrive_path_non_exist(self): - self._pathutils.get_instance_dir = mock.MagicMock( - return_value=self.fake_instance_dir) - self._pathutils.exists = mock.MagicMock(return_value=False) - configdrive_path = self._pathutils.lookup_configdrive_path( - self.fake_instance_name) - self.assertIsNone(configdrive_path) - - def test_get_instances_dir_local(self): - self.flags(instances_path=self.fake_instance_dir) - instances_dir = self._pathutils.get_instances_dir() - - self.assertEqual(self.fake_instance_dir, instances_dir) - - def test_get_instances_dir_remote_instance_share(self): - # The Hyper-V driver allows using a pre-configured share exporting - # the instances dir. The same share name should be used across nodes. 
- fake_instances_dir_share = 'fake_instances_dir_share' - fake_remote = 'fake_remote' - expected_instance_dir = r'\\%s\%s' % (fake_remote, - fake_instances_dir_share) - - self.flags(instances_path_share=fake_instances_dir_share, - group='hyperv') - instances_dir = self._pathutils.get_instances_dir( - remote_server=fake_remote) - self.assertEqual(expected_instance_dir, instances_dir) - - def test_get_instances_dir_administrative_share(self): - self.flags(instances_path=r'C:\fake_instance_dir') - fake_remote = 'fake_remote' - expected_instance_dir = r'\\fake_remote\C$\fake_instance_dir' - - instances_dir = self._pathutils.get_instances_dir( - remote_server=fake_remote) - self.assertEqual(expected_instance_dir, instances_dir) - - def test_get_instances_dir_unc_path(self): - fake_instance_dir = r'\\fake_addr\fake_share\fake_instance_dir' - self.flags(instances_path=fake_instance_dir) - fake_remote = 'fake_remote' - - instances_dir = self._pathutils.get_instances_dir( - remote_server=fake_remote) - self.assertEqual(fake_instance_dir, instances_dir) - - @mock.patch('os.path.join') - def test_get_instances_sub_dir(self, fake_path_join): - - class WindowsError(Exception): - def __init__(self, winerror=None): - self.winerror = winerror - - fake_dir_name = "fake_dir_name" - fake_windows_error = WindowsError - self._pathutils.check_create_dir = mock.MagicMock( - side_effect=WindowsError(pathutils.ERROR_INVALID_NAME)) - with mock.patch('builtins.WindowsError', - fake_windows_error, create=True): - self.assertRaises(exception.AdminRequired, - self._pathutils._get_instances_sub_dir, - fake_dir_name) - - def test_copy_vm_console_logs(self): - fake_local_logs = [mock.sentinel.log_path, - mock.sentinel.archived_log_path] - fake_remote_logs = [mock.sentinel.remote_log_path, - mock.sentinel.remote_archived_log_path] - - self._pathutils.exists = mock.Mock(return_value=True) - self._pathutils.copy = mock.Mock() - self._pathutils.get_vm_console_log_paths = mock.Mock( - side_effect=[fake_local_logs, fake_remote_logs]) - - self._pathutils.copy_vm_console_logs(mock.sentinel.instance_name, - mock.sentinel.dest_host) - - self._pathutils.get_vm_console_log_paths.assert_has_calls( - [mock.call(mock.sentinel.instance_name), - mock.call(mock.sentinel.instance_name, - remote_server=mock.sentinel.dest_host)]) - self._pathutils.copy.assert_has_calls([ - mock.call(mock.sentinel.log_path, - mock.sentinel.remote_log_path), - mock.call(mock.sentinel.archived_log_path, - mock.sentinel.remote_archived_log_path)]) - - @mock.patch.object(pathutils.PathUtils, 'get_base_vhd_dir') - @mock.patch.object(pathutils.PathUtils, 'exists') - def test_get_image_path(self, mock_exists, - mock_get_base_vhd_dir): - fake_image_name = 'fake_image_name' - mock_exists.side_effect = [True, False] - mock_get_base_vhd_dir.return_value = 'fake_base_dir' - - res = self._pathutils.get_image_path(fake_image_name) - - mock_get_base_vhd_dir.assert_called_once_with() - - self.assertEqual(res, - os.path.join('fake_base_dir', 'fake_image_name.vhd')) - - @mock.patch('os.path.getmtime') - @mock.patch.object(pathutils, 'time') - def test_get_age_of_file(self, mock_time, mock_getmtime): - mock_time.time.return_value = time.time() - mock_getmtime.return_value = mock_time.time.return_value - 42 - - actual_age = self._pathutils.get_age_of_file(mock.sentinel.filename) - self.assertEqual(42, actual_age) - mock_time.time.assert_called_once_with() - mock_getmtime.assert_called_once_with(mock.sentinel.filename) - - @mock.patch('os.path.exists') - 
@mock.patch('tempfile.NamedTemporaryFile') - def test_check_dirs_shared_storage(self, mock_named_tempfile, - mock_exists): - fake_src_dir = 'fake_src_dir' - fake_dest_dir = 'fake_dest_dir' - - mock_exists.return_value = True - mock_tmpfile = mock_named_tempfile.return_value.__enter__.return_value - mock_tmpfile.name = 'fake_tmp_fname' - expected_src_tmp_path = os.path.join(fake_src_dir, - mock_tmpfile.name) - - self._pathutils.check_dirs_shared_storage( - fake_src_dir, fake_dest_dir) - - mock_named_tempfile.assert_called_once_with(dir=fake_dest_dir) - mock_exists.assert_called_once_with(expected_src_tmp_path) - - @mock.patch('os.path.exists') - @mock.patch('tempfile.NamedTemporaryFile') - def test_check_dirs_shared_storage_exception(self, mock_named_tempfile, - mock_exists): - fake_src_dir = 'fake_src_dir' - fake_dest_dir = 'fake_dest_dir' - - mock_exists.return_value = True - mock_named_tempfile.side_effect = OSError('not exist') - - self.assertRaises(exception.FileNotFound, - self._pathutils.check_dirs_shared_storage, - fake_src_dir, fake_dest_dir) - - @mock.patch.object(pathutils.PathUtils, 'check_dirs_shared_storage') - @mock.patch.object(pathutils.PathUtils, 'get_instances_dir') - def test_check_remote_instances_shared(self, mock_get_instances_dir, - mock_check_dirs_shared_storage): - mock_get_instances_dir.side_effect = [mock.sentinel.local_inst_dir, - mock.sentinel.remote_inst_dir] - - shared_storage = self._pathutils.check_remote_instances_dir_shared( - mock.sentinel.dest) - - self.assertEqual(mock_check_dirs_shared_storage.return_value, - shared_storage) - mock_get_instances_dir.assert_has_calls( - [mock.call(), mock.call(mock.sentinel.dest)]) - mock_check_dirs_shared_storage.assert_called_once_with( - mock.sentinel.local_inst_dir, mock.sentinel.remote_inst_dir) diff --git a/nova/tests/unit/virt/hyperv/test_rdpconsoleops.py b/nova/tests/unit/virt/hyperv/test_rdpconsoleops.py deleted file mode 100644 index 5e6bf9a3c3db..000000000000 --- a/nova/tests/unit/virt/hyperv/test_rdpconsoleops.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2015 Cloudbase Solutions SRL -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Unit tests for the Hyper-V RDPConsoleOps. 
-""" - -from unittest import mock - -from nova.tests.unit.virt.hyperv import test_base -from nova.virt.hyperv import rdpconsoleops - - -class RDPConsoleOpsTestCase(test_base.HyperVBaseTestCase): - - def setUp(self): - super(RDPConsoleOpsTestCase, self).setUp() - - self.rdpconsoleops = rdpconsoleops.RDPConsoleOps() - self.rdpconsoleops._hostops = mock.MagicMock() - self.rdpconsoleops._vmutils = mock.MagicMock() - self.rdpconsoleops._rdpconsoleutils = mock.MagicMock() - - def test_get_rdp_console(self): - mock_get_host_ip = self.rdpconsoleops._hostops.get_host_ip_addr - mock_get_rdp_port = ( - self.rdpconsoleops._rdpconsoleutils.get_rdp_console_port) - mock_get_vm_id = self.rdpconsoleops._vmutils.get_vm_id - - connect_info = self.rdpconsoleops.get_rdp_console(mock.DEFAULT) - - self.assertEqual(mock_get_host_ip.return_value, connect_info.host) - self.assertEqual(mock_get_rdp_port.return_value, connect_info.port) - self.assertEqual(mock_get_vm_id.return_value, - connect_info.internal_access_path) diff --git a/nova/tests/unit/virt/hyperv/test_serialconsolehandler.py b/nova/tests/unit/virt/hyperv/test_serialconsolehandler.py deleted file mode 100644 index e9461408c4b9..000000000000 --- a/nova/tests/unit/virt/hyperv/test_serialconsolehandler.py +++ /dev/null @@ -1,249 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from nova import exception -from nova.tests.unit.virt.hyperv import test_base -from nova.virt.hyperv import constants -from nova.virt.hyperv import pathutils -from nova.virt.hyperv import serialconsolehandler -from nova.virt.hyperv import serialproxy - - -class SerialConsoleHandlerTestCase(test_base.HyperVBaseTestCase): - @mock.patch.object(pathutils.PathUtils, 'get_vm_console_log_paths') - def setUp(self, mock_get_log_paths): - super(SerialConsoleHandlerTestCase, self).setUp() - - mock_get_log_paths.return_value = [mock.sentinel.log_path] - - self._consolehandler = serialconsolehandler.SerialConsoleHandler( - mock.sentinel.instance_name) - - self._consolehandler._log_path = mock.sentinel.log_path - self._consolehandler._pathutils = mock.Mock() - self._consolehandler._vmutils = mock.Mock() - - @mock.patch.object(serialconsolehandler.SerialConsoleHandler, - '_setup_handlers') - def test_start(self, mock_setup_handlers): - mock_workers = [mock.Mock(), mock.Mock()] - self._consolehandler._workers = mock_workers - - self._consolehandler.start() - - mock_setup_handlers.assert_called_once_with() - for worker in mock_workers: - worker.start.assert_called_once_with() - - @mock.patch('nova.console.serial.release_port') - def test_stop(self, mock_release_port): - mock_serial_proxy = mock.Mock() - mock_workers = [mock_serial_proxy, mock.Mock()] - - self._consolehandler._serial_proxy = mock_serial_proxy - self._consolehandler._listen_host = mock.sentinel.host - self._consolehandler._listen_port = mock.sentinel.port - self._consolehandler._workers = mock_workers - - self._consolehandler.stop() - - mock_release_port.assert_called_once_with(mock.sentinel.host, - mock.sentinel.port) - for worker in mock_workers: - worker.stop.assert_called_once_with() - - @mock.patch.object(serialconsolehandler.SerialConsoleHandler, - '_setup_named_pipe_handlers') - @mock.patch.object(serialconsolehandler.SerialConsoleHandler, - '_setup_serial_proxy_handler') - def _test_setup_handlers(self, mock_setup_proxy, mock_setup_pipe_handlers, - serial_console_enabled=True): - self.flags(enabled=serial_console_enabled, group='serial_console') - - self._consolehandler._setup_handlers() - - self.assertEqual(serial_console_enabled, mock_setup_proxy.called) - mock_setup_pipe_handlers.assert_called_once_with() - - def test_setup_handlers(self): - self._test_setup_handlers() - - def test_setup_handlers_console_disabled(self): - self._test_setup_handlers(serial_console_enabled=False) - - @mock.patch.object(serialproxy, 'SerialProxy') - @mock.patch('nova.console.serial.acquire_port') - @mock.patch.object(serialconsolehandler.threading, 'Event') - @mock.patch.object(serialconsolehandler.ioutils, 'IOQueue') - def test_setup_serial_proxy_handler(self, mock_io_queue, mock_event, - mock_acquire_port, - mock_serial_proxy_class): - mock_input_queue = mock.sentinel.input_queue - mock_output_queue = mock.sentinel.output_queue - mock_client_connected = mock_event.return_value - mock_io_queue.side_effect = [mock_input_queue, mock_output_queue] - mock_serial_proxy = mock_serial_proxy_class.return_value - - mock_acquire_port.return_value = mock.sentinel.port - self.flags(proxyclient_address='127.0.0.3', - group='serial_console') - - self._consolehandler._setup_serial_proxy_handler() - - mock_serial_proxy_class.assert_called_once_with( - mock.sentinel.instance_name, - '127.0.0.3', mock.sentinel.port, - mock_input_queue, - mock_output_queue, - mock_client_connected) - - self.assertIn(mock_serial_proxy, 
self._consolehandler._workers) - - @mock.patch.object(serialconsolehandler.SerialConsoleHandler, - '_get_named_pipe_handler') - @mock.patch.object(serialconsolehandler.SerialConsoleHandler, - '_get_vm_serial_port_mapping') - def _mock_setup_named_pipe_handlers(self, mock_get_port_mapping, - mock_get_pipe_handler, - serial_port_mapping=None): - mock_get_port_mapping.return_value = serial_port_mapping - - self._consolehandler._setup_named_pipe_handlers() - - expected_workers = [mock_get_pipe_handler.return_value - for port in serial_port_mapping] - - self.assertEqual(expected_workers, self._consolehandler._workers) - - return mock_get_pipe_handler - - def test_setup_ro_pipe_handler(self): - serial_port_mapping = { - constants.SERIAL_PORT_TYPE_RW: mock.sentinel.pipe_path - } - - mock_get_handler = self._mock_setup_named_pipe_handlers( - serial_port_mapping=serial_port_mapping) - - mock_get_handler.assert_called_once_with( - mock.sentinel.pipe_path, - pipe_type=constants.SERIAL_PORT_TYPE_RW, - enable_logging=True) - - def test_setup_pipe_handlers(self): - serial_port_mapping = { - constants.SERIAL_PORT_TYPE_RO: mock.sentinel.ro_pipe_path, - constants.SERIAL_PORT_TYPE_RW: mock.sentinel.rw_pipe_path - } - - mock_get_handler = self._mock_setup_named_pipe_handlers( - serial_port_mapping=serial_port_mapping) - - expected_calls = [mock.call(mock.sentinel.ro_pipe_path, - pipe_type=constants.SERIAL_PORT_TYPE_RO, - enable_logging=True), - mock.call(mock.sentinel.rw_pipe_path, - pipe_type=constants.SERIAL_PORT_TYPE_RW, - enable_logging=False)] - mock_get_handler.assert_has_calls(expected_calls, any_order=True) - - @mock.patch.object(serialconsolehandler.utilsfactory, - 'get_named_pipe_handler') - def _test_get_named_pipe_handler(self, mock_get_pipe_handler, - pipe_type=None, enable_logging=False): - expected_args = {} - - if pipe_type == constants.SERIAL_PORT_TYPE_RW: - self._consolehandler._input_queue = mock.sentinel.input_queue - self._consolehandler._output_queue = mock.sentinel.output_queue - self._consolehandler._client_connected = ( - mock.sentinel.connect_event) - expected_args.update({ - 'input_queue': mock.sentinel.input_queue, - 'output_queue': mock.sentinel.output_queue, - 'connect_event': mock.sentinel.connect_event}) - - if enable_logging: - expected_args['log_file'] = mock.sentinel.log_path - - ret_val = self._consolehandler._get_named_pipe_handler( - mock.sentinel.pipe_path, pipe_type, enable_logging) - - self.assertEqual(mock_get_pipe_handler.return_value, ret_val) - mock_get_pipe_handler.assert_called_once_with( - mock.sentinel.pipe_path, - **expected_args) - - def test_get_ro_named_pipe_handler(self): - self._test_get_named_pipe_handler( - pipe_type=constants.SERIAL_PORT_TYPE_RO, - enable_logging=True) - - def test_get_rw_named_pipe_handler(self): - self._test_get_named_pipe_handler( - pipe_type=constants.SERIAL_PORT_TYPE_RW, - enable_logging=False) - - def _mock_get_port_connections(self, port_connections): - get_port_connections = ( - self._consolehandler._vmutils.get_vm_serial_port_connections) - get_port_connections.return_value = port_connections - - def test_get_vm_serial_port_mapping_having_tagged_pipes(self): - ro_pipe_path = 'fake_pipe_ro' - rw_pipe_path = 'fake_pipe_rw' - self._mock_get_port_connections([ro_pipe_path, rw_pipe_path]) - - ret_val = self._consolehandler._get_vm_serial_port_mapping() - - expected_mapping = { - constants.SERIAL_PORT_TYPE_RO: ro_pipe_path, - constants.SERIAL_PORT_TYPE_RW: rw_pipe_path - } - - self.assertEqual(expected_mapping, ret_val) - - def 
test_get_vm_serial_port_mapping_untagged_pipe(self): - pipe_path = 'fake_pipe_path' - self._mock_get_port_connections([pipe_path]) - - ret_val = self._consolehandler._get_vm_serial_port_mapping() - - expected_mapping = {constants.SERIAL_PORT_TYPE_RW: pipe_path} - self.assertEqual(expected_mapping, ret_val) - - def test_get_vm_serial_port_mapping_exception(self): - self._mock_get_port_connections([]) - self.assertRaises(exception.NovaException, - self._consolehandler._get_vm_serial_port_mapping) - - @mock.patch('nova.console.type.ConsoleSerial') - def test_get_serial_console(self, mock_serial_console): - self.flags(enabled=True, group='serial_console') - self._consolehandler._listen_host = mock.sentinel.host - self._consolehandler._listen_port = mock.sentinel.port - - ret_val = self._consolehandler.get_serial_console() - self.assertEqual(mock_serial_console.return_value, ret_val) - mock_serial_console.assert_called_once_with( - host=mock.sentinel.host, - port=mock.sentinel.port) - - def test_get_serial_console_disabled(self): - self.flags(enabled=False, group='serial_console') - self.assertRaises(exception.ConsoleTypeUnavailable, - self._consolehandler.get_serial_console) diff --git a/nova/tests/unit/virt/hyperv/test_serialconsoleops.py b/nova/tests/unit/virt/hyperv/test_serialconsoleops.py deleted file mode 100644 index 4a4b7c8e4f28..000000000000 --- a/nova/tests/unit/virt/hyperv/test_serialconsoleops.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from nova import exception -from nova.tests.unit.virt.hyperv import test_base -from nova.virt.hyperv import serialconsolehandler -from nova.virt.hyperv import serialconsoleops - - -class SerialConsoleOpsTestCase(test_base.HyperVBaseTestCase): - def setUp(self): - super(SerialConsoleOpsTestCase, self).setUp() - serialconsoleops._console_handlers = {} - self._serialops = serialconsoleops.SerialConsoleOps() - self._serialops._pathutils = mock.MagicMock() - - def _setup_console_handler_mock(self): - mock_console_handler = mock.Mock() - serialconsoleops._console_handlers = {mock.sentinel.instance_name: - mock_console_handler} - return mock_console_handler - - @mock.patch.object(serialconsolehandler, 'SerialConsoleHandler') - @mock.patch.object(serialconsoleops.SerialConsoleOps, - 'stop_console_handler_unsync') - def _test_start_console_handler(self, mock_stop_handler, - mock_console_handler, - raise_exception=False): - mock_handler = mock_console_handler.return_value - - if raise_exception: - mock_handler.start.side_effect = Exception - - self._serialops.start_console_handler(mock.sentinel.instance_name) - - mock_stop_handler.assert_called_once_with(mock.sentinel.instance_name) - mock_console_handler.assert_called_once_with( - mock.sentinel.instance_name) - - if raise_exception: - mock_handler.stop.assert_called_once_with() - else: - console_handler = serialconsoleops._console_handlers.get( - mock.sentinel.instance_name) - self.assertEqual(mock_handler, console_handler) - - def test_start_console_handler(self): - self._test_start_console_handler() - - def test_start_console_handler_exception(self): - self._test_start_console_handler(raise_exception=True) - - def test_stop_console_handler(self): - mock_console_handler = self._setup_console_handler_mock() - - self._serialops.stop_console_handler(mock.sentinel.instance_name) - - mock_console_handler.stop.assert_called_once_with() - handler = serialconsoleops._console_handlers.get( - mock.sentinel.instance_name) - self.assertIsNone(handler) - - def test_get_serial_console(self): - mock_console_handler = self._setup_console_handler_mock() - - ret_val = self._serialops.get_serial_console( - mock.sentinel.instance_name) - - self.assertEqual(mock_console_handler.get_serial_console(), - ret_val) - - def test_get_serial_console_exception(self): - self.assertRaises(exception.ConsoleTypeUnavailable, - self._serialops.get_serial_console, - mock.sentinel.instance_name) - - @mock.patch('builtins.open') - @mock.patch("os.path.exists") - def test_get_console_output_exception(self, fake_path_exists, fake_open): - self._serialops._pathutils.get_vm_console_log_paths.return_value = [ - mock.sentinel.log_path_1, mock.sentinel.log_path_2] - fake_open.side_effect = IOError - fake_path_exists.return_value = True - - self.assertRaises(exception.ConsoleLogOutputException, - self._serialops.get_console_output, - mock.sentinel.instance_name) - fake_open.assert_called_once_with(mock.sentinel.log_path_2, 'rb') - - @mock.patch('os.path.exists') - @mock.patch.object(serialconsoleops.SerialConsoleOps, - 'start_console_handler') - def test_start_console_handlers(self, mock_get_instance_dir, mock_exists): - self._serialops._pathutils.get_instance_dir.return_value = [ - mock.sentinel.nova_instance_name, - mock.sentinel.other_instance_name] - mock_exists.side_effect = [True, False] - - self._serialops.start_console_handlers() - - self._serialops._vmutils.get_active_instances.assert_called_once_with() diff --git 
a/nova/tests/unit/virt/hyperv/test_serialproxy.py b/nova/tests/unit/virt/hyperv/test_serialproxy.py deleted file mode 100644 index b7e08a67ddc8..000000000000 --- a/nova/tests/unit/virt/hyperv/test_serialproxy.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import socket -from unittest import mock - - -from nova import exception -from nova.tests.unit.virt.hyperv import test_base -from nova.virt.hyperv import serialproxy - - -class SerialProxyTestCase(test_base.HyperVBaseTestCase): - @mock.patch.object(socket, 'socket') - def setUp(self, mock_socket): - super(SerialProxyTestCase, self).setUp() - - self._mock_socket = mock_socket - self._mock_input_queue = mock.Mock() - self._mock_output_queue = mock.Mock() - self._mock_client_connected = mock.Mock() - - threading_patcher = mock.patch.object(serialproxy, 'threading') - threading_patcher.start() - self.addCleanup(threading_patcher.stop) - - self._proxy = serialproxy.SerialProxy( - mock.sentinel.instance_nane, - mock.sentinel.host, - mock.sentinel.port, - self._mock_input_queue, - self._mock_output_queue, - self._mock_client_connected) - - @mock.patch.object(socket, 'socket') - def test_setup_socket_exception(self, mock_socket): - fake_socket = mock_socket.return_value - - fake_socket.listen.side_effect = socket.error - - self.assertRaises(exception.NovaException, - self._proxy._setup_socket) - - fake_socket.setsockopt.assert_called_once_with(socket.SOL_SOCKET, - socket.SO_REUSEADDR, - 1) - fake_socket.bind.assert_called_once_with((mock.sentinel.host, - mock.sentinel.port)) - - def test_stop_serial_proxy(self): - self._proxy._conn = mock.Mock() - self._proxy._sock = mock.Mock() - - self._proxy.stop() - - self._proxy._stopped.set.assert_called_once_with() - self._proxy._client_connected.clear.assert_called_once_with() - self._proxy._conn.shutdown.assert_called_once_with(socket.SHUT_RDWR) - self._proxy._conn.close.assert_called_once_with() - self._proxy._sock.close.assert_called_once_with() - - @mock.patch.object(serialproxy.SerialProxy, '_accept_conn') - @mock.patch.object(serialproxy.SerialProxy, '_setup_socket') - def test_run(self, mock_setup_socket, mock_accept_con): - self._proxy._stopped = mock.MagicMock() - self._proxy._stopped.isSet.side_effect = [False, True] - - self._proxy.run() - - mock_setup_socket.assert_called_once_with() - mock_accept_con.assert_called_once_with() - - def test_accept_connection(self): - mock_conn = mock.Mock() - self._proxy._sock = mock.Mock() - self._proxy._sock.accept.return_value = [ - mock_conn, (mock.sentinel.client_addr, mock.sentinel.client_port)] - - self._proxy._accept_conn() - - self._proxy._client_connected.set.assert_called_once_with() - mock_conn.close.assert_called_once_with() - self.assertIsNone(self._proxy._conn) - - thread = serialproxy.threading.Thread - for job in [self._proxy._get_data, - self._proxy._send_data]: - thread.assert_any_call(target=job) - - def test_get_data(self): - 
self._mock_client_connected.isSet.return_value = True - self._proxy._conn = mock.Mock() - self._proxy._conn.recv.side_effect = [mock.sentinel.data, None] - - self._proxy._get_data() - - self._mock_client_connected.clear.assert_called_once_with() - self._mock_input_queue.put.assert_called_once_with(mock.sentinel.data) - - def _test_send_data(self, exception=None): - self._mock_client_connected.isSet.side_effect = [True, False] - self._mock_output_queue.get_burst.return_value = mock.sentinel.data - self._proxy._conn = mock.Mock() - self._proxy._conn.sendall.side_effect = exception - - self._proxy._send_data() - - self._proxy._conn.sendall.assert_called_once_with( - mock.sentinel.data) - - if exception: - self._proxy._client_connected.clear.assert_called_once_with() - - def test_send_data(self): - self._test_send_data() - - def test_send_data_exception(self): - self._test_send_data(exception=socket.error) diff --git a/nova/tests/unit/virt/hyperv/test_snapshotops.py b/nova/tests/unit/virt/hyperv/test_snapshotops.py deleted file mode 100644 index 1bb2f8dd4b47..000000000000 --- a/nova/tests/unit/virt/hyperv/test_snapshotops.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -from unittest import mock - - -from nova.compute import task_states -from nova.tests.unit import fake_instance -from nova.tests.unit.virt.hyperv import test_base -from nova.virt.hyperv import snapshotops - - -class SnapshotOpsTestCase(test_base.HyperVBaseTestCase): - """Unit tests for the Hyper-V SnapshotOps class.""" - - def setUp(self): - super(SnapshotOpsTestCase, self).setUp() - - self.context = 'fake_context' - self._snapshotops = snapshotops.SnapshotOps() - self._snapshotops._pathutils = mock.MagicMock() - self._snapshotops._vmutils = mock.MagicMock() - self._snapshotops._vhdutils = mock.MagicMock() - - @mock.patch('nova.image.glance.get_remote_image_service') - def test_save_glance_image(self, mock_get_remote_image_service): - image_metadata = {"disk_format": "vhd", - "container_format": "bare"} - glance_image_service = mock.MagicMock() - mock_get_remote_image_service.return_value = (glance_image_service, - mock.sentinel.IMAGE_ID) - self._snapshotops._save_glance_image(context=self.context, - image_id=mock.sentinel.IMAGE_ID, - image_vhd_path=mock.sentinel.PATH) - mock_get_remote_image_service.assert_called_once_with( - self.context, mock.sentinel.IMAGE_ID) - self._snapshotops._pathutils.open.assert_called_with( - mock.sentinel.PATH, 'rb') - glance_image_service.update.assert_called_once_with( - self.context, mock.sentinel.IMAGE_ID, image_metadata, - self._snapshotops._pathutils.open().__enter__(), - purge_props=False) - - @mock.patch('nova.virt.hyperv.snapshotops.SnapshotOps._save_glance_image') - def _test_snapshot(self, mock_save_glance_image, base_disk_path): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_update = mock.MagicMock() - fake_src_path = os.path.join('fake', 'path') - self._snapshotops._pathutils.lookup_root_vhd_path.return_value = ( - fake_src_path) - fake_exp_dir = os.path.join(os.path.join('fake', 'exp'), 'dir') - self._snapshotops._pathutils.get_export_dir.return_value = fake_exp_dir - self._snapshotops._vhdutils.get_vhd_parent_path.return_value = ( - base_disk_path) - fake_snapshot_path = ( - self._snapshotops._vmutils.take_vm_snapshot.return_value) - - self._snapshotops.snapshot(context=self.context, - instance=mock_instance, - image_id=mock.sentinel.IMAGE_ID, - update_task_state=mock_update) - - self._snapshotops._vmutils.take_vm_snapshot.assert_called_once_with( - mock_instance.name) - mock_lookup_path = self._snapshotops._pathutils.lookup_root_vhd_path - mock_lookup_path.assert_called_once_with(mock_instance.name) - mock_get_vhd_path = self._snapshotops._vhdutils.get_vhd_parent_path - mock_get_vhd_path.assert_called_once_with(fake_src_path) - self._snapshotops._pathutils.get_export_dir.assert_called_once_with( - mock_instance.name) - - expected = [mock.call(fake_src_path, - os.path.join(fake_exp_dir, - os.path.basename(fake_src_path)))] - dest_vhd_path = os.path.join(fake_exp_dir, - os.path.basename(fake_src_path)) - if base_disk_path: - basename = os.path.basename(base_disk_path) - base_dest_disk_path = os.path.join(fake_exp_dir, basename) - expected.append(mock.call(base_disk_path, base_dest_disk_path)) - mock_reconnect = self._snapshotops._vhdutils.reconnect_parent_vhd - mock_reconnect.assert_called_once_with(dest_vhd_path, - base_dest_disk_path) - self._snapshotops._vhdutils.merge_vhd.assert_called_once_with( - dest_vhd_path) - mock_save_glance_image.assert_called_once_with( - self.context, mock.sentinel.IMAGE_ID, base_dest_disk_path) - else: - mock_save_glance_image.assert_called_once_with( - self.context, 
mock.sentinel.IMAGE_ID, dest_vhd_path) - self.assertEqual(len(expected), - self._snapshotops._pathutils.copyfile.call_count) - self._snapshotops._pathutils.copyfile.assert_has_calls(expected) - self.assertEqual(2, mock_update.call_count) - expected_update = [ - mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD), - mock.call(task_state=task_states.IMAGE_UPLOADING, - expected_state=task_states.IMAGE_PENDING_UPLOAD)] - mock_update.assert_has_calls(expected_update) - self._snapshotops._vmutils.remove_vm_snapshot.assert_called_once_with( - fake_snapshot_path) - self._snapshotops._pathutils.rmtree.assert_called_once_with( - fake_exp_dir) - - def test_snapshot(self): - base_disk_path = os.path.join('fake', 'disk') - self._test_snapshot(base_disk_path=base_disk_path) - - def test_snapshot_no_base_disk(self): - self._test_snapshot(base_disk_path=None) diff --git a/nova/tests/unit/virt/hyperv/test_vif.py b/nova/tests/unit/virt/hyperv/test_vif.py deleted file mode 100644 index d4c8d7af58d8..000000000000 --- a/nova/tests/unit/virt/hyperv/test_vif.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -import nova.conf -from nova import exception -from nova.network import model -from nova.tests.unit.virt.hyperv import test_base -from nova.virt.hyperv import vif - -CONF = nova.conf.CONF - - -class HyperVVIFDriverTestCase(test_base.HyperVBaseTestCase): - def setUp(self): - super(HyperVVIFDriverTestCase, self).setUp() - self.vif_driver = vif.HyperVVIFDriver() - self.vif_driver._netutils = mock.MagicMock() - - def test_plug(self): - vif = {'type': model.VIF_TYPE_HYPERV} - # this is handled by neutron so just assert it doesn't blow up - self.vif_driver.plug(mock.sentinel.instance, vif) - - @mock.patch.object(vif, 'os_vif') - @mock.patch.object(vif.os_vif_util, 'nova_to_osvif_instance') - @mock.patch.object(vif.os_vif_util, 'nova_to_osvif_vif') - def test_plug_ovs(self, mock_nova_to_osvif_vif, - mock_nova_to_osvif_instance, mock_os_vif): - vif = {'type': model.VIF_TYPE_OVS} - self.vif_driver.plug(mock.sentinel.instance, vif) - - mock_nova_to_osvif_vif.assert_called_once_with(vif) - mock_nova_to_osvif_instance.assert_called_once_with( - mock.sentinel.instance) - connect_vnic = self.vif_driver._netutils.connect_vnic_to_vswitch - connect_vnic.assert_called_once_with( - CONF.hyperv.vswitch_name, mock_nova_to_osvif_vif.return_value.id) - mock_os_vif.plug.assert_called_once_with( - mock_nova_to_osvif_vif.return_value, - mock_nova_to_osvif_instance.return_value) - - def test_plug_type_unknown(self): - vif = {'type': mock.sentinel.vif_type} - self.assertRaises(exception.VirtualInterfacePlugException, - self.vif_driver.plug, - mock.sentinel.instance, vif) - - def test_unplug(self): - vif = {'type': model.VIF_TYPE_HYPERV} - # this is handled by neutron so just assert it doesn't blow up - self.vif_driver.unplug(mock.sentinel.instance, vif) - - @mock.patch.object(vif, 'os_vif') - 
@mock.patch.object(vif.os_vif_util, 'nova_to_osvif_instance') - @mock.patch.object(vif.os_vif_util, 'nova_to_osvif_vif') - def test_unplug_ovs(self, mock_nova_to_osvif_vif, - mock_nova_to_osvif_instance, mock_os_vif): - vif = {'type': model.VIF_TYPE_OVS} - self.vif_driver.unplug(mock.sentinel.instance, vif) - - mock_nova_to_osvif_vif.assert_called_once_with(vif) - mock_nova_to_osvif_instance.assert_called_once_with( - mock.sentinel.instance) - mock_os_vif.unplug.assert_called_once_with( - mock_nova_to_osvif_vif.return_value, - mock_nova_to_osvif_instance.return_value) - - def test_unplug_type_unknown(self): - vif = {'type': mock.sentinel.vif_type} - self.assertRaises(exception.VirtualInterfaceUnplugException, - self.vif_driver.unplug, - mock.sentinel.instance, vif) diff --git a/nova/tests/unit/virt/hyperv/test_vmops.py b/nova/tests/unit/virt/hyperv/test_vmops.py deleted file mode 100644 index 1e3e50f92b24..000000000000 --- a/nova/tests/unit/virt/hyperv/test_vmops.py +++ /dev/null @@ -1,1844 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -from unittest import mock - -import ddt -from eventlet import timeout as etimeout -from os_win import constants as os_win_const -from os_win import exceptions as os_win_exc -from oslo_concurrency import processutils -from oslo_config import cfg -from oslo_utils import fileutils -from oslo_utils import units - -from nova.compute import vm_states -from nova import exception -from nova import objects -from nova.objects import fields -from nova.objects import flavor as flavor_obj -from nova.tests.unit import fake_instance -from nova.tests.unit.objects import test_flavor -from nova.tests.unit.objects import test_virtual_interface -from nova.tests.unit.virt.hyperv import test_base -from nova.virt import hardware -from nova.virt.hyperv import constants -from nova.virt.hyperv import vmops -from nova.virt.hyperv import volumeops - -CONF = cfg.CONF - - -@ddt.ddt -class VMOpsTestCase(test_base.HyperVBaseTestCase): - """Unit tests for the Hyper-V VMOps class.""" - - _FAKE_TIMEOUT = 2 - FAKE_SIZE = 10 - FAKE_DIR = 'fake_dir' - FAKE_ROOT_PATH = 'C:\\path\\to\\fake.%s' - FAKE_CONFIG_DRIVE_ISO = 'configdrive.iso' - FAKE_CONFIG_DRIVE_VHD = 'configdrive.vhd' - FAKE_UUID = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3' - FAKE_LOG = 'fake_log' - - _WIN_VERSION_6_3 = '6.3.0' - _WIN_VERSION_10 = '10.0' - - ISO9660 = 'iso9660' - _FAKE_CONFIGDRIVE_PATH = 'C:/fake_instance_dir/configdrive.vhd' - - def setUp(self): - super(VMOpsTestCase, self).setUp() - self.context = 'fake-context' - - self._vmops = vmops.VMOps(virtapi=mock.MagicMock()) - self._vmops._vmutils = mock.MagicMock() - self._vmops._metricsutils = mock.MagicMock() - self._vmops._vhdutils = mock.MagicMock() - self._vmops._pathutils = mock.MagicMock() - self._vmops._hostutils = mock.MagicMock() - self._vmops._migrutils = mock.MagicMock() - self._vmops._serial_console_ops = mock.MagicMock() - self._vmops._block_dev_man = mock.MagicMock() - self._vmops._vif_driver = mock.MagicMock() - - 
def test_list_instances(self): - mock_instance = mock.MagicMock() - self._vmops._vmutils.list_instances.return_value = [mock_instance] - response = self._vmops.list_instances() - self._vmops._vmutils.list_instances.assert_called_once_with() - self.assertEqual(response, [mock_instance]) - - def _test_get_info(self, vm_exists): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_info = mock.MagicMock(spec_set=dict) - fake_info = {'EnabledState': 2, - 'MemoryUsage': mock.sentinel.FAKE_MEM_KB, - 'NumberOfProcessors': mock.sentinel.FAKE_NUM_CPU, - 'UpTime': mock.sentinel.FAKE_CPU_NS} - - def getitem(key): - return fake_info[key] - mock_info.__getitem__.side_effect = getitem - - expected = hardware.InstanceInfo(state=constants.HYPERV_POWER_STATE[2]) - - self._vmops._vmutils.vm_exists.return_value = vm_exists - self._vmops._vmutils.get_vm_summary_info.return_value = mock_info - - if not vm_exists: - self.assertRaises(exception.InstanceNotFound, - self._vmops.get_info, mock_instance) - else: - response = self._vmops.get_info(mock_instance) - self._vmops._vmutils.vm_exists.assert_called_once_with( - mock_instance.name) - self._vmops._vmutils.get_vm_summary_info.assert_called_once_with( - mock_instance.name) - self.assertEqual(response, expected) - - def test_get_info(self): - self._test_get_info(vm_exists=True) - - def test_get_info_exception(self): - self._test_get_info(vm_exists=False) - - @mock.patch.object(vmops.VMOps, 'check_vm_image_type') - @mock.patch.object(vmops.VMOps, '_create_root_vhd') - def test_create_root_device_type_disk(self, mock_create_root_device, - mock_check_vm_image_type): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_root_disk_info = {'type': constants.DISK} - - self._vmops._create_root_device(self.context, mock_instance, - mock_root_disk_info, - mock.sentinel.VM_GEN_1) - - mock_create_root_device.assert_called_once_with( - self.context, mock_instance) - mock_check_vm_image_type.assert_called_once_with( - mock_instance.uuid, mock.sentinel.VM_GEN_1, - mock_create_root_device.return_value) - - def _prepare_create_root_device_mocks(self, use_cow_images, vhd_format, - vhd_size): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.flavor.root_gb = self.FAKE_SIZE - self.flags(use_cow_images=use_cow_images) - self._vmops._vhdutils.get_vhd_info.return_value = {'VirtualSize': - vhd_size * units.Gi} - self._vmops._vhdutils.get_vhd_format.return_value = vhd_format - root_vhd_internal_size = mock_instance.flavor.root_gb * units.Gi - get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size - get_size.return_value = root_vhd_internal_size - self._vmops._pathutils.exists.return_value = True - - return mock_instance - - @mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image') - def _test_create_root_vhd_exception(self, mock_get_cached_image, - vhd_format): - mock_instance = self._prepare_create_root_device_mocks( - use_cow_images=False, vhd_format=vhd_format, - vhd_size=(self.FAKE_SIZE + 1)) - fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format - mock_get_cached_image.return_value = fake_vhd_path - fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value - - self.assertRaises(exception.FlavorDiskSmallerThanImage, - self._vmops._create_root_vhd, self.context, - mock_instance) - - self.assertFalse(self._vmops._vhdutils.resize_vhd.called) - self._vmops._pathutils.exists.assert_called_once_with( - fake_root_path) - self._vmops._pathutils.remove.assert_called_once_with( - fake_root_path) 
- - @mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image') - def _test_create_root_vhd_qcow(self, mock_get_cached_image, vhd_format): - mock_instance = self._prepare_create_root_device_mocks( - use_cow_images=True, vhd_format=vhd_format, - vhd_size=(self.FAKE_SIZE - 1)) - fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format - mock_get_cached_image.return_value = fake_vhd_path - - fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value - root_vhd_internal_size = mock_instance.flavor.root_gb * units.Gi - get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size - - response = self._vmops._create_root_vhd(context=self.context, - instance=mock_instance) - - self.assertEqual(fake_root_path, response) - self._vmops._pathutils.get_root_vhd_path.assert_called_with( - mock_instance.name, vhd_format, False) - differencing_vhd = self._vmops._vhdutils.create_differencing_vhd - differencing_vhd.assert_called_with(fake_root_path, fake_vhd_path) - self._vmops._vhdutils.get_vhd_info.assert_called_once_with( - fake_vhd_path) - - if vhd_format is constants.DISK_FORMAT_VHD: - self.assertFalse(get_size.called) - self.assertFalse(self._vmops._vhdutils.resize_vhd.called) - else: - get_size.assert_called_once_with(fake_vhd_path, - root_vhd_internal_size) - self._vmops._vhdutils.resize_vhd.assert_called_once_with( - fake_root_path, root_vhd_internal_size, is_file_max_size=False) - - @mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image') - def _test_create_root_vhd(self, mock_get_cached_image, vhd_format, - is_rescue_vhd=False): - mock_instance = self._prepare_create_root_device_mocks( - use_cow_images=False, vhd_format=vhd_format, - vhd_size=(self.FAKE_SIZE - 1)) - fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format - mock_get_cached_image.return_value = fake_vhd_path - rescue_image_id = ( - mock.sentinel.rescue_image_id if is_rescue_vhd else None) - - fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value - root_vhd_internal_size = mock_instance.flavor.root_gb * units.Gi - get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size - - response = self._vmops._create_root_vhd( - context=self.context, - instance=mock_instance, - rescue_image_id=rescue_image_id) - - self.assertEqual(fake_root_path, response) - mock_get_cached_image.assert_called_once_with(self.context, - mock_instance, - rescue_image_id) - self._vmops._pathutils.get_root_vhd_path.assert_called_with( - mock_instance.name, vhd_format, is_rescue_vhd) - - self._vmops._pathutils.copyfile.assert_called_once_with( - fake_vhd_path, fake_root_path) - get_size.assert_called_once_with(fake_vhd_path, root_vhd_internal_size) - if is_rescue_vhd: - self.assertFalse(self._vmops._vhdutils.resize_vhd.called) - else: - self._vmops._vhdutils.resize_vhd.assert_called_once_with( - fake_root_path, root_vhd_internal_size, - is_file_max_size=False) - - def test_create_root_vhd(self): - self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHD) - - def test_create_root_vhdx(self): - self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHDX) - - def test_create_root_vhd_use_cow_images_true(self): - self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHD) - - def test_create_root_vhdx_use_cow_images_true(self): - self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHDX) - - def test_create_rescue_vhd(self): - self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHD, - is_rescue_vhd=True) - - def test_create_root_vhdx_size_less_than_internal(self): - 
self._test_create_root_vhd_exception( - vhd_format=constants.DISK_FORMAT_VHD) - - def test_is_resize_needed_exception(self): - inst = mock.MagicMock() - self.assertRaises( - exception.FlavorDiskSmallerThanImage, - self._vmops._is_resize_needed, - mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE - 1, inst) - - def test_is_resize_needed_true(self): - inst = mock.MagicMock() - self.assertTrue(self._vmops._is_resize_needed( - mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE + 1, inst)) - - def test_is_resize_needed_false(self): - inst = mock.MagicMock() - self.assertFalse(self._vmops._is_resize_needed( - mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE, inst)) - - @mock.patch.object(vmops.VMOps, 'create_ephemeral_disk') - def test_create_ephemerals(self, mock_create_ephemeral_disk): - mock_instance = fake_instance.fake_instance_obj(self.context) - - fake_ephemerals = [dict(), dict()] - self._vmops._vhdutils.get_best_supported_vhd_format.return_value = ( - mock.sentinel.format) - self._vmops._pathutils.get_ephemeral_vhd_path.side_effect = [ - mock.sentinel.FAKE_PATH0, mock.sentinel.FAKE_PATH1] - - self._vmops._create_ephemerals(mock_instance, fake_ephemerals) - - self._vmops._pathutils.get_ephemeral_vhd_path.assert_has_calls( - [mock.call(mock_instance.name, mock.sentinel.format, 'eph0'), - mock.call(mock_instance.name, mock.sentinel.format, 'eph1')]) - mock_create_ephemeral_disk.assert_has_calls( - [mock.call(mock_instance.name, fake_ephemerals[0]), - mock.call(mock_instance.name, fake_ephemerals[1])]) - - def test_create_ephemeral_disk(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_ephemeral_info = {'path': 'fake_eph_path', - 'size': 10} - - self._vmops.create_ephemeral_disk(mock_instance.name, - mock_ephemeral_info) - - mock_create_dynamic_vhd = self._vmops._vhdutils.create_dynamic_vhd - mock_create_dynamic_vhd.assert_called_once_with('fake_eph_path', - 10 * units.Gi) - - @mock.patch.object(vmops.objects, 'PCIDeviceBus') - @mock.patch.object(vmops.objects, 'NetworkInterfaceMetadata') - @mock.patch.object(vmops.objects.VirtualInterfaceList, - 'get_by_instance_uuid') - def test_get_vif_metadata(self, mock_get_by_inst_uuid, - mock_NetworkInterfaceMetadata, mock_PCIDevBus): - mock_vif = mock.MagicMock(tag='taggy') - mock_vif.__contains__.side_effect = ( - lambda attr: getattr(mock_vif, attr, None) is not None) - mock_get_by_inst_uuid.return_value = [mock_vif, - mock.MagicMock(tag=None)] - - vif_metadata = self._vmops._get_vif_metadata(self.context, - mock.sentinel.instance_id) - - mock_get_by_inst_uuid.assert_called_once_with( - self.context, mock.sentinel.instance_id) - mock_NetworkInterfaceMetadata.assert_called_once_with( - mac=mock_vif.address, - bus=mock_PCIDevBus.return_value, - tags=[mock_vif.tag]) - self.assertEqual([mock_NetworkInterfaceMetadata.return_value], - vif_metadata) - - @mock.patch.object(vmops.objects, 'InstanceDeviceMetadata') - @mock.patch.object(vmops.VMOps, '_get_vif_metadata') - def test_save_device_metadata(self, mock_get_vif_metadata, - mock_InstanceDeviceMetadata): - mock_instance = mock.MagicMock() - mock_get_vif_metadata.return_value = [mock.sentinel.vif_metadata] - self._vmops._block_dev_man.get_bdm_metadata.return_value = [ - mock.sentinel.bdm_metadata] - - self._vmops._save_device_metadata(self.context, mock_instance, - mock.sentinel.block_device_info) - - mock_get_vif_metadata.assert_called_once_with(self.context, - mock_instance.uuid) - self._vmops._block_dev_man.get_bdm_metadata.assert_called_once_with( - 
self.context, mock_instance, mock.sentinel.block_device_info) - - expected_metadata = [mock.sentinel.vif_metadata, - mock.sentinel.bdm_metadata] - mock_InstanceDeviceMetadata.assert_called_once_with( - devices=expected_metadata) - self.assertEqual(mock_InstanceDeviceMetadata.return_value, - mock_instance.device_metadata) - - def test_set_boot_order(self): - self._vmops.set_boot_order(mock.sentinel.instance_name, - mock.sentinel.vm_gen, - mock.sentinel.bdi) - - mock_get_boot_order = self._vmops._block_dev_man.get_boot_order - mock_get_boot_order.assert_called_once_with( - mock.sentinel.vm_gen, mock.sentinel.bdi) - self._vmops._vmutils.set_boot_order.assert_called_once_with( - mock.sentinel.instance_name, mock_get_boot_order.return_value) - - @mock.patch.object(vmops.VMOps, 'plug_vifs') - @mock.patch('nova.virt.hyperv.vmops.VMOps.destroy') - @mock.patch('nova.virt.hyperv.vmops.VMOps.power_on') - @mock.patch('nova.virt.hyperv.vmops.VMOps.set_boot_order') - @mock.patch('nova.virt.hyperv.vmops.VMOps.attach_config_drive') - @mock.patch('nova.virt.hyperv.vmops.VMOps._create_config_drive') - @mock.patch('nova.virt.configdrive.required_by') - @mock.patch('nova.virt.hyperv.vmops.VMOps._save_device_metadata') - @mock.patch('nova.virt.hyperv.vmops.VMOps.create_instance') - @mock.patch('nova.virt.hyperv.vmops.VMOps.get_image_vm_generation') - @mock.patch('nova.virt.hyperv.vmops.VMOps._create_ephemerals') - @mock.patch('nova.virt.hyperv.vmops.VMOps._create_root_device') - @mock.patch('nova.virt.hyperv.vmops.VMOps._delete_disk_files') - @mock.patch('nova.virt.hyperv.vmops.VMOps._get_neutron_events', - return_value=[]) - def _test_spawn(self, mock_get_neutron_events, - mock_delete_disk_files, mock_create_root_device, - mock_create_ephemerals, mock_get_image_vm_gen, - mock_create_instance, mock_save_device_metadata, - mock_configdrive_required, - mock_create_config_drive, mock_attach_config_drive, - mock_set_boot_order, - mock_power_on, mock_destroy, mock_plug_vifs, - exists, configdrive_required, fail, - fake_vm_gen=constants.VM_GEN_2): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_image_meta = mock.MagicMock() - root_device_info = mock.sentinel.ROOT_DEV_INFO - mock_get_image_vm_gen.return_value = fake_vm_gen - fake_config_drive_path = mock_create_config_drive.return_value - block_device_info = {'ephemerals': [], 'root_disk': root_device_info} - - self._vmops._vmutils.vm_exists.return_value = exists - mock_configdrive_required.return_value = configdrive_required - mock_create_instance.side_effect = fail - if exists: - self.assertRaises(exception.InstanceExists, self._vmops.spawn, - self.context, mock_instance, mock_image_meta, - [mock.sentinel.FILE], mock.sentinel.PASSWORD, - mock.sentinel.network_info, block_device_info) - elif fail is os_win_exc.HyperVException: - self.assertRaises(os_win_exc.HyperVException, self._vmops.spawn, - self.context, mock_instance, mock_image_meta, - [mock.sentinel.FILE], mock.sentinel.PASSWORD, - mock.sentinel.network_info, block_device_info) - mock_destroy.assert_called_once_with(mock_instance, - mock.sentinel.network_info, - block_device_info) - else: - self._vmops.spawn(self.context, mock_instance, mock_image_meta, - [mock.sentinel.FILE], mock.sentinel.PASSWORD, - mock.sentinel.network_info, block_device_info) - self._vmops._vmutils.vm_exists.assert_called_once_with( - mock_instance.name) - mock_delete_disk_files.assert_called_once_with( - mock_instance.name) - mock_validate_and_update_bdi = ( - self._vmops._block_dev_man.validate_and_update_bdi) - 
mock_validate_and_update_bdi.assert_called_once_with( - mock_instance, mock_image_meta, fake_vm_gen, block_device_info) - mock_create_root_device.assert_called_once_with(self.context, - mock_instance, - root_device_info, - fake_vm_gen) - mock_create_ephemerals.assert_called_once_with( - mock_instance, block_device_info['ephemerals']) - mock_get_neutron_events.assert_called_once_with( - mock.sentinel.network_info) - mock_get_image_vm_gen.assert_called_once_with(mock_instance.uuid, - mock_image_meta) - mock_create_instance.assert_called_once_with( - mock_instance, mock.sentinel.network_info, root_device_info, - block_device_info, fake_vm_gen, mock_image_meta) - mock_plug_vifs.assert_called_once_with(mock_instance, - mock.sentinel.network_info) - mock_save_device_metadata.assert_called_once_with( - self.context, mock_instance, block_device_info) - mock_configdrive_required.assert_called_once_with(mock_instance) - if configdrive_required: - mock_create_config_drive.assert_called_once_with( - self.context, mock_instance, [mock.sentinel.FILE], - mock.sentinel.PASSWORD, - mock.sentinel.network_info) - mock_attach_config_drive.assert_called_once_with( - mock_instance, fake_config_drive_path, fake_vm_gen) - mock_set_boot_order.assert_called_once_with( - mock_instance.name, fake_vm_gen, block_device_info) - mock_power_on.assert_called_once_with( - mock_instance, - network_info=mock.sentinel.network_info, - should_plug_vifs=False) - - def test_spawn(self): - self._test_spawn(exists=False, configdrive_required=True, fail=None) - - def test_spawn_instance_exists(self): - self._test_spawn(exists=True, configdrive_required=True, fail=None) - - def test_spawn_create_instance_exception(self): - self._test_spawn(exists=False, configdrive_required=True, - fail=os_win_exc.HyperVException) - - def test_spawn_not_required(self): - self._test_spawn(exists=False, configdrive_required=False, fail=None) - - def test_spawn_no_admin_permissions(self): - self._vmops._vmutils.check_admin_permissions.side_effect = ( - os_win_exc.HyperVException) - self.assertRaises(os_win_exc.HyperVException, - self._vmops.spawn, - self.context, mock.DEFAULT, mock.DEFAULT, - [mock.sentinel.FILE], mock.sentinel.PASSWORD, - mock.sentinel.INFO, mock.sentinel.DEV_INFO) - - @mock.patch.object(vmops.VMOps, '_get_neutron_events') - def test_wait_vif_plug_events(self, mock_get_events): - self._vmops._virtapi.wait_for_instance_event.side_effect = ( - etimeout.Timeout) - self.flags(vif_plugging_timeout=1) - self.flags(vif_plugging_is_fatal=True) - - def _context_user(): - with self._vmops.wait_vif_plug_events(mock.sentinel.instance, - mock.sentinel.network_info): - pass - - self.assertRaises(exception.VirtualInterfaceCreateException, - _context_user) - - mock_get_events.assert_called_once_with(mock.sentinel.network_info) - self._vmops._virtapi.wait_for_instance_event.assert_called_once_with( - mock.sentinel.instance, mock_get_events.return_value, - deadline=CONF.vif_plugging_timeout, - error_callback=self._vmops._neutron_failed_callback) - - @mock.patch.object(vmops.VMOps, '_get_neutron_events') - def test_wait_vif_plug_events_port_binding_failed(self, mock_get_events): - mock_get_events.side_effect = exception.PortBindingFailed( - port_id='fake_id') - - def _context_user(): - with self._vmops.wait_vif_plug_events(mock.sentinel.instance, - mock.sentinel.network_info): - pass - - self.assertRaises(exception.PortBindingFailed, _context_user) - - def test_neutron_failed_callback(self): - self.flags(vif_plugging_is_fatal=True) - 
self.assertRaises(exception.VirtualInterfaceCreateException, - self._vmops._neutron_failed_callback, - mock.sentinel.event_name, mock.sentinel.instance) - - def test_get_neutron_events(self): - network_info = [{'id': mock.sentinel.vif_id1, 'active': True}, - {'id': mock.sentinel.vif_id2, 'active': False}, - {'id': mock.sentinel.vif_id3}] - - events = self._vmops._get_neutron_events(network_info) - self.assertEqual([('network-vif-plugged', mock.sentinel.vif_id2)], - events) - - def test_get_neutron_events_no_timeout(self): - self.flags(vif_plugging_timeout=0) - network_info = [{'id': mock.sentinel.vif_id1, 'active': True}] - - events = self._vmops._get_neutron_events(network_info) - self.assertEqual([], events) - - @mock.patch.object(vmops.version, 'product_string') - @mock.patch.object(vmops.VMOps, '_attach_pci_devices') - @mock.patch.object(vmops.VMOps, '_requires_secure_boot') - @mock.patch.object(vmops.VMOps, '_requires_certificate') - @mock.patch.object(vmops.VMOps, '_get_instance_vnuma_config') - @mock.patch('nova.virt.hyperv.volumeops.VolumeOps' - '.attach_volumes') - @mock.patch.object(vmops.VMOps, '_set_instance_disk_qos_specs') - @mock.patch.object(vmops.VMOps, '_create_vm_com_port_pipes') - @mock.patch.object(vmops.VMOps, '_attach_ephemerals') - @mock.patch.object(vmops.VMOps, '_attach_root_device') - @mock.patch.object(vmops.VMOps, '_configure_remotefx') - def _test_create_instance(self, mock_configure_remotefx, - mock_attach_root_device, - mock_attach_ephemerals, - mock_create_pipes, - mock_set_qos_specs, - mock_attach_volumes, - mock_get_vnuma_config, - mock_requires_certificate, - mock_requires_secure_boot, - mock_attach_pci_devices, - mock_product_string, - enable_instance_metrics, - vm_gen=constants.VM_GEN_1, - vnuma_enabled=False, - pci_requests=None): - self.flags(dynamic_memory_ratio=2.0, group='hyperv') - self.flags(enable_instance_metrics_collection=enable_instance_metrics, - group='hyperv') - root_device_info = mock.sentinel.ROOT_DEV_INFO - block_device_info = {'ephemerals': [], 'block_device_mapping': []} - fake_network_info = {'id': mock.sentinel.ID, - 'address': mock.sentinel.ADDRESS} - mock_instance = fake_instance.fake_instance_obj(self.context) - instance_path = os.path.join(CONF.instances_path, mock_instance.name) - mock_requires_secure_boot.return_value = True - - flavor = flavor_obj.Flavor(**test_flavor.fake_flavor) - mock_instance.flavor = flavor - instance_pci_requests = objects.InstancePCIRequests( - requests=pci_requests or [], instance_uuid=mock_instance.uuid) - mock_instance.pci_requests = instance_pci_requests - host_shutdown_action = (os_win_const.HOST_SHUTDOWN_ACTION_SHUTDOWN - if pci_requests else None) - - if vnuma_enabled: - mock_get_vnuma_config.return_value = ( - mock.sentinel.mem_per_numa, mock.sentinel.cpus_per_numa) - cpus_per_numa = mock.sentinel.cpus_per_numa - mem_per_numa = mock.sentinel.mem_per_numa - dynamic_memory_ratio = 1.0 - else: - mock_get_vnuma_config.return_value = (None, None) - mem_per_numa, cpus_per_numa = (None, None) - dynamic_memory_ratio = CONF.hyperv.dynamic_memory_ratio - - self._vmops.create_instance(instance=mock_instance, - network_info=[fake_network_info], - root_device=root_device_info, - block_device_info=block_device_info, - vm_gen=vm_gen, - image_meta=mock.sentinel.image_meta) - - mock_get_vnuma_config.assert_called_once_with(mock_instance, - mock.sentinel.image_meta) - self._vmops._vmutils.create_vm.assert_called_once_with( - mock_instance.name, vnuma_enabled, vm_gen, - instance_path, [mock_instance.uuid]) - 
self._vmops._vmutils.update_vm.assert_called_once_with( - mock_instance.name, mock_instance.flavor.memory_mb, mem_per_numa, - mock_instance.flavor.vcpus, cpus_per_numa, - CONF.hyperv.limit_cpu_features, dynamic_memory_ratio, - host_shutdown_action=host_shutdown_action, - chassis_asset_tag=mock_product_string.return_value) - - mock_configure_remotefx.assert_called_once_with(mock_instance, vm_gen) - mock_create_scsi_ctrl = self._vmops._vmutils.create_scsi_controller - mock_create_scsi_ctrl.assert_called_once_with(mock_instance.name) - - mock_attach_root_device.assert_called_once_with(mock_instance.name, - root_device_info) - mock_attach_ephemerals.assert_called_once_with(mock_instance.name, - block_device_info['ephemerals']) - mock_attach_volumes.assert_called_once_with( - block_device_info['block_device_mapping'], mock_instance.name) - - self._vmops._vmutils.create_nic.assert_called_once_with( - mock_instance.name, mock.sentinel.ID, mock.sentinel.ADDRESS) - mock_enable = self._vmops._metricsutils.enable_vm_metrics_collection - if enable_instance_metrics: - mock_enable.assert_called_once_with(mock_instance.name) - mock_set_qos_specs.assert_called_once_with(mock_instance) - mock_requires_secure_boot.assert_called_once_with( - mock_instance, mock.sentinel.image_meta, vm_gen) - mock_requires_certificate.assert_called_once_with( - mock.sentinel.image_meta) - enable_secure_boot = self._vmops._vmutils.enable_secure_boot - enable_secure_boot.assert_called_once_with( - mock_instance.name, - msft_ca_required=mock_requires_certificate.return_value) - mock_attach_pci_devices.assert_called_once_with(mock_instance) - - def test_create_instance(self): - self._test_create_instance(enable_instance_metrics=True) - - def test_create_instance_enable_instance_metrics_false(self): - self._test_create_instance(enable_instance_metrics=False) - - def test_create_instance_gen2(self): - self._test_create_instance(enable_instance_metrics=False, - vm_gen=constants.VM_GEN_2) - - def test_create_instance_vnuma_enabled(self): - self._test_create_instance(enable_instance_metrics=False, - vnuma_enabled=True) - - def test_create_instance_pci_requested(self): - vendor_id = 'fake_vendor_id' - product_id = 'fake_product_id' - spec = {'vendor_id': vendor_id, 'product_id': product_id} - request = objects.InstancePCIRequest(count=1, spec=[spec]) - self._test_create_instance(enable_instance_metrics=False, - pci_requests=[request]) - - def test_attach_pci_devices(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - vendor_id = 'fake_vendor_id' - product_id = 'fake_product_id' - spec = {'vendor_id': vendor_id, 'product_id': product_id} - request = objects.InstancePCIRequest(count=2, spec=[spec]) - instance_pci_requests = objects.InstancePCIRequests( - requests=[request], instance_uuid=mock_instance.uuid) - mock_instance.pci_requests = instance_pci_requests - - self._vmops._attach_pci_devices(mock_instance) - - self._vmops._vmutils.add_pci_device.assert_has_calls( - [mock.call(mock_instance.name, vendor_id, product_id)] * 2) - - @mock.patch.object(vmops.hardware, 'numa_get_constraints') - def _check_get_instance_vnuma_config_exception(self, mock_get_numa, - numa_cells): - flavor = {'extra_specs': {}} - mock_instance = mock.MagicMock(flavor=flavor) - image_meta = mock.MagicMock(properties={}) - numa_topology = objects.InstanceNUMATopology(cells=numa_cells) - mock_get_numa.return_value = numa_topology - - self.assertRaises(exception.InstanceUnacceptable, - self._vmops._get_instance_vnuma_config, - mock_instance, 
image_meta) - - def test_get_instance_vnuma_config_bad_cpuset(self): - cell1 = objects.InstanceNUMACell( - cpuset=set([0]), pcpuset=set(), memory=1024) - cell2 = objects.InstanceNUMACell( - cpuset=set([1, 2]), pcpuset=set(), memory=1024) - self._check_get_instance_vnuma_config_exception( - numa_cells=[cell1, cell2]) - - def test_get_instance_vnuma_config_bad_memory(self): - cell1 = objects.InstanceNUMACell( - cpuset=set([0]), pcpuset=set(), memory=1024) - cell2 = objects.InstanceNUMACell( - cpuset=set([1]), pcpuset=set(), memory=2048) - self._check_get_instance_vnuma_config_exception( - numa_cells=[cell1, cell2]) - - def test_get_instance_vnuma_config_cpu_pinning(self): - cell1 = objects.InstanceNUMACell( - cpuset=set([0]), pcpuset=set(), memory=1024, - cpu_policy=fields.CPUAllocationPolicy.DEDICATED) - cell2 = objects.InstanceNUMACell( - cpuset=set([1]), pcpuset=set(), memory=1024, - cpu_policy=fields.CPUAllocationPolicy.DEDICATED) - self._check_get_instance_vnuma_config_exception( - numa_cells=[cell1, cell2]) - - @mock.patch.object(vmops.hardware, 'numa_get_constraints') - def _check_get_instance_vnuma_config( - self, mock_get_numa, numa_topology=None, - expected_mem_per_numa=None, expected_cpus_per_numa=None): - mock_instance = mock.MagicMock() - image_meta = mock.MagicMock() - mock_get_numa.return_value = numa_topology - - result_memory_per_numa, result_cpus_per_numa = ( - self._vmops._get_instance_vnuma_config(mock_instance, image_meta)) - - self.assertEqual(expected_cpus_per_numa, result_cpus_per_numa) - self.assertEqual(expected_mem_per_numa, result_memory_per_numa) - - def test_get_instance_vnuma_config(self): - cell1 = objects.InstanceNUMACell( - cpuset=set([0]), pcpuset=set(), memory=2048) - cell2 = objects.InstanceNUMACell( - cpuset=set([1]), pcpuset=set(), memory=2048) - numa_topology = objects.InstanceNUMATopology(cells=[cell1, cell2]) - self._check_get_instance_vnuma_config(numa_topology=numa_topology, - expected_cpus_per_numa=1, - expected_mem_per_numa=2048) - - def test_get_instance_vnuma_config_no_topology(self): - self._check_get_instance_vnuma_config() - - @mock.patch.object(vmops.volumeops.VolumeOps, 'attach_volume') - def test_attach_root_device_volume(self, mock_attach_volume): - mock_instance = fake_instance.fake_instance_obj(self.context) - root_device_info = {'type': constants.VOLUME, - 'connection_info': mock.sentinel.CONN_INFO, - 'disk_bus': constants.CTRL_TYPE_IDE} - - self._vmops._attach_root_device(mock_instance.name, root_device_info) - - mock_attach_volume.assert_called_once_with( - root_device_info['connection_info'], mock_instance.name, - disk_bus=root_device_info['disk_bus']) - - @mock.patch.object(vmops.VMOps, '_attach_drive') - def test_attach_root_device_disk(self, mock_attach_drive): - mock_instance = fake_instance.fake_instance_obj(self.context) - root_device_info = {'type': constants.DISK, - 'boot_index': 0, - 'disk_bus': constants.CTRL_TYPE_IDE, - 'path': 'fake_path', - 'drive_addr': 0, - 'ctrl_disk_addr': 1} - - self._vmops._attach_root_device(mock_instance.name, root_device_info) - - mock_attach_drive.assert_called_once_with( - mock_instance.name, root_device_info['path'], - root_device_info['drive_addr'], root_device_info['ctrl_disk_addr'], - root_device_info['disk_bus'], root_device_info['type']) - - @mock.patch.object(vmops.VMOps, '_attach_drive') - def test_attach_ephemerals(self, mock_attach_drive): - mock_instance = fake_instance.fake_instance_obj(self.context) - - ephemerals = [{'path': mock.sentinel.PATH1, - 'boot_index': 1, - 'disk_bus': 
constants.CTRL_TYPE_IDE, - 'device_type': 'disk', - 'drive_addr': 0, - 'ctrl_disk_addr': 1}, - {'path': mock.sentinel.PATH2, - 'boot_index': 2, - 'disk_bus': constants.CTRL_TYPE_SCSI, - 'device_type': 'disk', - 'drive_addr': 0, - 'ctrl_disk_addr': 0}, - {'path': None}] - - self._vmops._attach_ephemerals(mock_instance.name, ephemerals) - - mock_attach_drive.assert_has_calls( - [mock.call(mock_instance.name, mock.sentinel.PATH1, 0, - 1, constants.CTRL_TYPE_IDE, constants.DISK), - mock.call(mock_instance.name, mock.sentinel.PATH2, 0, - 0, constants.CTRL_TYPE_SCSI, constants.DISK) - ]) - - def test_attach_drive_vm_to_scsi(self): - self._vmops._attach_drive( - mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH, - mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR, - constants.CTRL_TYPE_SCSI) - - self._vmops._vmutils.attach_scsi_drive.assert_called_once_with( - mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH, - constants.DISK) - - def test_attach_drive_vm_to_ide(self): - self._vmops._attach_drive( - mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH, - mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR, - constants.CTRL_TYPE_IDE) - - self._vmops._vmutils.attach_ide_drive.assert_called_once_with( - mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH, - mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR, - constants.DISK) - - def test_get_image_vm_generation_default(self): - image_meta = objects.ImageMeta.from_dict({"properties": {}}) - self._vmops._hostutils.get_default_vm_generation.return_value = ( - constants.IMAGE_PROP_VM_GEN_1) - self._vmops._hostutils.get_supported_vm_types.return_value = [ - constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2] - - response = self._vmops.get_image_vm_generation( - mock.sentinel.instance_id, image_meta) - - self.assertEqual(constants.VM_GEN_1, response) - - def test_get_image_vm_generation_gen2(self): - image_meta = objects.ImageMeta.from_dict( - {"properties": - {"hw_machine_type": constants.IMAGE_PROP_VM_GEN_2}}) - self._vmops._hostutils.get_supported_vm_types.return_value = [ - constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2] - - response = self._vmops.get_image_vm_generation( - mock.sentinel.instance_id, image_meta) - - self.assertEqual(constants.VM_GEN_2, response) - - def test_check_vm_image_type_exception(self): - self._vmops._vhdutils.get_vhd_format.return_value = ( - constants.DISK_FORMAT_VHD) - - self.assertRaises(exception.InstanceUnacceptable, - self._vmops.check_vm_image_type, - mock.sentinel.instance_id, constants.VM_GEN_2, - mock.sentinel.FAKE_PATH) - - def _check_requires_certificate(self, os_type): - mock_image_meta = mock.MagicMock() - mock_image_meta.properties = {'os_type': os_type} - - expected_result = os_type == fields.OSType.LINUX - result = self._vmops._requires_certificate(mock_image_meta) - self.assertEqual(expected_result, result) - - def test_requires_certificate_windows(self): - self._check_requires_certificate(os_type=fields.OSType.WINDOWS) - - def test_requires_certificate_linux(self): - self._check_requires_certificate(os_type=fields.OSType.LINUX) - - def _check_requires_secure_boot( - self, image_prop_os_type=fields.OSType.LINUX, - image_prop_secure_boot=fields.SecureBoot.REQUIRED, - flavor_secure_boot=fields.SecureBoot.REQUIRED, - vm_gen=constants.VM_GEN_2, expected_exception=True): - mock_instance = fake_instance.fake_instance_obj(self.context) - if flavor_secure_boot: - mock_instance.flavor.extra_specs = { - constants.FLAVOR_SPEC_SECURE_BOOT: 
flavor_secure_boot} - mock_image_meta = mock.MagicMock() - mock_image_meta.properties = {'os_type': image_prop_os_type} - if image_prop_secure_boot: - mock_image_meta.properties['os_secure_boot'] = ( - image_prop_secure_boot) - - if expected_exception: - self.assertRaises(exception.InstanceUnacceptable, - self._vmops._requires_secure_boot, - mock_instance, mock_image_meta, vm_gen) - else: - result = self._vmops._requires_secure_boot(mock_instance, - mock_image_meta, - vm_gen) - - requires_sb = fields.SecureBoot.REQUIRED in [ - flavor_secure_boot, image_prop_secure_boot] - self.assertEqual(requires_sb, result) - - def test_requires_secure_boot_ok(self): - self._check_requires_secure_boot( - expected_exception=False) - - def test_requires_secure_boot_image_img_prop_none(self): - self._check_requires_secure_boot( - image_prop_secure_boot=None, - expected_exception=False) - - def test_requires_secure_boot_image_extra_spec_none(self): - self._check_requires_secure_boot( - flavor_secure_boot=None, - expected_exception=False) - - def test_requires_secure_boot_flavor_no_os_type(self): - self._check_requires_secure_boot( - image_prop_os_type=None) - - def test_requires_secure_boot_flavor_no_os_type_no_exc(self): - self._check_requires_secure_boot( - image_prop_os_type=None, - image_prop_secure_boot=fields.SecureBoot.DISABLED, - flavor_secure_boot=fields.SecureBoot.DISABLED, - expected_exception=False) - - def test_requires_secure_boot_flavor_disabled(self): - self._check_requires_secure_boot( - flavor_secure_boot=fields.SecureBoot.DISABLED) - - def test_requires_secure_boot_image_disabled(self): - self._check_requires_secure_boot( - image_prop_secure_boot=fields.SecureBoot.DISABLED) - - def test_requires_secure_boot_generation_1(self): - self._check_requires_secure_boot(vm_gen=constants.VM_GEN_1) - - @mock.patch('nova.api.metadata.base.InstanceMetadata') - @mock.patch('nova.virt.configdrive.ConfigDriveBuilder') - @mock.patch('oslo_concurrency.processutils.execute') - def _test_create_config_drive(self, mock_execute, mock_ConfigDriveBuilder, - mock_InstanceMetadata, config_drive_format, - config_drive_cdrom, side_effect, - rescue=False): - mock_instance = fake_instance.fake_instance_obj(self.context) - self.flags(config_drive_format=config_drive_format) - self.flags(config_drive_cdrom=config_drive_cdrom, group='hyperv') - self.flags(config_drive_inject_password=True, group='hyperv') - mock_ConfigDriveBuilder().__enter__().make_drive.side_effect = [ - side_effect] - - path_iso = os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_ISO) - path_vhd = os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_VHD) - - def fake_get_configdrive_path(instance_name, disk_format, - rescue=False): - return (path_iso - if disk_format == constants.DVD_FORMAT else path_vhd) - - mock_get_configdrive_path = self._vmops._pathutils.get_configdrive_path - mock_get_configdrive_path.side_effect = fake_get_configdrive_path - expected_get_configdrive_path_calls = [mock.call(mock_instance.name, - constants.DVD_FORMAT, - rescue=rescue)] - if not config_drive_cdrom: - expected_call = mock.call(mock_instance.name, - constants.DISK_FORMAT_VHD, - rescue=rescue) - expected_get_configdrive_path_calls.append(expected_call) - - if config_drive_format != self.ISO9660: - self.assertRaises(exception.ConfigDriveUnsupportedFormat, - self._vmops._create_config_drive, - self.context, - mock_instance, - [mock.sentinel.FILE], - mock.sentinel.PASSWORD, - mock.sentinel.NET_INFO, - rescue) - elif side_effect is processutils.ProcessExecutionError: - 
self.assertRaises(processutils.ProcessExecutionError, - self._vmops._create_config_drive, - self.context, - mock_instance, - [mock.sentinel.FILE], - mock.sentinel.PASSWORD, - mock.sentinel.NET_INFO, - rescue) - else: - path = self._vmops._create_config_drive(self.context, - mock_instance, - [mock.sentinel.FILE], - mock.sentinel.PASSWORD, - mock.sentinel.NET_INFO, - rescue) - mock_InstanceMetadata.assert_called_once_with( - mock_instance, content=[mock.sentinel.FILE], - extra_md={'admin_pass': mock.sentinel.PASSWORD}, - network_info=mock.sentinel.NET_INFO) - mock_get_configdrive_path.assert_has_calls( - expected_get_configdrive_path_calls) - mock_ConfigDriveBuilder.assert_called_with( - instance_md=mock_InstanceMetadata.return_value) - mock_make_drive = mock_ConfigDriveBuilder().__enter__().make_drive - mock_make_drive.assert_called_once_with(path_iso) - if not CONF.hyperv.config_drive_cdrom: - expected = path_vhd - mock_execute.assert_called_once_with( - CONF.hyperv.qemu_img_cmd, - 'convert', '-f', 'raw', '-O', 'vpc', - path_iso, path_vhd, attempts=1) - self._vmops._pathutils.remove.assert_called_once_with( - os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_ISO)) - else: - expected = path_iso - - self.assertEqual(expected, path) - - def test_create_config_drive_cdrom(self): - self._test_create_config_drive(config_drive_format=self.ISO9660, - config_drive_cdrom=True, - side_effect=None) - - def test_create_config_drive_vhd(self): - self._test_create_config_drive(config_drive_format=self.ISO9660, - config_drive_cdrom=False, - side_effect=None) - - def test_create_rescue_config_drive_vhd(self): - self._test_create_config_drive(config_drive_format=self.ISO9660, - config_drive_cdrom=False, - side_effect=None, - rescue=True) - - def test_create_config_drive_execution_error(self): - self._test_create_config_drive( - config_drive_format=self.ISO9660, - config_drive_cdrom=False, - side_effect=processutils.ProcessExecutionError) - - def test_attach_config_drive_exception(self): - instance = fake_instance.fake_instance_obj(self.context) - self.assertRaises(exception.InvalidDiskFormat, - self._vmops.attach_config_drive, - instance, 'C:/fake_instance_dir/configdrive.xxx', - constants.VM_GEN_1) - - @mock.patch.object(vmops.VMOps, '_attach_drive') - def test_attach_config_drive(self, mock_attach_drive): - instance = fake_instance.fake_instance_obj(self.context) - self._vmops.attach_config_drive(instance, - self._FAKE_CONFIGDRIVE_PATH, - constants.VM_GEN_1) - mock_attach_drive.assert_called_once_with( - instance.name, self._FAKE_CONFIGDRIVE_PATH, - 1, 0, constants.CTRL_TYPE_IDE, constants.DISK) - - @mock.patch.object(vmops.VMOps, '_attach_drive') - def test_attach_config_drive_gen2(self, mock_attach_drive): - instance = fake_instance.fake_instance_obj(self.context) - self._vmops.attach_config_drive(instance, - self._FAKE_CONFIGDRIVE_PATH, - constants.VM_GEN_2) - mock_attach_drive.assert_called_once_with( - instance.name, self._FAKE_CONFIGDRIVE_PATH, - 1, 0, constants.CTRL_TYPE_SCSI, constants.DISK) - - def test_detach_config_drive(self): - is_rescue_configdrive = True - mock_lookup_configdrive = ( - self._vmops._pathutils.lookup_configdrive_path) - mock_lookup_configdrive.return_value = mock.sentinel.configdrive_path - - self._vmops._detach_config_drive(mock.sentinel.instance_name, - rescue=is_rescue_configdrive, - delete=True) - - mock_lookup_configdrive.assert_called_once_with( - mock.sentinel.instance_name, - rescue=is_rescue_configdrive) - self._vmops._vmutils.detach_vm_disk.assert_called_once_with( 
- mock.sentinel.instance_name, mock.sentinel.configdrive_path, - is_physical=False) - self._vmops._pathutils.remove.assert_called_once_with( - mock.sentinel.configdrive_path) - - def test_delete_disk_files(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - self._vmops._delete_disk_files(mock_instance.name) - - stop_console_handler = ( - self._vmops._serial_console_ops.stop_console_handler_unsync) - stop_console_handler.assert_called_once_with(mock_instance.name) - self._vmops._pathutils.get_instance_dir.assert_called_once_with( - mock_instance.name, create_dir=False, remove_dir=True) - - @ddt.data({}, - {'vm_exists': True}, - {'planned_vm_exists': True}) - @ddt.unpack - @mock.patch('nova.virt.hyperv.volumeops.VolumeOps.disconnect_volumes') - @mock.patch('nova.virt.hyperv.vmops.VMOps._delete_disk_files') - @mock.patch('nova.virt.hyperv.vmops.VMOps.power_off') - @mock.patch('nova.virt.hyperv.vmops.VMOps.unplug_vifs') - def test_destroy(self, mock_unplug_vifs, mock_power_off, - mock_delete_disk_files, mock_disconnect_volumes, - vm_exists=False, planned_vm_exists=False): - mock_instance = fake_instance.fake_instance_obj(self.context) - self._vmops._vmutils.vm_exists.return_value = vm_exists - self._vmops._migrutils.planned_vm_exists.return_value = ( - planned_vm_exists) - - self._vmops.destroy(instance=mock_instance, - block_device_info=mock.sentinel.FAKE_BD_INFO, - network_info=mock.sentinel.fake_network_info) - - mock_destroy_planned_vms = ( - self._vmops._migrutils.destroy_existing_planned_vm) - if vm_exists: - self._vmops._vmutils.stop_vm_jobs.assert_called_once_with( - mock_instance.name) - mock_power_off.assert_called_once_with(mock_instance) - self._vmops._vmutils.destroy_vm.assert_called_once_with( - mock_instance.name) - elif planned_vm_exists: - self._vmops._migrutils.planned_vm_exists.assert_called_once_with( - mock_instance.name) - mock_destroy_planned_vms.assert_called_once_with( - mock_instance.name) - else: - self.assertFalse(self._vmops._vmutils.destroy_vm.called) - self.assertFalse(mock_destroy_planned_vms.called) - - self._vmops._vmutils.vm_exists.assert_called_with( - mock_instance.name) - mock_unplug_vifs.assert_called_once_with( - mock_instance, mock.sentinel.fake_network_info) - mock_disconnect_volumes.assert_called_once_with( - mock.sentinel.FAKE_BD_INFO, force=True) - mock_delete_disk_files.assert_called_once_with( - mock_instance.name) - - @mock.patch('nova.virt.hyperv.vmops.VMOps.power_off') - def test_destroy_exception(self, mock_power_off): - mock_instance = fake_instance.fake_instance_obj(self.context) - self._vmops._vmutils.destroy_vm.side_effect = ( - os_win_exc.HyperVException) - self._vmops._vmutils.vm_exists.return_value = True - - self.assertRaises(os_win_exc.HyperVException, - self._vmops.destroy, mock_instance, - mock.sentinel.network_info, - mock.sentinel.block_device_info) - - def test_reboot_hard(self): - self._test_reboot(vmops.REBOOT_TYPE_HARD, - os_win_const.HYPERV_VM_STATE_REBOOT) - - @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown") - def test_reboot_soft(self, mock_soft_shutdown): - mock_soft_shutdown.return_value = True - self._test_reboot(vmops.REBOOT_TYPE_SOFT, - os_win_const.HYPERV_VM_STATE_ENABLED) - - @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown") - def test_reboot_soft_failed(self, mock_soft_shutdown): - mock_soft_shutdown.return_value = False - self._test_reboot(vmops.REBOOT_TYPE_SOFT, - os_win_const.HYPERV_VM_STATE_REBOOT) - - @mock.patch("nova.virt.hyperv.vmops.VMOps.power_on") - 
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown") - def test_reboot_soft_exception(self, mock_soft_shutdown, mock_power_on): - mock_soft_shutdown.return_value = True - mock_power_on.side_effect = os_win_exc.HyperVException( - "Expected failure") - instance = fake_instance.fake_instance_obj(self.context) - - self.assertRaises(os_win_exc.HyperVException, self._vmops.reboot, - instance, {}, vmops.REBOOT_TYPE_SOFT) - - mock_soft_shutdown.assert_called_once_with(instance) - mock_power_on.assert_called_once_with(instance, network_info={}) - - def _test_reboot(self, reboot_type, vm_state): - instance = fake_instance.fake_instance_obj(self.context) - with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state: - self._vmops.reboot(instance, {}, reboot_type) - mock_set_state.assert_called_once_with(instance, vm_state) - - @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off") - def test_soft_shutdown(self, mock_wait_for_power_off): - instance = fake_instance.fake_instance_obj(self.context) - mock_wait_for_power_off.return_value = True - - result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT) - - mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm - mock_shutdown_vm.assert_called_once_with(instance.name) - mock_wait_for_power_off.assert_called_once_with( - instance.name, self._FAKE_TIMEOUT) - - self.assertTrue(result) - - @mock.patch("time.sleep") - def test_soft_shutdown_failed(self, mock_sleep): - instance = fake_instance.fake_instance_obj(self.context) - - mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm - mock_shutdown_vm.side_effect = os_win_exc.HyperVException( - "Expected failure.") - - result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT) - - mock_shutdown_vm.assert_called_once_with(instance.name) - self.assertFalse(result) - - @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off") - def test_soft_shutdown_wait(self, mock_wait_for_power_off): - instance = fake_instance.fake_instance_obj(self.context) - mock_wait_for_power_off.side_effect = [False, True] - - result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1) - - calls = [mock.call(instance.name, 1), - mock.call(instance.name, self._FAKE_TIMEOUT - 1)] - mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm - mock_shutdown_vm.assert_called_with(instance.name) - mock_wait_for_power_off.assert_has_calls(calls) - - self.assertTrue(result) - - @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off") - def test_soft_shutdown_wait_timeout(self, mock_wait_for_power_off): - instance = fake_instance.fake_instance_obj(self.context) - mock_wait_for_power_off.return_value = False - - result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1.5) - - calls = [mock.call(instance.name, 1.5), - mock.call(instance.name, self._FAKE_TIMEOUT - 1.5)] - mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm - mock_shutdown_vm.assert_called_with(instance.name) - mock_wait_for_power_off.assert_has_calls(calls) - - self.assertFalse(result) - - @mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state') - def test_pause(self, mock_set_vm_state): - mock_instance = fake_instance.fake_instance_obj(self.context) - self._vmops.pause(instance=mock_instance) - mock_set_vm_state.assert_called_once_with( - mock_instance, os_win_const.HYPERV_VM_STATE_PAUSED) - - @mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state') - def test_unpause(self, mock_set_vm_state): - mock_instance = fake_instance.fake_instance_obj(self.context) - self._vmops.unpause(instance=mock_instance) 
- mock_set_vm_state.assert_called_once_with( - mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED) - - @mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state') - def test_suspend(self, mock_set_vm_state): - mock_instance = fake_instance.fake_instance_obj(self.context) - self._vmops.suspend(instance=mock_instance) - mock_set_vm_state.assert_called_once_with( - mock_instance, os_win_const.HYPERV_VM_STATE_SUSPENDED) - - @mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state') - def test_resume(self, mock_set_vm_state): - mock_instance = fake_instance.fake_instance_obj(self.context) - self._vmops.resume(instance=mock_instance) - mock_set_vm_state.assert_called_once_with( - mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED) - - def _test_power_off(self, timeout, set_state_expected=True): - instance = fake_instance.fake_instance_obj(self.context) - with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state: - self._vmops.power_off(instance, timeout) - - serialops = self._vmops._serial_console_ops - serialops.stop_console_handler.assert_called_once_with( - instance.name) - if set_state_expected: - mock_set_state.assert_called_once_with( - instance, os_win_const.HYPERV_VM_STATE_DISABLED) - - def test_power_off_hard(self): - self._test_power_off(timeout=0) - - @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown") - def test_power_off_exception(self, mock_soft_shutdown): - mock_soft_shutdown.return_value = False - self._test_power_off(timeout=1) - - @mock.patch("nova.virt.hyperv.vmops.VMOps._set_vm_state") - @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown") - def test_power_off_soft(self, mock_soft_shutdown, mock_set_state): - instance = fake_instance.fake_instance_obj(self.context) - mock_soft_shutdown.return_value = True - - self._vmops.power_off(instance, 1, 0) - - serialops = self._vmops._serial_console_ops - serialops.stop_console_handler.assert_called_once_with( - instance.name) - mock_soft_shutdown.assert_called_once_with( - instance, 1, vmops.SHUTDOWN_TIME_INCREMENT) - self.assertFalse(mock_set_state.called) - - @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown") - def test_power_off_unexisting_instance(self, mock_soft_shutdown): - mock_soft_shutdown.side_effect = os_win_exc.HyperVVMNotFoundException( - vm_name=mock.sentinel.vm_name) - self._test_power_off(timeout=1, set_state_expected=False) - - @mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state') - def test_power_on(self, mock_set_vm_state): - mock_instance = fake_instance.fake_instance_obj(self.context) - - self._vmops.power_on(mock_instance) - - mock_set_vm_state.assert_called_once_with( - mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED) - - @mock.patch('nova.virt.hyperv.volumeops.VolumeOps' - '.fix_instance_volume_disk_paths') - @mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state') - def test_power_on_having_block_devices(self, mock_set_vm_state, - mock_fix_instance_vol_paths): - mock_instance = fake_instance.fake_instance_obj(self.context) - - self._vmops.power_on(mock_instance, mock.sentinel.block_device_info) - - mock_fix_instance_vol_paths.assert_called_once_with( - mock_instance.name, mock.sentinel.block_device_info) - mock_set_vm_state.assert_called_once_with( - mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED) - - @mock.patch.object(vmops.VMOps, 'plug_vifs') - def test_power_on_with_network_info(self, mock_plug_vifs): - mock_instance = fake_instance.fake_instance_obj(self.context) - - self._vmops.power_on(mock_instance, - network_info=mock.sentinel.fake_network_info) - 
mock_plug_vifs.assert_called_once_with( - mock_instance, mock.sentinel.fake_network_info) - - @mock.patch.object(vmops.VMOps, 'plug_vifs') - def test_power_on_vifs_already_plugged(self, mock_plug_vifs): - mock_instance = fake_instance.fake_instance_obj(self.context) - - self._vmops.power_on(mock_instance, - should_plug_vifs=False) - self.assertFalse(mock_plug_vifs.called) - - def _test_set_vm_state(self, state): - mock_instance = fake_instance.fake_instance_obj(self.context) - - self._vmops._set_vm_state(mock_instance, state) - self._vmops._vmutils.set_vm_state.assert_called_once_with( - mock_instance.name, state) - - def test_set_vm_state_disabled(self): - self._test_set_vm_state(state=os_win_const.HYPERV_VM_STATE_DISABLED) - - def test_set_vm_state_enabled(self): - self._test_set_vm_state(state=os_win_const.HYPERV_VM_STATE_ENABLED) - - def test_set_vm_state_reboot(self): - self._test_set_vm_state(state=os_win_const.HYPERV_VM_STATE_REBOOT) - - def test_set_vm_state_exception(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - self._vmops._vmutils.set_vm_state.side_effect = ( - os_win_exc.HyperVException) - self.assertRaises(os_win_exc.HyperVException, - self._vmops._set_vm_state, - mock_instance, mock.sentinel.STATE) - - def test_get_vm_state(self): - summary_info = {'EnabledState': os_win_const.HYPERV_VM_STATE_DISABLED} - - self._vmops._vmutils.get_vm_summary_info.return_value = summary_info - - response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME) - self.assertEqual(response, os_win_const.HYPERV_VM_STATE_DISABLED) - - @mock.patch.object(vmops.VMOps, '_get_vm_state') - def test_wait_for_power_off_true(self, mock_get_state): - mock_get_state.return_value = os_win_const.HYPERV_VM_STATE_DISABLED - result = self._vmops._wait_for_power_off( - mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT) - mock_get_state.assert_called_with(mock.sentinel.FAKE_VM_NAME) - self.assertTrue(result) - - @mock.patch.object(vmops.etimeout, "with_timeout") - def test_wait_for_power_off_false(self, mock_with_timeout): - mock_with_timeout.side_effect = etimeout.Timeout() - result = self._vmops._wait_for_power_off( - mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT) - self.assertFalse(result) - - def test_create_vm_com_port_pipes(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_serial_ports = { - 1: constants.SERIAL_PORT_TYPE_RO, - 2: constants.SERIAL_PORT_TYPE_RW - } - - self._vmops._create_vm_com_port_pipes(mock_instance, - mock_serial_ports) - expected_calls = [] - for port_number, port_type in mock_serial_ports.items(): - expected_pipe = r'\\.\pipe\%s_%s' % (mock_instance.uuid, - port_type) - expected_calls.append(mock.call(mock_instance.name, - port_number, - expected_pipe)) - - mock_set_conn = self._vmops._vmutils.set_vm_serial_port_connection - mock_set_conn.assert_has_calls(expected_calls) - - def test_list_instance_uuids(self): - fake_uuid = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3' - self._vmops._vmutils.list_instance_notes.return_value = ( - [('fake_name', [fake_uuid])]) - - response = self._vmops.list_instance_uuids() - self._vmops._vmutils.list_instance_notes.assert_called_once_with() - - self.assertEqual(response, [fake_uuid]) - - def test_copy_vm_dvd_disks(self): - fake_paths = [mock.sentinel.FAKE_DVD_PATH1, - mock.sentinel.FAKE_DVD_PATH2] - mock_copy = self._vmops._pathutils.copyfile - mock_get_dvd_disk_paths = self._vmops._vmutils.get_vm_dvd_disk_paths - mock_get_dvd_disk_paths.return_value = fake_paths - 
self._vmops._pathutils.get_instance_dir.return_value = ( - mock.sentinel.FAKE_DEST_PATH) - - self._vmops.copy_vm_dvd_disks(mock.sentinel.FAKE_VM_NAME, - mock.sentinel.FAKE_DEST_HOST) - - mock_get_dvd_disk_paths.assert_called_with(mock.sentinel.FAKE_VM_NAME) - self._vmops._pathutils.get_instance_dir.assert_called_once_with( - mock.sentinel.FAKE_VM_NAME, - remote_server=mock.sentinel.FAKE_DEST_HOST) - self.assertEqual(2, mock_copy.call_count) - mock_copy.assert_has_calls([mock.call(mock.sentinel.FAKE_DVD_PATH1, - mock.sentinel.FAKE_DEST_PATH), - mock.call(mock.sentinel.FAKE_DVD_PATH2, - mock.sentinel.FAKE_DEST_PATH)]) - - def test_plug_vifs(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - fake_vif1 = {'id': mock.sentinel.ID1, - 'type': mock.sentinel.vif_type1} - fake_vif2 = {'id': mock.sentinel.ID2, - 'type': mock.sentinel.vif_type2} - mock_network_info = [fake_vif1, fake_vif2] - calls = [mock.call(mock_instance, fake_vif1), - mock.call(mock_instance, fake_vif2)] - - self._vmops.plug_vifs(mock_instance, - network_info=mock_network_info) - self._vmops._vif_driver.plug.assert_has_calls(calls) - - def test_unplug_vifs(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - fake_vif1 = {'id': mock.sentinel.ID1, - 'type': mock.sentinel.vif_type1} - fake_vif2 = {'id': mock.sentinel.ID2, - 'type': mock.sentinel.vif_type2} - mock_network_info = [fake_vif1, fake_vif2] - calls = [mock.call(mock_instance, fake_vif1), - mock.call(mock_instance, fake_vif2)] - - self._vmops.unplug_vifs(mock_instance, - network_info=mock_network_info) - self._vmops._vif_driver.unplug.assert_has_calls(calls) - - def _setup_remotefx_mocks(self): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.flavor.extra_specs = { - 'os:resolution': os_win_const.REMOTEFX_MAX_RES_1920x1200, - 'os:monitors': '2', - 'os:vram': '256'} - - return mock_instance - - def test_configure_remotefx_not_required(self): - self.flags(enable_remotefx=False, group='hyperv') - mock_instance = fake_instance.fake_instance_obj(self.context) - - self._vmops._configure_remotefx(mock_instance, mock.sentinel.VM_GEN) - - def test_configure_remotefx_exception_enable_config(self): - self.flags(enable_remotefx=False, group='hyperv') - mock_instance = self._setup_remotefx_mocks() - - self.assertRaises(exception.InstanceUnacceptable, - self._vmops._configure_remotefx, - mock_instance, mock.sentinel.VM_GEN) - - def test_configure_remotefx_exception_server_feature(self): - self.flags(enable_remotefx=True, group='hyperv') - mock_instance = self._setup_remotefx_mocks() - self._vmops._hostutils.check_server_feature.return_value = False - - self.assertRaises(exception.InstanceUnacceptable, - self._vmops._configure_remotefx, - mock_instance, mock.sentinel.VM_GEN) - - def test_configure_remotefx_exception_vm_gen(self): - self.flags(enable_remotefx=True, group='hyperv') - mock_instance = self._setup_remotefx_mocks() - self._vmops._hostutils.check_server_feature.return_value = True - self._vmops._vmutils.vm_gen_supports_remotefx.return_value = False - - self.assertRaises(exception.InstanceUnacceptable, - self._vmops._configure_remotefx, - mock_instance, mock.sentinel.VM_GEN) - - def test_configure_remotefx(self): - self.flags(enable_remotefx=True, group='hyperv') - mock_instance = self._setup_remotefx_mocks() - self._vmops._hostutils.check_server_feature.return_value = True - self._vmops._vmutils.vm_gen_supports_remotefx.return_value = True - extra_specs = mock_instance.flavor.extra_specs - - 
self._vmops._configure_remotefx(mock_instance, - constants.VM_GEN_1) - mock_enable_remotefx = ( - self._vmops._vmutils.enable_remotefx_video_adapter) - mock_enable_remotefx.assert_called_once_with( - mock_instance.name, int(extra_specs['os:monitors']), - extra_specs['os:resolution'], - int(extra_specs['os:vram']) * units.Mi) - - @mock.patch.object(vmops.VMOps, '_get_vm_state') - def test_check_hotplug_available_vm_disabled(self, mock_get_vm_state): - fake_vm = fake_instance.fake_instance_obj(self.context) - mock_get_vm_state.return_value = os_win_const.HYPERV_VM_STATE_DISABLED - - result = self._vmops._check_hotplug_available(fake_vm) - - self.assertTrue(result) - mock_get_vm_state.assert_called_once_with(fake_vm.name) - self.assertFalse( - self._vmops._hostutils.check_min_windows_version.called) - self.assertFalse(self._vmops._vmutils.get_vm_generation.called) - - @mock.patch.object(vmops.VMOps, '_get_vm_state') - def _test_check_hotplug_available( - self, mock_get_vm_state, expected_result=False, - vm_gen=constants.VM_GEN_2, windows_version=_WIN_VERSION_10): - - fake_vm = fake_instance.fake_instance_obj(self.context) - mock_get_vm_state.return_value = os_win_const.HYPERV_VM_STATE_ENABLED - self._vmops._vmutils.get_vm_generation.return_value = vm_gen - fake_check_win_vers = self._vmops._hostutils.check_min_windows_version - fake_check_win_vers.return_value = ( - windows_version == self._WIN_VERSION_10) - - result = self._vmops._check_hotplug_available(fake_vm) - - self.assertEqual(expected_result, result) - mock_get_vm_state.assert_called_once_with(fake_vm.name) - fake_check_win_vers.assert_called_once_with(10, 0) - - def test_check_if_hotplug_available(self): - self._test_check_hotplug_available(expected_result=True) - - def test_check_if_hotplug_available_gen1(self): - self._test_check_hotplug_available( - expected_result=False, vm_gen=constants.VM_GEN_1) - - def test_check_if_hotplug_available_win_6_3(self): - self._test_check_hotplug_available( - expected_result=False, windows_version=self._WIN_VERSION_6_3) - - @mock.patch.object(vmops.VMOps, '_check_hotplug_available') - def test_attach_interface(self, mock_check_hotplug_available): - mock_check_hotplug_available.return_value = True - fake_vm = fake_instance.fake_instance_obj(self.context) - fake_vif = test_virtual_interface.fake_vif - - self._vmops.attach_interface(fake_vm, fake_vif) - - mock_check_hotplug_available.assert_called_once_with(fake_vm) - self._vmops._vif_driver.plug.assert_called_once_with( - fake_vm, fake_vif) - self._vmops._vmutils.create_nic.assert_called_once_with( - fake_vm.name, fake_vif['id'], fake_vif['address']) - - @mock.patch.object(vmops.VMOps, '_check_hotplug_available') - def test_attach_interface_failed(self, mock_check_hotplug_available): - mock_check_hotplug_available.return_value = False - self.assertRaises(exception.InterfaceAttachFailed, - self._vmops.attach_interface, - mock.MagicMock(), mock.sentinel.fake_vif) - - @mock.patch.object(vmops.VMOps, '_check_hotplug_available') - def test_detach_interface(self, mock_check_hotplug_available): - mock_check_hotplug_available.return_value = True - fake_vm = fake_instance.fake_instance_obj(self.context) - fake_vif = test_virtual_interface.fake_vif - - self._vmops.detach_interface(fake_vm, fake_vif) - - mock_check_hotplug_available.assert_called_once_with(fake_vm) - self._vmops._vif_driver.unplug.assert_called_once_with( - fake_vm, fake_vif) - self._vmops._vmutils.destroy_nic.assert_called_once_with( - fake_vm.name, fake_vif['id']) - - 
@mock.patch.object(vmops.VMOps, '_check_hotplug_available') - def test_detach_interface_failed(self, mock_check_hotplug_available): - mock_check_hotplug_available.return_value = False - self.assertRaises(exception.InterfaceDetachFailed, - self._vmops.detach_interface, - mock.MagicMock(), mock.sentinel.fake_vif) - - @mock.patch.object(vmops.VMOps, '_check_hotplug_available') - def test_detach_interface_missing_instance(self, mock_check_hotplug): - mock_check_hotplug.side_effect = os_win_exc.HyperVVMNotFoundException( - vm_name='fake_vm') - self.assertRaises(exception.InterfaceDetachFailed, - self._vmops.detach_interface, - mock.MagicMock(), mock.sentinel.fake_vif) - - @mock.patch('nova.virt.configdrive.required_by') - @mock.patch.object(vmops.VMOps, '_create_root_vhd') - @mock.patch.object(vmops.VMOps, 'get_image_vm_generation') - @mock.patch.object(vmops.VMOps, '_attach_drive') - @mock.patch.object(vmops.VMOps, '_create_config_drive') - @mock.patch.object(vmops.VMOps, 'attach_config_drive') - @mock.patch.object(vmops.VMOps, '_detach_config_drive') - @mock.patch.object(vmops.VMOps, 'power_on') - def test_rescue_instance(self, mock_power_on, - mock_detach_config_drive, - mock_attach_config_drive, - mock_create_config_drive, - mock_attach_drive, - mock_get_image_vm_gen, - mock_create_root_vhd, - mock_configdrive_required): - mock_image_meta = mock.MagicMock() - mock_vm_gen = constants.VM_GEN_2 - mock_instance = fake_instance.fake_instance_obj(self.context) - - mock_configdrive_required.return_value = True - mock_create_root_vhd.return_value = mock.sentinel.rescue_vhd_path - mock_get_image_vm_gen.return_value = mock_vm_gen - self._vmops._vmutils.get_vm_generation.return_value = mock_vm_gen - self._vmops._pathutils.lookup_root_vhd_path.return_value = ( - mock.sentinel.root_vhd_path) - mock_create_config_drive.return_value = ( - mock.sentinel.rescue_configdrive_path) - - self._vmops.rescue_instance(self.context, - mock_instance, - mock.sentinel.network_info, - mock_image_meta, - mock.sentinel.rescue_password) - - mock_get_image_vm_gen.assert_called_once_with( - mock_instance.uuid, mock_image_meta) - self._vmops._vmutils.detach_vm_disk.assert_called_once_with( - mock_instance.name, mock.sentinel.root_vhd_path, - is_physical=False) - mock_attach_drive.assert_called_once_with( - mock_instance.name, mock.sentinel.rescue_vhd_path, 0, - self._vmops._ROOT_DISK_CTRL_ADDR, - vmops.VM_GENERATIONS_CONTROLLER_TYPES[mock_vm_gen]) - self._vmops._vmutils.attach_scsi_drive.assert_called_once_with( - mock_instance.name, mock.sentinel.root_vhd_path, - drive_type=constants.DISK) - mock_detach_config_drive.assert_called_once_with(mock_instance.name) - mock_create_config_drive.assert_called_once_with( - self.context, mock_instance, - injected_files=None, - admin_password=mock.sentinel.rescue_password, - network_info=mock.sentinel.network_info, - rescue=True) - mock_attach_config_drive.assert_called_once_with( - mock_instance, mock.sentinel.rescue_configdrive_path, - mock_vm_gen) - - @mock.patch.object(vmops.VMOps, '_create_root_vhd') - @mock.patch.object(vmops.VMOps, 'get_image_vm_generation') - @mock.patch.object(vmops.VMOps, 'unrescue_instance') - def _test_rescue_instance_exception(self, mock_unrescue, - mock_get_image_vm_gen, - mock_create_root_vhd, - wrong_vm_gen=False, - boot_from_volume=False, - expected_exc=None): - mock_vm_gen = constants.VM_GEN_1 - image_vm_gen = (mock_vm_gen - if not wrong_vm_gen else constants.VM_GEN_2) - mock_image_meta = mock.MagicMock() - - mock_instance = 
fake_instance.fake_instance_obj(self.context) - mock_get_image_vm_gen.return_value = image_vm_gen - self._vmops._vmutils.get_vm_generation.return_value = mock_vm_gen - self._vmops._pathutils.lookup_root_vhd_path.return_value = ( - mock.sentinel.root_vhd_path if not boot_from_volume else None) - - self.assertRaises(expected_exc, - self._vmops.rescue_instance, - self.context, mock_instance, - mock.sentinel.network_info, - mock_image_meta, - mock.sentinel.rescue_password) - mock_unrescue.assert_called_once_with(mock_instance) - - def test_rescue_instance_wrong_vm_gen(self): - # Test the case when the rescue image requires a different - # vm generation than the actual rescued instance. - self._test_rescue_instance_exception( - wrong_vm_gen=True, - expected_exc=exception.ImageUnacceptable) - - def test_rescue_instance_boot_from_volume(self): - # Rescuing instances booted from volume is not supported. - self._test_rescue_instance_exception( - boot_from_volume=True, - expected_exc=exception.InstanceNotRescuable) - - @mock.patch.object(fileutils, 'delete_if_exists') - @mock.patch.object(vmops.VMOps, '_attach_drive') - @mock.patch.object(vmops.VMOps, 'attach_config_drive') - @mock.patch.object(vmops.VMOps, '_detach_config_drive') - @mock.patch.object(vmops.VMOps, 'power_on') - @mock.patch.object(vmops.VMOps, 'power_off') - def test_unrescue_instance(self, mock_power_on, mock_power_off, - mock_detach_config_drive, - mock_attach_configdrive, - mock_attach_drive, - mock_delete_if_exists): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_vm_gen = constants.VM_GEN_2 - - self._vmops._vmutils.get_vm_generation.return_value = mock_vm_gen - self._vmops._vmutils.is_disk_attached.return_value = False - self._vmops._pathutils.lookup_root_vhd_path.side_effect = ( - mock.sentinel.root_vhd_path, mock.sentinel.rescue_vhd_path) - self._vmops._pathutils.lookup_configdrive_path.return_value = ( - mock.sentinel.configdrive_path) - - self._vmops.unrescue_instance(mock_instance) - - self._vmops._pathutils.lookup_root_vhd_path.assert_has_calls( - [mock.call(mock_instance.name), - mock.call(mock_instance.name, rescue=True)]) - self._vmops._vmutils.detach_vm_disk.assert_has_calls( - [mock.call(mock_instance.name, - mock.sentinel.root_vhd_path, - is_physical=False), - mock.call(mock_instance.name, - mock.sentinel.rescue_vhd_path, - is_physical=False)]) - mock_attach_drive.assert_called_once_with( - mock_instance.name, mock.sentinel.root_vhd_path, 0, - self._vmops._ROOT_DISK_CTRL_ADDR, - vmops.VM_GENERATIONS_CONTROLLER_TYPES[mock_vm_gen]) - mock_detach_config_drive.assert_called_once_with(mock_instance.name, - rescue=True, - delete=True) - mock_delete_if_exists.assert_called_once_with( - mock.sentinel.rescue_vhd_path) - self._vmops._vmutils.is_disk_attached.assert_called_once_with( - mock.sentinel.configdrive_path, - is_physical=False) - mock_attach_configdrive.assert_called_once_with( - mock_instance, mock.sentinel.configdrive_path, mock_vm_gen) - mock_power_on.assert_called_once_with(mock_instance) - - @mock.patch.object(vmops.VMOps, 'power_off') - def test_unrescue_instance_missing_root_image(self, mock_power_off): - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.vm_state = vm_states.RESCUED - self._vmops._pathutils.lookup_root_vhd_path.return_value = None - - self.assertRaises(exception.InstanceNotRescuable, - self._vmops.unrescue_instance, - mock_instance) - - @mock.patch.object(volumeops.VolumeOps, 'bytes_per_sec_to_iops') - @mock.patch.object(vmops.VMOps, 
'_get_scoped_flavor_extra_specs') - @mock.patch.object(vmops.VMOps, '_get_instance_local_disks') - def test_set_instance_disk_qos_specs(self, mock_get_local_disks, - mock_get_scoped_specs, - mock_bytes_per_sec_to_iops): - fake_total_bytes_sec = 8 - fake_total_iops_sec = 1 - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_local_disks = [mock.sentinel.root_vhd_path, - mock.sentinel.eph_vhd_path] - - mock_get_local_disks.return_value = mock_local_disks - mock_set_qos_specs = self._vmops._vmutils.set_disk_qos_specs - mock_get_scoped_specs.return_value = dict( - disk_total_bytes_sec=fake_total_bytes_sec) - mock_bytes_per_sec_to_iops.return_value = fake_total_iops_sec - - self._vmops._set_instance_disk_qos_specs(mock_instance) - - mock_bytes_per_sec_to_iops.assert_called_once_with( - fake_total_bytes_sec) - - mock_get_local_disks.assert_called_once_with(mock_instance.name) - expected_calls = [mock.call(disk_path, fake_total_iops_sec) - for disk_path in mock_local_disks] - mock_set_qos_specs.assert_has_calls(expected_calls) - - def test_get_instance_local_disks(self): - fake_instance_dir = 'fake_instance_dir' - fake_local_disks = [os.path.join(fake_instance_dir, disk_name) - for disk_name in ['root.vhd', 'configdrive.iso']] - fake_instance_disks = ['fake_remote_disk'] + fake_local_disks - - mock_get_storage_paths = self._vmops._vmutils.get_vm_storage_paths - mock_get_storage_paths.return_value = [fake_instance_disks, []] - mock_get_instance_dir = self._vmops._pathutils.get_instance_dir - mock_get_instance_dir.return_value = fake_instance_dir - - ret_val = self._vmops._get_instance_local_disks( - mock.sentinel.instance_name) - - self.assertEqual(fake_local_disks, ret_val) - - def test_get_scoped_flavor_extra_specs(self): - # The flavor extra specs dict contains only string values. - fake_total_bytes_sec = '8' - - mock_instance = fake_instance.fake_instance_obj(self.context) - mock_instance.flavor.extra_specs = { - 'spec_key': 'spec_value', - 'quota:total_bytes_sec': fake_total_bytes_sec} - - ret_val = self._vmops._get_scoped_flavor_extra_specs( - mock_instance, scope='quota') - - expected_specs = { - 'total_bytes_sec': fake_total_bytes_sec - } - self.assertEqual(expected_specs, ret_val) diff --git a/nova/tests/unit/virt/hyperv/test_volumeops.py b/nova/tests/unit/virt/hyperv/test_volumeops.py deleted file mode 100644 index f289d0363201..000000000000 --- a/nova/tests/unit/virt/hyperv/test_volumeops.py +++ /dev/null @@ -1,645 +0,0 @@ -# Copyright 2014 Cloudbase Solutions Srl -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from unittest import mock - -from os_brick.initiator import connector -from oslo_config import cfg -from oslo_utils import units - -from nova import exception -from nova import test -from nova.tests.unit import fake_block_device -from nova.tests.unit.virt.hyperv import test_base -from nova.virt.hyperv import constants -from nova.virt.hyperv import volumeops - -CONF = cfg.CONF - -connection_data = {'volume_id': 'fake_vol_id', - 'target_lun': mock.sentinel.fake_lun, - 'target_iqn': mock.sentinel.fake_iqn, - 'target_portal': mock.sentinel.fake_portal, - 'auth_method': 'chap', - 'auth_username': mock.sentinel.fake_user, - 'auth_password': mock.sentinel.fake_pass} - - -def get_fake_block_dev_info(): - return {'block_device_mapping': [ - fake_block_device.AnonFakeDbBlockDeviceDict({'source_type': 'volume'})] - } - - -def get_fake_connection_info(**kwargs): - return {'data': dict(connection_data, **kwargs), - 'serial': mock.sentinel.serial} - - -class VolumeOpsTestCase(test_base.HyperVBaseTestCase): - """Unit tests for VolumeOps class.""" - - def setUp(self): - super(VolumeOpsTestCase, self).setUp() - self._volumeops = volumeops.VolumeOps() - self._volumeops._volutils = mock.MagicMock() - self._volumeops._vmutils = mock.Mock() - - def test_get_volume_driver(self): - fake_conn_info = {'driver_volume_type': mock.sentinel.fake_driver_type} - self._volumeops.volume_drivers[mock.sentinel.fake_driver_type] = ( - mock.sentinel.fake_driver) - - result = self._volumeops._get_volume_driver( - connection_info=fake_conn_info) - self.assertEqual(mock.sentinel.fake_driver, result) - - def test_get_volume_driver_exception(self): - fake_conn_info = {'driver_volume_type': 'fake_driver'} - self.assertRaises(exception.VolumeDriverNotFound, - self._volumeops._get_volume_driver, - connection_info=fake_conn_info) - - @mock.patch.object(volumeops.VolumeOps, 'attach_volume') - def test_attach_volumes(self, mock_attach_volume): - block_device_info = get_fake_block_dev_info() - - self._volumeops.attach_volumes( - block_device_info['block_device_mapping'], - mock.sentinel.instance_name) - - mock_attach_volume.assert_called_once_with( - block_device_info['block_device_mapping'][0]['connection_info'], - mock.sentinel.instance_name) - - def test_fix_instance_volume_disk_paths_empty_bdm(self): - self._volumeops.fix_instance_volume_disk_paths( - mock.sentinel.instance_name, - block_device_info={}) - self.assertFalse( - self._volumeops._vmutils.get_vm_physical_disk_mapping.called) - - @mock.patch.object(volumeops.VolumeOps, 'get_disk_path_mapping') - def test_fix_instance_volume_disk_paths(self, mock_get_disk_path_mapping): - block_device_info = get_fake_block_dev_info() - - mock_disk1 = { - 'mounted_disk_path': mock.sentinel.mounted_disk1_path, - 'resource_path': mock.sentinel.resource1_path - } - mock_disk2 = { - 'mounted_disk_path': mock.sentinel.mounted_disk2_path, - 'resource_path': mock.sentinel.resource2_path - } - - mock_vm_disk_mapping = { - mock.sentinel.disk1_serial: mock_disk1, - mock.sentinel.disk2_serial: mock_disk2 - } - # In this case, only the first disk needs to be updated. 
- mock_phys_disk_path_mapping = { - mock.sentinel.disk1_serial: mock.sentinel.actual_disk1_path, - mock.sentinel.disk2_serial: mock.sentinel.mounted_disk2_path - } - - vmutils = self._volumeops._vmutils - vmutils.get_vm_physical_disk_mapping.return_value = ( - mock_vm_disk_mapping) - - mock_get_disk_path_mapping.return_value = mock_phys_disk_path_mapping - - self._volumeops.fix_instance_volume_disk_paths( - mock.sentinel.instance_name, - block_device_info) - - vmutils.get_vm_physical_disk_mapping.assert_called_once_with( - mock.sentinel.instance_name) - mock_get_disk_path_mapping.assert_called_once_with( - block_device_info) - vmutils.set_disk_host_res.assert_called_once_with( - mock.sentinel.resource1_path, - mock.sentinel.actual_disk1_path) - - @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') - def test_disconnect_volumes(self, mock_get_volume_driver): - block_device_info = get_fake_block_dev_info() - block_device_mapping = block_device_info['block_device_mapping'] - fake_volume_driver = mock_get_volume_driver.return_value - - self._volumeops.disconnect_volumes(block_device_info) - fake_volume_driver.disconnect_volume.assert_called_once_with( - block_device_mapping[0]['connection_info'], force=False) - - # Verify force=True - fake_volume_driver.disconnect_volume.reset_mock() - self._volumeops.disconnect_volumes(block_device_info, force=True) - fake_volume_driver.disconnect_volume.assert_called_once_with( - block_device_mapping[0]['connection_info'], force=True) - - @mock.patch('time.sleep') - @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') - def _test_attach_volume(self, mock_get_volume_driver, mock_sleep, - attach_failed): - fake_conn_info = get_fake_connection_info( - qos_specs=mock.sentinel.qos_specs) - fake_volume_driver = mock_get_volume_driver.return_value - - expected_try_count = 1 - if attach_failed: - expected_try_count += CONF.hyperv.volume_attach_retry_count - - fake_volume_driver.set_disk_qos_specs.side_effect = ( - test.TestingException) - - self.assertRaises(exception.VolumeAttachFailed, - self._volumeops.attach_volume, - fake_conn_info, - mock.sentinel.inst_name, - mock.sentinel.disk_bus) - else: - self._volumeops.attach_volume( - fake_conn_info, - mock.sentinel.inst_name, - mock.sentinel.disk_bus) - - mock_get_volume_driver.assert_any_call( - fake_conn_info) - fake_volume_driver.attach_volume.assert_has_calls( - [mock.call(fake_conn_info, - mock.sentinel.inst_name, - mock.sentinel.disk_bus)] * expected_try_count) - fake_volume_driver.set_disk_qos_specs.assert_has_calls( - [mock.call(fake_conn_info, - mock.sentinel.qos_specs)] * expected_try_count) - - if attach_failed: - fake_volume_driver.disconnect_volume.assert_called_once_with( - fake_conn_info, force=False) - mock_sleep.assert_has_calls( - [mock.call(CONF.hyperv.volume_attach_retry_interval)] * - CONF.hyperv.volume_attach_retry_count) - else: - mock_sleep.assert_not_called() - - def test_attach_volume(self): - self._test_attach_volume(attach_failed=False) - - def test_attach_volume_exc(self): - self._test_attach_volume(attach_failed=True) - - @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') - def test_disconnect_volume(self, mock_get_volume_driver): - fake_volume_driver = mock_get_volume_driver.return_value - - self._volumeops.disconnect_volume(mock.sentinel.conn_info) - - mock_get_volume_driver.assert_called_once_with( - mock.sentinel.conn_info) - fake_volume_driver.disconnect_volume.assert_called_once_with( - mock.sentinel.conn_info, force=False) - - # Verify force=True - 
fake_volume_driver.disconnect_volume.reset_mock() - self._volumeops.disconnect_volume(mock.sentinel.conn_info, force=True) - fake_volume_driver.disconnect_volume.assert_called_once_with( - mock.sentinel.conn_info, force=True) - - @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') - def test_detach_volume(self, mock_get_volume_driver): - fake_volume_driver = mock_get_volume_driver.return_value - fake_conn_info = {'data': 'fake_conn_info_data'} - - self._volumeops.detach_volume(fake_conn_info, - mock.sentinel.inst_name) - - mock_get_volume_driver.assert_called_once_with( - fake_conn_info) - fake_volume_driver.detach_volume.assert_called_once_with( - fake_conn_info, mock.sentinel.inst_name) - fake_volume_driver.disconnect_volume.assert_called_once_with( - fake_conn_info) - - @mock.patch.object(connector, 'get_connector_properties') - def test_get_volume_connector(self, mock_get_connector): - conn = self._volumeops.get_volume_connector() - - mock_get_connector.assert_called_once_with( - root_helper=None, - my_ip=CONF.my_block_storage_ip, - multipath=CONF.hyperv.use_multipath_io, - enforce_multipath=True, - host=CONF.host) - self.assertEqual(mock_get_connector.return_value, conn) - - @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') - def test_connect_volumes(self, mock_get_volume_driver): - block_device_info = get_fake_block_dev_info() - - self._volumeops.connect_volumes(block_device_info) - - init_vol_conn = ( - mock_get_volume_driver.return_value.connect_volume) - init_vol_conn.assert_called_once_with( - block_device_info['block_device_mapping'][0]['connection_info']) - - @mock.patch.object(volumeops.VolumeOps, - 'get_disk_resource_path') - def test_get_disk_path_mapping(self, mock_get_disk_path): - block_device_info = get_fake_block_dev_info() - block_device_mapping = block_device_info['block_device_mapping'] - fake_conn_info = get_fake_connection_info() - block_device_mapping[0]['connection_info'] = fake_conn_info - - mock_get_disk_path.return_value = mock.sentinel.disk_path - - resulted_disk_path_mapping = self._volumeops.get_disk_path_mapping( - block_device_info) - - mock_get_disk_path.assert_called_once_with(fake_conn_info) - expected_disk_path_mapping = { - mock.sentinel.serial: mock.sentinel.disk_path - } - self.assertEqual(expected_disk_path_mapping, - resulted_disk_path_mapping) - - @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver') - def test_get_disk_resource_path(self, mock_get_volume_driver): - fake_conn_info = get_fake_connection_info() - fake_volume_driver = mock_get_volume_driver.return_value - - resulted_disk_path = self._volumeops.get_disk_resource_path( - fake_conn_info) - - mock_get_volume_driver.assert_called_once_with(fake_conn_info) - get_mounted_disk = fake_volume_driver.get_disk_resource_path - get_mounted_disk.assert_called_once_with(fake_conn_info) - self.assertEqual(get_mounted_disk.return_value, - resulted_disk_path) - - def test_bytes_per_sec_to_iops(self): - no_bytes = 15 * units.Ki - expected_iops = 2 - - resulted_iops = self._volumeops.bytes_per_sec_to_iops(no_bytes) - self.assertEqual(expected_iops, resulted_iops) - - @mock.patch.object(volumeops.LOG, 'warning') - def test_validate_qos_specs(self, mock_warning): - supported_qos_specs = [mock.sentinel.spec1, mock.sentinel.spec2] - requested_qos_specs = {mock.sentinel.spec1: mock.sentinel.val, - mock.sentinel.spec3: mock.sentinel.val2} - - self._volumeops.validate_qos_specs(requested_qos_specs, - supported_qos_specs) - self.assertTrue(mock_warning.called) - - -class 
BaseVolumeDriverTestCase(test_base.HyperVBaseTestCase): - """Unit tests for Hyper-V BaseVolumeDriver class.""" - - def setUp(self): - super(BaseVolumeDriverTestCase, self).setUp() - - self._base_vol_driver = volumeops.BaseVolumeDriver() - - self._base_vol_driver._diskutils = mock.Mock() - self._base_vol_driver._vmutils = mock.Mock() - self._base_vol_driver._migrutils = mock.Mock() - self._base_vol_driver._conn = mock.Mock() - self._vmutils = self._base_vol_driver._vmutils - self._migrutils = self._base_vol_driver._migrutils - self._diskutils = self._base_vol_driver._diskutils - self._conn = self._base_vol_driver._conn - - @mock.patch.object(connector.InitiatorConnector, 'factory') - def test_connector(self, mock_conn_factory): - self._base_vol_driver._conn = None - self._base_vol_driver._protocol = mock.sentinel.protocol - self._base_vol_driver._extra_connector_args = dict( - fake_conn_arg=mock.sentinel.conn_val) - - conn = self._base_vol_driver._connector - - self.assertEqual(mock_conn_factory.return_value, conn) - mock_conn_factory.assert_called_once_with( - protocol=mock.sentinel.protocol, - root_helper=None, - use_multipath=CONF.hyperv.use_multipath_io, - device_scan_attempts=CONF.hyperv.mounted_disk_query_retry_count, - device_scan_interval=( - CONF.hyperv.mounted_disk_query_retry_interval), - **self._base_vol_driver._extra_connector_args) - - def test_connect_volume(self): - conn_info = get_fake_connection_info() - - dev_info = self._base_vol_driver.connect_volume(conn_info) - expected_dev_info = self._conn.connect_volume.return_value - - self.assertEqual(expected_dev_info, dev_info) - self._conn.connect_volume.assert_called_once_with( - conn_info['data']) - - def test_disconnect_volume(self): - conn_info = get_fake_connection_info() - - self._base_vol_driver.disconnect_volume(conn_info) - - self._conn.disconnect_volume.assert_called_once_with( - conn_info['data'], force=False) - - # Verify force=True - self._conn.disconnect_volume.reset_mock() - self._base_vol_driver.disconnect_volume(conn_info, force=True) - self._conn.disconnect_volume.assert_called_once_with( - conn_info['data'], force=True) - - @mock.patch.object(volumeops.BaseVolumeDriver, '_get_disk_res_path') - def _test_get_disk_resource_path_by_conn_info(self, - mock_get_disk_res_path, - disk_found=True): - conn_info = get_fake_connection_info() - mock_vol_paths = [mock.sentinel.disk_path] if disk_found else [] - self._conn.get_volume_paths.return_value = mock_vol_paths - - if disk_found: - disk_res_path = self._base_vol_driver.get_disk_resource_path( - conn_info) - - self._conn.get_volume_paths.assert_called_once_with( - conn_info['data']) - self.assertEqual(mock_get_disk_res_path.return_value, - disk_res_path) - mock_get_disk_res_path.assert_called_once_with( - mock.sentinel.disk_path) - else: - self.assertRaises(exception.DiskNotFound, - self._base_vol_driver.get_disk_resource_path, - conn_info) - - def test_get_existing_disk_res_path(self): - self._test_get_disk_resource_path_by_conn_info() - - def test_get_unfound_disk_res_path(self): - self._test_get_disk_resource_path_by_conn_info(disk_found=False) - - def test_get_block_dev_res_path(self): - self._base_vol_driver._is_block_dev = True - - mock_get_dev_number = ( - self._diskutils.get_device_number_from_device_name) - mock_get_dev_number.return_value = mock.sentinel.dev_number - self._vmutils.get_mounted_disk_by_drive_number.return_value = ( - mock.sentinel.disk_path) - - disk_path = self._base_vol_driver._get_disk_res_path( - mock.sentinel.dev_name) - - 
mock_get_dev_number.assert_called_once_with(mock.sentinel.dev_name) - self._vmutils.get_mounted_disk_by_drive_number.assert_called_once_with( - mock.sentinel.dev_number) - - self.assertEqual(mock.sentinel.disk_path, disk_path) - - def test_get_virt_disk_res_path(self): - # For virtual disk images, we expect the resource path to be the - # actual image path, as opposed to passthrough disks, in which case we - # need the Msvm_DiskDrive resource path when attaching it to a VM. - self._base_vol_driver._is_block_dev = False - - path = self._base_vol_driver._get_disk_res_path( - mock.sentinel.disk_path) - self.assertEqual(mock.sentinel.disk_path, path) - - @mock.patch.object(volumeops.BaseVolumeDriver, - '_get_disk_res_path') - @mock.patch.object(volumeops.BaseVolumeDriver, '_get_disk_ctrl_and_slot') - @mock.patch.object(volumeops.BaseVolumeDriver, - 'connect_volume') - def _test_attach_volume(self, mock_connect_volume, - mock_get_disk_ctrl_and_slot, - mock_get_disk_res_path, - is_block_dev=True): - connection_info = get_fake_connection_info() - self._base_vol_driver._is_block_dev = is_block_dev - mock_connect_volume.return_value = dict(path=mock.sentinel.raw_path) - - mock_get_disk_res_path.return_value = ( - mock.sentinel.disk_path) - mock_get_disk_ctrl_and_slot.return_value = ( - mock.sentinel.ctrller_path, - mock.sentinel.slot) - - self._base_vol_driver.attach_volume( - connection_info=connection_info, - instance_name=mock.sentinel.instance_name, - disk_bus=mock.sentinel.disk_bus) - - if is_block_dev: - self._vmutils.attach_volume_to_controller.assert_called_once_with( - mock.sentinel.instance_name, - mock.sentinel.ctrller_path, - mock.sentinel.slot, - mock.sentinel.disk_path, - serial=connection_info['serial']) - else: - self._vmutils.attach_drive.assert_called_once_with( - mock.sentinel.instance_name, - mock.sentinel.disk_path, - mock.sentinel.ctrller_path, - mock.sentinel.slot) - - mock_get_disk_res_path.assert_called_once_with( - mock.sentinel.raw_path) - mock_get_disk_ctrl_and_slot.assert_called_once_with( - mock.sentinel.instance_name, mock.sentinel.disk_bus) - - def test_attach_volume_image_file(self): - self._test_attach_volume(is_block_dev=False) - - def test_attach_volume_block_dev(self): - self._test_attach_volume(is_block_dev=True) - - def test_detach_volume_planned_vm(self): - self._base_vol_driver.detach_volume(mock.sentinel.connection_info, - mock.sentinel.inst_name) - self._vmutils.detach_vm_disk.assert_not_called() - - @mock.patch.object(volumeops.BaseVolumeDriver, - 'get_disk_resource_path') - def test_detach_volume(self, mock_get_disk_resource_path): - self._migrutils.planned_vm_exists.return_value = False - connection_info = get_fake_connection_info() - - self._base_vol_driver.detach_volume(connection_info, - mock.sentinel.instance_name) - - mock_get_disk_resource_path.assert_called_once_with( - connection_info) - self._vmutils.detach_vm_disk.assert_called_once_with( - mock.sentinel.instance_name, - mock_get_disk_resource_path.return_value, - is_physical=self._base_vol_driver._is_block_dev) - - def test_get_disk_ctrl_and_slot_ide(self): - ctrl, slot = self._base_vol_driver._get_disk_ctrl_and_slot( - mock.sentinel.instance_name, - disk_bus=constants.CTRL_TYPE_IDE) - - expected_ctrl = self._vmutils.get_vm_ide_controller.return_value - expected_slot = 0 - - self._vmutils.get_vm_ide_controller.assert_called_once_with( - mock.sentinel.instance_name, 0) - - self.assertEqual(expected_ctrl, ctrl) - self.assertEqual(expected_slot, slot) - - def 
test_get_disk_ctrl_and_slot_scsi(self): - ctrl, slot = self._base_vol_driver._get_disk_ctrl_and_slot( - mock.sentinel.instance_name, - disk_bus=constants.CTRL_TYPE_SCSI) - - expected_ctrl = self._vmutils.get_vm_scsi_controller.return_value - expected_slot = ( - self._vmutils.get_free_controller_slot.return_value) - - self._vmutils.get_vm_scsi_controller.assert_called_once_with( - mock.sentinel.instance_name) - self._vmutils.get_free_controller_slot( - self._vmutils.get_vm_scsi_controller.return_value) - - self.assertEqual(expected_ctrl, ctrl) - self.assertEqual(expected_slot, slot) - - def test_set_disk_qos_specs(self): - # This base method is a noop, we'll just make sure - # it doesn't error out. - self._base_vol_driver.set_disk_qos_specs( - mock.sentinel.conn_info, mock.sentinel.disk_qos_spes) - - -class ISCSIVolumeDriverTestCase(test_base.HyperVBaseTestCase): - """Unit tests for Hyper-V BaseVolumeDriver class.""" - - def test_extra_conn_args(self): - fake_iscsi_initiator = ( - 'PCI\\VEN_1077&DEV_2031&SUBSYS_17E8103C&REV_02\\' - '4&257301f0&0&0010_0') - self.flags(iscsi_initiator_list=[fake_iscsi_initiator], - group='hyperv') - expected_extra_conn_args = dict( - initiator_list=[fake_iscsi_initiator]) - - vol_driver = volumeops.ISCSIVolumeDriver() - - self.assertEqual(expected_extra_conn_args, - vol_driver._extra_connector_args) - - -class SMBFSVolumeDriverTestCase(test_base.HyperVBaseTestCase): - """Unit tests for the Hyper-V SMBFSVolumeDriver class.""" - - _FAKE_EXPORT_PATH = '//ip/share/' - _FAKE_CONN_INFO = get_fake_connection_info(export=_FAKE_EXPORT_PATH) - - def setUp(self): - super(SMBFSVolumeDriverTestCase, self).setUp() - self._volume_driver = volumeops.SMBFSVolumeDriver() - self._volume_driver._conn = mock.Mock() - self._conn = self._volume_driver._conn - - def test_get_export_path(self): - export_path = self._volume_driver._get_export_path( - self._FAKE_CONN_INFO) - expected_path = self._FAKE_EXPORT_PATH.replace('/', '\\') - self.assertEqual(expected_path, export_path) - - @mock.patch.object(volumeops.BaseVolumeDriver, 'attach_volume') - def test_attach_volume(self, mock_attach): - # The tested method will just apply a lock before calling - # the superclass method. 
- self._volume_driver.attach_volume( - self._FAKE_CONN_INFO, - mock.sentinel.instance_name, - disk_bus=mock.sentinel.disk_bus) - - mock_attach.assert_called_once_with( - self._FAKE_CONN_INFO, - mock.sentinel.instance_name, - disk_bus=mock.sentinel.disk_bus) - - @mock.patch.object(volumeops.BaseVolumeDriver, 'detach_volume') - def test_detach_volume(self, mock_detach): - self._volume_driver.detach_volume( - self._FAKE_CONN_INFO, - instance_name=mock.sentinel.instance_name) - - mock_detach.assert_called_once_with( - self._FAKE_CONN_INFO, - instance_name=mock.sentinel.instance_name) - - @mock.patch.object(volumeops.VolumeOps, 'bytes_per_sec_to_iops') - @mock.patch.object(volumeops.VolumeOps, 'validate_qos_specs') - @mock.patch.object(volumeops.BaseVolumeDriver, 'get_disk_resource_path') - def test_set_disk_qos_specs(self, mock_get_disk_path, - mock_validate_qos_specs, - mock_bytes_per_sec_to_iops): - fake_total_bytes_sec = 8 - fake_total_iops_sec = 1 - - storage_qos_specs = {'total_bytes_sec': fake_total_bytes_sec} - expected_supported_specs = ['total_iops_sec', 'total_bytes_sec'] - mock_set_qos_specs = self._volume_driver._vmutils.set_disk_qos_specs - mock_bytes_per_sec_to_iops.return_value = fake_total_iops_sec - mock_get_disk_path.return_value = mock.sentinel.disk_path - - self._volume_driver.set_disk_qos_specs(self._FAKE_CONN_INFO, - storage_qos_specs) - - mock_validate_qos_specs.assert_called_once_with( - storage_qos_specs, expected_supported_specs) - mock_bytes_per_sec_to_iops.assert_called_once_with( - fake_total_bytes_sec) - mock_get_disk_path.assert_called_once_with(self._FAKE_CONN_INFO) - mock_set_qos_specs.assert_called_once_with( - mock.sentinel.disk_path, - fake_total_iops_sec) - - -class RBDVolumeDriver(test_base.HyperVBaseTestCase): - def test_get_vol_driver(self): - self._volumeops = volumeops.VolumeOps() - self._volumeops._volutils = mock.MagicMock() - self._volumeops._vmutils = mock.Mock() - - connection_info = get_fake_connection_info() - connection_info['driver_volume_type'] = 'rbd' - - drv = self._volumeops._get_volume_driver(connection_info) - - # Not much to test here. The Hyper-V driver volume attach code - # is mostly generic and all the RBD related plumbing is handled - # by os-brick. - # - # We'll just ensure that the RBD driver can be retrieved and that it - # has the right fields. - self.assertTrue(drv._is_block_dev) - self.assertEqual('rbd', drv._protocol) - # Hyper-V requires a virtual SCSI disk so we'll ask for a - # local attach. - self.assertEqual(dict(do_local_attach=True), - drv._extra_connector_args) diff --git a/nova/virt/hyperv/README.rst b/nova/virt/hyperv/README.rst deleted file mode 100644 index 808e2cf29dae..000000000000 --- a/nova/virt/hyperv/README.rst +++ /dev/null @@ -1,44 +0,0 @@ -Hyper-V Volumes Management -============================================= - -To enable the volume features, the first thing that needs to be done is to -enable the iSCSI service on the Windows compute nodes and set it to start -automatically. - -sc config msiscsi start= auto -net start msiscsi - -In Windows Server 2012, it's important to execute the following commands to -prevent having the volumes being online by default: - -diskpart -san policy=OfflineAll -exit - -How to check if your iSCSI configuration is working properly: - -On your OpenStack controller: - -1. Create a volume with e.g. "nova volume-create 1" and note the generated -volume id - -On Windows: - -2. iscsicli QAddTargetPortal -3. 
iscsicli ListTargets - -The output should contain the iqn related to your volume: -iqn.2010-10.org.openstack:volume- - -How to test Boot from volume in Hyper-V from the OpenStack dashboard: - -1. Fist of all create a volume -2. Get the volume ID of the created volume -3. Upload and untar to the Cloud controller the next VHD image: -http://dev.opennebula.org/attachments/download/482/ttylinux.vhd.gz -4. sudo dd if=/path/to/vhdfileofstep3 -of=/dev/nova-volumes/volume-XXXXX <- Related to the ID of step 2 -5. Launch an instance from any image (this is not important because we are -just booting from a volume) from the dashboard, and don't forget to select -boot from volume and select the volume created in step2. Important: Device -name must be "vda". diff --git a/nova/virt/hyperv/__init__.py b/nova/virt/hyperv/__init__.py deleted file mode 100644 index 475333111bd0..000000000000 --- a/nova/virt/hyperv/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) 2014 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova.virt.hyperv import driver - -HyperVDriver = driver.HyperVDriver diff --git a/nova/virt/hyperv/block_device_manager.py b/nova/virt/hyperv/block_device_manager.py deleted file mode 100644 index 626c87c85686..000000000000 --- a/nova/virt/hyperv/block_device_manager.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright (c) 2016 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Handling of block device information and mapping - -Module contains helper methods for dealing with block device information -""" - -import itertools - -from os_win import constants as os_win_const - -from nova import block_device -from nova import exception -from nova.i18n import _ -from nova import objects -from nova.virt import configdrive -from nova.virt import driver -from nova.virt.hyperv import constants -from nova.virt.hyperv import volumeops - - -class BlockDeviceInfoManager(object): - - _VALID_BUS = {constants.VM_GEN_1: (constants.CTRL_TYPE_IDE, - constants.CTRL_TYPE_SCSI), - constants.VM_GEN_2: (constants.CTRL_TYPE_SCSI,)} - - _DEFAULT_BUS = constants.CTRL_TYPE_SCSI - - _TYPE_FOR_DISK_FORMAT = {'vhd': constants.DISK, - 'vhdx': constants.DISK, - 'iso': constants.DVD} - - _DEFAULT_ROOT_DEVICE = '/dev/sda' - - def __init__(self): - self._volops = volumeops.VolumeOps() - - @staticmethod - def _get_device_bus(bdm): - """Determines the device bus and it's hypervisor assigned address. 
- """ - if bdm['disk_bus'] == constants.CTRL_TYPE_SCSI: - address = ':'.join(['0', '0', str(bdm['drive_addr']), - str(bdm['ctrl_disk_addr'])]) - return objects.SCSIDeviceBus(address=address) - elif bdm['disk_bus'] == constants.CTRL_TYPE_IDE: - address = ':'.join([str(bdm['drive_addr']), - str(bdm['ctrl_disk_addr'])]) - return objects.IDEDeviceBus(address=address) - - def get_bdm_metadata(self, context, instance, block_device_info): - """Builds a metadata object for instance devices, that maps the user - provided tag to the hypervisor assigned device address. - """ - # block_device_info does not contain tags information. - bdm_obj_list = objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance.uuid) - - # create a map between BDM object and its device name. - bdm_obj_map = {bdm.device_name: bdm for bdm in bdm_obj_list} - - bdm_metadata = [] - for bdm in itertools.chain(block_device_info['block_device_mapping'], - block_device_info['ephemerals'], - [block_device_info['root_disk']]): - # NOTE(claudiub): ephemerals have device_name instead of - # mount_device. - device_name = bdm.get('mount_device') or bdm.get('device_name') - bdm_obj = bdm_obj_map.get(device_name) - - if bdm_obj and 'tag' in bdm_obj and bdm_obj.tag: - bus = self._get_device_bus(bdm) - device = objects.DiskMetadata(bus=bus, - serial=bdm_obj.volume_id, - tags=[bdm_obj.tag]) - bdm_metadata.append(device) - - return bdm_metadata - - def _initialize_controller_slot_counter(self, instance, vm_gen): - # we have 2 IDE controllers, for a total of 4 slots - free_slots_by_device_type = { - constants.CTRL_TYPE_IDE: [ - os_win_const.IDE_CONTROLLER_SLOTS_NUMBER] * 2, - constants.CTRL_TYPE_SCSI: [ - os_win_const.SCSI_CONTROLLER_SLOTS_NUMBER] - } - if configdrive.required_by(instance): - if vm_gen == constants.VM_GEN_1: - # reserve one slot for the config drive on the second - # controller in case of generation 1 virtual machines - free_slots_by_device_type[constants.CTRL_TYPE_IDE][1] -= 1 - return free_slots_by_device_type - - def validate_and_update_bdi(self, instance, image_meta, vm_gen, - block_device_info): - slot_map = self._initialize_controller_slot_counter(instance, vm_gen) - self._check_and_update_root_device(vm_gen, image_meta, - block_device_info, slot_map) - self._check_and_update_ephemerals(vm_gen, block_device_info, slot_map) - self._check_and_update_volumes(vm_gen, block_device_info, slot_map) - - if vm_gen == constants.VM_GEN_2 and configdrive.required_by(instance): - # for Generation 2 VMs, the configdrive is attached to the SCSI - # controller. Check that there is still a slot available for it. 
- if slot_map[constants.CTRL_TYPE_SCSI][0] == 0: - msg = _("There are no more free slots on controller %s for " - "configdrive.") % constants.CTRL_TYPE_SCSI - raise exception.InvalidBDMFormat(details=msg) - - def _check_and_update_root_device(self, vm_gen, image_meta, - block_device_info, slot_map): - # either booting from volume, or booting from image/iso - root_disk = {} - - root_device = driver.block_device_info_get_root_device( - block_device_info) - root_device = root_device or self._DEFAULT_ROOT_DEVICE - - if self.is_boot_from_volume(block_device_info): - root_volume = self._get_root_device_bdm( - block_device_info, root_device) - root_disk['type'] = constants.VOLUME - root_disk['path'] = None - root_disk['connection_info'] = root_volume['connection_info'] - else: - root_disk['type'] = self._TYPE_FOR_DISK_FORMAT.get( - image_meta.disk_format) - if root_disk['type'] is None: - raise exception.InvalidImageFormat( - format=image_meta.disk_format) - root_disk['path'] = None - root_disk['connection_info'] = None - - root_disk['disk_bus'] = (constants.CTRL_TYPE_IDE if - vm_gen == constants.VM_GEN_1 else constants.CTRL_TYPE_SCSI) - (root_disk['drive_addr'], - root_disk['ctrl_disk_addr']) = self._get_available_controller_slot( - root_disk['disk_bus'], slot_map) - root_disk['boot_index'] = 0 - root_disk['mount_device'] = root_device - - block_device_info['root_disk'] = root_disk - - def _get_available_controller_slot(self, controller_type, slot_map): - max_slots = (os_win_const.IDE_CONTROLLER_SLOTS_NUMBER if - controller_type == constants.CTRL_TYPE_IDE else - os_win_const.SCSI_CONTROLLER_SLOTS_NUMBER) - for idx, ctrl in enumerate(slot_map[controller_type]): - if slot_map[controller_type][idx] >= 1: - drive_addr = idx - ctrl_disk_addr = max_slots - slot_map[controller_type][idx] - slot_map[controller_type][idx] -= 1 - return (drive_addr, ctrl_disk_addr) - - msg = _("There are no more free slots on controller %s" - ) % controller_type - raise exception.InvalidBDMFormat(details=msg) - - def is_boot_from_volume(self, block_device_info): - if block_device_info: - root_device = block_device_info.get('root_device_name') - if not root_device: - root_device = self._DEFAULT_ROOT_DEVICE - - return block_device.volume_in_mapping(root_device, - block_device_info) - - def _get_root_device_bdm(self, block_device_info, mount_device=None): - for mapping in driver.block_device_info_get_mapping(block_device_info): - if mapping['mount_device'] == mount_device: - return mapping - - def _check_and_update_ephemerals(self, vm_gen, block_device_info, - slot_map): - ephemerals = driver.block_device_info_get_ephemerals(block_device_info) - for eph in ephemerals: - self._check_and_update_bdm(slot_map, vm_gen, eph) - - def _check_and_update_volumes(self, vm_gen, block_device_info, slot_map): - volumes = driver.block_device_info_get_mapping(block_device_info) - root_device_name = block_device_info['root_disk']['mount_device'] - root_bdm = self._get_root_device_bdm(block_device_info, - root_device_name) - if root_bdm: - volumes.remove(root_bdm) - for vol in volumes: - self._check_and_update_bdm(slot_map, vm_gen, vol) - - def _check_and_update_bdm(self, slot_map, vm_gen, bdm): - disk_bus = bdm.get('disk_bus') - if not disk_bus: - bdm['disk_bus'] = self._DEFAULT_BUS - elif disk_bus not in self._VALID_BUS[vm_gen]: - msg = _("Hyper-V does not support bus type %(disk_bus)s " - "for generation %(vm_gen)s instances." 
- ) % {'disk_bus': disk_bus, - 'vm_gen': vm_gen} - raise exception.InvalidDiskInfo(reason=msg) - - device_type = bdm.get('device_type') - if not device_type: - bdm['device_type'] = 'disk' - elif device_type != 'disk': - msg = _("Hyper-V does not support disk type %s for ephemerals " - "or volumes.") % device_type - raise exception.InvalidDiskInfo(reason=msg) - - (bdm['drive_addr'], - bdm['ctrl_disk_addr']) = self._get_available_controller_slot( - bdm['disk_bus'], slot_map) - - # make sure that boot_index is set. - bdm['boot_index'] = bdm.get('boot_index') - - def _sort_by_boot_order(self, bd_list): - # we sort the block devices by boot_index leaving the ones that don't - # have a specified boot_index at the end - bd_list.sort(key=lambda x: (x['boot_index'] is None, x['boot_index'])) - - def get_boot_order(self, vm_gen, block_device_info): - if vm_gen == constants.VM_GEN_1: - return self._get_boot_order_gen1(block_device_info) - else: - return self._get_boot_order_gen2(block_device_info) - - def _get_boot_order_gen1(self, block_device_info): - if block_device_info['root_disk']['type'] == 'iso': - return [os_win_const.BOOT_DEVICE_CDROM, - os_win_const.BOOT_DEVICE_HARDDISK, - os_win_const.BOOT_DEVICE_NETWORK, - os_win_const.BOOT_DEVICE_FLOPPY] - else: - return [os_win_const.BOOT_DEVICE_HARDDISK, - os_win_const.BOOT_DEVICE_CDROM, - os_win_const.BOOT_DEVICE_NETWORK, - os_win_const.BOOT_DEVICE_FLOPPY] - - def _get_boot_order_gen2(self, block_device_info): - devices = [block_device_info['root_disk']] - devices += driver.block_device_info_get_ephemerals( - block_device_info) - devices += driver.block_device_info_get_mapping(block_device_info) - - self._sort_by_boot_order(devices) - - boot_order = [] - for dev in devices: - if dev.get('connection_info'): - dev_path = self._volops.get_disk_resource_path( - dev['connection_info']) - boot_order.append(dev_path) - else: - boot_order.append(dev['path']) - - return boot_order diff --git a/nova/virt/hyperv/constants.py b/nova/virt/hyperv/constants.py deleted file mode 100644 index 851dd7076dee..000000000000 --- a/nova/virt/hyperv/constants.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2012 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Constants used in ops classes -""" - -from os_win import constants -from oslo_utils import units - -from nova.compute import power_state -from nova.objects import fields as obj_fields - -HYPERV_POWER_STATE = { - constants.HYPERV_VM_STATE_DISABLED: power_state.SHUTDOWN, - constants.HYPERV_VM_STATE_SHUTTING_DOWN: power_state.SHUTDOWN, - constants.HYPERV_VM_STATE_ENABLED: power_state.RUNNING, - constants.HYPERV_VM_STATE_PAUSED: power_state.PAUSED, - constants.HYPERV_VM_STATE_SUSPENDED: power_state.SUSPENDED -} - -WMI_WIN32_PROCESSOR_ARCHITECTURE = { - constants.ARCH_I686: obj_fields.Architecture.I686, - constants.ARCH_MIPS: obj_fields.Architecture.MIPS, - constants.ARCH_ALPHA: obj_fields.Architecture.ALPHA, - constants.ARCH_PPC: obj_fields.Architecture.PPC, - constants.ARCH_ARMV7: obj_fields.Architecture.ARMV7, - constants.ARCH_IA64: obj_fields.Architecture.IA64, - constants.ARCH_X86_64: obj_fields.Architecture.X86_64, -} - - -CTRL_TYPE_IDE = "IDE" -CTRL_TYPE_SCSI = "SCSI" - -DISK = "VHD" -DISK_FORMAT = DISK -DVD = "DVD" -DVD_FORMAT = "ISO" -VOLUME = "VOLUME" - -DISK_FORMAT_MAP = { - DISK_FORMAT.lower(): DISK, - DVD_FORMAT.lower(): DVD -} - -BDI_DEVICE_TYPE_TO_DRIVE_TYPE = {'disk': DISK} - -DISK_FORMAT_VHD = "VHD" -DISK_FORMAT_VHDX = "VHDX" - -HOST_POWER_ACTION_SHUTDOWN = "shutdown" -HOST_POWER_ACTION_REBOOT = "reboot" -HOST_POWER_ACTION_STARTUP = "startup" - -FLAVOR_SPEC_SECURE_BOOT = "os:secure_boot" -IMAGE_PROP_VM_GEN_1 = "hyperv-gen1" -IMAGE_PROP_VM_GEN_2 = "hyperv-gen2" - -VM_GEN_1 = 1 -VM_GEN_2 = 2 - -SERIAL_CONSOLE_BUFFER_SIZE = 4 * units.Ki - -SERIAL_PORT_TYPE_RO = 'ro' -SERIAL_PORT_TYPE_RW = 'rw' - -# The default serial console port number used for -# logging and interactive sessions. -DEFAULT_SERIAL_CONSOLE_PORT = 1 - -FLAVOR_ESPEC_REMOTEFX_RES = 'os:resolution' -FLAVOR_ESPEC_REMOTEFX_MONITORS = 'os:monitors' -FLAVOR_ESPEC_REMOTEFX_VRAM = 'os:vram' - -IOPS_BASE_SIZE = 8 * units.Ki - -STORAGE_PROTOCOL_ISCSI = 'iscsi' -STORAGE_PROTOCOL_FC = 'fibre_channel' -STORAGE_PROTOCOL_SMBFS = 'smbfs' -STORAGE_PROTOCOL_RBD = 'rbd' diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py deleted file mode 100644 index d0f95dd549cc..000000000000 --- a/nova/virt/hyperv/driver.py +++ /dev/null @@ -1,385 +0,0 @@ -# Copyright (c) 2010 Cloud.com, Inc -# Copyright (c) 2012 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A Hyper-V Nova Compute driver. 
-""" - -import functools -import platform -import sys - -from os_win import exceptions as os_win_exc -from os_win import utilsfactory -from oslo_log import log as logging - -from nova import context as nova_context -from nova import exception -from nova import objects -from nova.virt import driver -from nova.virt.hyperv import eventhandler -from nova.virt.hyperv import hostops -from nova.virt.hyperv import imagecache -from nova.virt.hyperv import livemigrationops -from nova.virt.hyperv import migrationops -from nova.virt.hyperv import rdpconsoleops -from nova.virt.hyperv import serialconsoleops -from nova.virt.hyperv import snapshotops -from nova.virt.hyperv import vmops -from nova.virt.hyperv import volumeops - - -LOG = logging.getLogger(__name__) - - -def convert_exceptions(function, exception_map): - expected_exceptions = tuple(exception_map.keys()) - - @functools.wraps(function) - def wrapper(*args, **kwargs): - try: - return function(*args, **kwargs) - except expected_exceptions as ex: - raised_exception = exception_map.get(type(ex)) - if not raised_exception: - # exception might be a subclass of an expected exception. - for expected in expected_exceptions: - if isinstance(ex, expected): - raised_exception = exception_map[expected] - break - - exc_info = sys.exc_info() - # NOTE(claudiub): The original message will be maintained - # by passing the original exception. - exc = raised_exception(str(exc_info[1])) - raise exc.with_traceback(exc_info[2]) - return wrapper - - -def decorate_all_methods(decorator, *args, **kwargs): - def decorate(cls): - for attr in cls.__dict__: - class_member = getattr(cls, attr) - if callable(class_member): - setattr(cls, attr, decorator(class_member, *args, **kwargs)) - return cls - - return decorate - - -exception_conversion_map = { - # expected_exception: converted_exception - os_win_exc.OSWinException: exception.NovaException, - os_win_exc.HyperVVMNotFoundException: exception.InstanceNotFound, -} - -# NOTE(claudiub): the purpose of the decorator below is to prevent any -# os_win exceptions (subclasses of OSWinException) to leak outside of the -# HyperVDriver. - - -@decorate_all_methods(convert_exceptions, exception_conversion_map) -class HyperVDriver(driver.ComputeDriver): - capabilities = { - "has_imagecache": True, - "supports_evacuate": False, - "supports_migrate_to_same_host": False, - "supports_attach_interface": True, - "supports_device_tagging": True, - "supports_multiattach": False, - "supports_trusted_certs": False, - "supports_pcpus": False, - "supports_accelerators": False, - "supports_secure_boot": True, - "supports_remote_managed_ports": False, - "supports_address_space_passthrough": False, - "supports_address_space_emulated": False, - - # Supported image types - "supports_image_type_vhd": True, - "supports_image_type_vhdx": True, - } - - def __init__(self, virtapi): - # check if the current version of Windows is supported before any - # further driver initialisation. 
- self._check_minimum_windows_version() - - super(HyperVDriver, self).__init__(virtapi) - - self._hostops = hostops.HostOps() - self._volumeops = volumeops.VolumeOps() - self._vmops = vmops.VMOps(virtapi) - self._snapshotops = snapshotops.SnapshotOps() - self._livemigrationops = livemigrationops.LiveMigrationOps() - self._migrationops = migrationops.MigrationOps() - self._rdpconsoleops = rdpconsoleops.RDPConsoleOps() - self._serialconsoleops = serialconsoleops.SerialConsoleOps() - self._imagecache = imagecache.ImageCache() - - def _check_minimum_windows_version(self): - hostutils = utilsfactory.get_hostutils() - if not hostutils.check_min_windows_version(6, 2): - # the version is of Windows is older than Windows Server 2012 R2. - # Log an error, letting users know that this version is not - # supported any longer. - LOG.error('You are running nova-compute on an unsupported ' - 'version of Windows (older than Windows / Hyper-V ' - 'Server 2012). The support for this version of ' - 'Windows has been removed in Mitaka.') - raise exception.HypervisorTooOld(version='6.2') - elif not hostutils.check_min_windows_version(6, 3): - # TODO(claudiub): replace the warning with an exception in Rocky. - LOG.warning('You are running nova-compute on Windows / Hyper-V ' - 'Server 2012. The support for this version of Windows ' - 'has been deprecated In Queens, and will be removed ' - 'in Rocky.') - - def init_host(self, host): - LOG.warning( - 'The hyperv driver is not tested by the OpenStack project nor ' - 'does it have clear maintainer(s) and thus its quality can not be ' - 'ensured. It should be considered experimental and may be removed ' - 'in a future release. If you are using the driver in production ' - 'please let us know via the openstack-discuss mailing list.' 
- ) - - self._serialconsoleops.start_console_handlers() - event_handler = eventhandler.InstanceEventHandler( - state_change_callback=self.emit_event) - event_handler.start_listener() - - def list_instance_uuids(self): - return self._vmops.list_instance_uuids() - - def list_instances(self): - return self._vmops.list_instances() - - def spawn(self, context, instance, image_meta, injected_files, - admin_password, allocations, network_info=None, - block_device_info=None, power_on=True, accel_info=None): - self._vmops.spawn(context, instance, image_meta, injected_files, - admin_password, network_info, block_device_info) - - def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None, bad_volumes_callback=None, - accel_info=None): - self._vmops.reboot(instance, network_info, reboot_type) - - def destroy(self, context, instance, network_info, block_device_info=None, - destroy_disks=True, destroy_secrets=True): - self._vmops.destroy(instance, network_info, block_device_info, - destroy_disks) - - def cleanup(self, context, instance, network_info, block_device_info=None, - destroy_disks=True, migrate_data=None, destroy_vifs=True, - destroy_secrets=True): - """Cleanup after instance being destroyed by Hypervisor.""" - self.unplug_vifs(instance, network_info) - - def get_info(self, instance, use_cache=True): - return self._vmops.get_info(instance) - - def attach_volume(self, context, connection_info, instance, mountpoint, - disk_bus=None, device_type=None, encryption=None): - return self._volumeops.attach_volume(connection_info, - instance.name) - - def detach_volume(self, context, connection_info, instance, mountpoint, - encryption=None): - return self._volumeops.detach_volume(connection_info, - instance.name) - - def get_volume_connector(self, instance): - return self._volumeops.get_volume_connector() - - def get_available_resource(self, nodename): - return self._hostops.get_available_resource() - - def get_available_nodes(self, refresh=False): - return [platform.node()] - - def host_power_action(self, action): - return self._hostops.host_power_action(action) - - def snapshot(self, context, instance, image_id, update_task_state): - self._snapshotops.snapshot(context, instance, image_id, - update_task_state) - - def pause(self, instance): - self._vmops.pause(instance) - - def unpause(self, instance): - self._vmops.unpause(instance) - - def suspend(self, context, instance): - self._vmops.suspend(instance) - - def resume(self, context, instance, network_info, block_device_info=None): - self._vmops.resume(instance) - - def power_off(self, instance, timeout=0, retry_interval=0): - self._vmops.power_off(instance, timeout, retry_interval) - - def power_on(self, context, instance, network_info, - block_device_info=None, accel_info=None): - self._vmops.power_on(instance, block_device_info, network_info) - - def resume_state_on_host_boot(self, context, instance, network_info, - block_device_info=None): - """Resume guest state when a host is booted.""" - self._vmops.resume_state_on_host_boot(context, instance, network_info, - block_device_info) - - def live_migration(self, context, instance, dest, post_method, - recover_method, block_migration=False, - migrate_data=None): - self._livemigrationops.live_migration(context, instance, dest, - post_method, recover_method, - block_migration, migrate_data) - - def rollback_live_migration_at_destination(self, context, instance, - network_info, - block_device_info, - destroy_disks=True, - migrate_data=None): - self.destroy(context, instance, 
network_info, block_device_info, - destroy_disks=destroy_disks) - - def pre_live_migration(self, context, instance, block_device_info, - network_info, disk_info, migrate_data): - self._livemigrationops.pre_live_migration(context, instance, - block_device_info, - network_info) - return migrate_data - - def post_live_migration(self, context, instance, block_device_info, - migrate_data=None): - self._livemigrationops.post_live_migration(context, instance, - block_device_info, - migrate_data) - - def post_live_migration_at_destination(self, context, instance, - network_info, - block_migration=False, - block_device_info=None): - self._livemigrationops.post_live_migration_at_destination( - context, - instance, - network_info, - block_migration) - - def check_can_live_migrate_destination(self, context, instance, - src_compute_info, dst_compute_info, - block_migration=False, - disk_over_commit=False): - return self._livemigrationops.check_can_live_migrate_destination( - context, instance, src_compute_info, dst_compute_info, - block_migration, disk_over_commit) - - def cleanup_live_migration_destination_check(self, context, - dest_check_data): - self._livemigrationops.cleanup_live_migration_destination_check( - context, dest_check_data) - - def check_can_live_migrate_source(self, context, instance, - dest_check_data, block_device_info=None): - return self._livemigrationops.check_can_live_migrate_source( - context, instance, dest_check_data) - - def get_instance_disk_info(self, instance, block_device_info=None): - pass - - def plug_vifs(self, instance, network_info): - """Plug VIFs into networks.""" - self._vmops.plug_vifs(instance, network_info) - - def unplug_vifs(self, instance, network_info): - """Unplug VIFs from networks.""" - self._vmops.unplug_vifs(instance, network_info) - - def migrate_disk_and_power_off(self, context, instance, dest, - flavor, network_info, - block_device_info=None, - timeout=0, retry_interval=0): - return self._migrationops.migrate_disk_and_power_off(context, - instance, dest, - flavor, - network_info, - block_device_info, - timeout, - retry_interval) - - def confirm_migration(self, context, migration, instance, network_info): - self._migrationops.confirm_migration(context, migration, - instance, network_info) - - def finish_revert_migration(self, context, instance, network_info, - migration, block_device_info=None, - power_on=True): - self._migrationops.finish_revert_migration(context, instance, - network_info, - block_device_info, power_on) - - def finish_migration(self, context, migration, instance, disk_info, - network_info, image_meta, resize_instance, - allocations, block_device_info=None, power_on=True): - self._migrationops.finish_migration(context, migration, instance, - disk_info, network_info, - image_meta, resize_instance, - block_device_info, power_on) - - def get_host_ip_addr(self): - return self._hostops.get_host_ip_addr() - - def get_host_uptime(self): - return self._hostops.get_host_uptime() - - def get_rdp_console(self, context, instance): - return self._rdpconsoleops.get_rdp_console(instance) - - def get_serial_console(self, context, instance): - return self._serialconsoleops.get_serial_console(instance.name) - - def get_console_output(self, context, instance): - return self._serialconsoleops.get_console_output(instance.name) - - def manage_image_cache(self, context, all_instances): - self._imagecache.update(context, all_instances) - - def attach_interface(self, context, instance, image_meta, vif): - return self._vmops.attach_interface(instance, vif) - 
- def detach_interface(self, context, instance, vif): - return self._vmops.detach_interface(instance, vif) - - def rescue(self, context, instance, network_info, image_meta, - rescue_password, block_device_info): - self._vmops.rescue_instance(context, instance, network_info, - image_meta, rescue_password) - - def unrescue( - self, - context: nova_context.RequestContext, - instance: 'objects.Instance', - ): - self._vmops.unrescue_instance(instance) - - def update_provider_tree(self, provider_tree, nodename, allocations=None): - inventory = provider_tree.data(nodename).inventory - alloc_ratios = self._get_allocation_ratios(inventory) - - self._hostops.update_provider_tree( - provider_tree, nodename, alloc_ratios, allocations) diff --git a/nova/virt/hyperv/eventhandler.py b/nova/virt/hyperv/eventhandler.py deleted file mode 100644 index e958717bb80d..000000000000 --- a/nova/virt/hyperv/eventhandler.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from os_win import constants -from os_win import exceptions as os_win_exc -from os_win import utilsfactory -from oslo_log import log as logging - -import nova.conf -from nova import utils -from nova.virt import event as virtevent -from nova.virt.hyperv import serialconsoleops - -LOG = logging.getLogger(__name__) - -CONF = nova.conf.CONF - - -class InstanceEventHandler(object): - # The event listener timeout is set to 0 in order to return immediately - # and avoid blocking the thread. - _WAIT_TIMEOUT = 0 - - _TRANSITION_MAP = { - constants.HYPERV_VM_STATE_ENABLED: virtevent.EVENT_LIFECYCLE_STARTED, - constants.HYPERV_VM_STATE_DISABLED: virtevent.EVENT_LIFECYCLE_STOPPED, - constants.HYPERV_VM_STATE_PAUSED: virtevent.EVENT_LIFECYCLE_PAUSED, - constants.HYPERV_VM_STATE_SUSPENDED: - virtevent.EVENT_LIFECYCLE_SUSPENDED - } - - def __init__(self, state_change_callback=None): - self._vmutils = utilsfactory.get_vmutils() - self._listener = self._vmutils.get_vm_power_state_change_listener( - timeframe=CONF.hyperv.power_state_check_timeframe, - event_timeout=CONF.hyperv.power_state_event_polling_interval, - filtered_states=list(self._TRANSITION_MAP.keys()), - get_handler=True) - - self._serial_console_ops = serialconsoleops.SerialConsoleOps() - self._state_change_callback = state_change_callback - - def start_listener(self): - utils.spawn_n(self._listener, self._event_callback) - - def _event_callback(self, instance_name, instance_power_state): - # Instance uuid set by Nova. If this is missing, we assume that - # the instance was not created by Nova and ignore the event. 
- instance_uuid = self._get_instance_uuid(instance_name) - if instance_uuid: - self._emit_event(instance_name, - instance_uuid, - instance_power_state) - - def _emit_event(self, instance_name, instance_uuid, instance_state): - virt_event = self._get_virt_event(instance_uuid, - instance_state) - utils.spawn_n(self._state_change_callback, virt_event) - - utils.spawn_n(self._handle_serial_console_workers, - instance_name, instance_state) - - def _handle_serial_console_workers(self, instance_name, instance_state): - if instance_state == constants.HYPERV_VM_STATE_ENABLED: - self._serial_console_ops.start_console_handler(instance_name) - else: - self._serial_console_ops.stop_console_handler(instance_name) - - def _get_instance_uuid(self, instance_name): - try: - instance_uuid = self._vmutils.get_instance_uuid(instance_name) - if not instance_uuid: - LOG.warning("Instance uuid could not be retrieved for " - "instance %s. Instance state change event " - "will be ignored.", instance_name) - return instance_uuid - except os_win_exc.HyperVVMNotFoundException: - # The instance has been deleted. - pass - - def _get_virt_event(self, instance_uuid, instance_state): - transition = self._TRANSITION_MAP[instance_state] - return virtevent.LifecycleEvent(uuid=instance_uuid, - transition=transition) diff --git a/nova/virt/hyperv/hostops.py b/nova/virt/hyperv/hostops.py deleted file mode 100644 index 1c527860092f..000000000000 --- a/nova/virt/hyperv/hostops.py +++ /dev/null @@ -1,291 +0,0 @@ -# Copyright 2012 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Management class for host operations. -""" -import datetime -import platform -import time - -import os_resource_classes as orc -from os_win import constants as os_win_const -from os_win import utilsfactory -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import units - -from nova.compute import utils as compute_utils -import nova.conf -from nova.i18n import _ -from nova import objects -from nova.objects import fields as obj_fields -from nova.virt.hyperv import constants -from nova.virt.hyperv import pathutils - -CONF = nova.conf.CONF -LOG = logging.getLogger(__name__) - - -class HostOps(object): - def __init__(self): - self._diskutils = utilsfactory.get_diskutils() - self._hostutils = utilsfactory.get_hostutils() - self._pathutils = pathutils.PathUtils() - - def _get_cpu_info(self): - """Get the CPU information. - :returns: A dictionary containing the main properties - of the central processor in the hypervisor. 
- """ - cpu_info = dict() - - processors = self._hostutils.get_cpus_info() - - w32_arch_dict = constants.WMI_WIN32_PROCESSOR_ARCHITECTURE - cpu_info['arch'] = w32_arch_dict.get(processors[0]['Architecture'], - 'Unknown') - cpu_info['model'] = processors[0]['Name'] - cpu_info['vendor'] = processors[0]['Manufacturer'] - - topology = dict() - topology['sockets'] = len(processors) - topology['cores'] = processors[0]['NumberOfCores'] - topology['threads'] = (processors[0]['NumberOfLogicalProcessors'] // - processors[0]['NumberOfCores']) - cpu_info['topology'] = topology - - features = list() - for fkey, fname in os_win_const.PROCESSOR_FEATURE.items(): - if self._hostutils.is_cpu_feature_present(fkey): - features.append(fname) - cpu_info['features'] = features - - return cpu_info - - def _get_memory_info(self): - (total_mem_kb, free_mem_kb) = self._hostutils.get_memory_info() - total_mem_mb = total_mem_kb // 1024 - free_mem_mb = free_mem_kb // 1024 - return (total_mem_mb, free_mem_mb, total_mem_mb - free_mem_mb) - - def _get_storage_info_gb(self): - instances_dir = self._pathutils.get_instances_dir() - (size, free_space) = self._diskutils.get_disk_capacity( - instances_dir) - - total_gb = size // units.Gi - free_gb = free_space // units.Gi - used_gb = total_gb - free_gb - return (total_gb, free_gb, used_gb) - - def _get_hypervisor_version(self): - """Get hypervisor version. - :returns: hypervisor version (ex. 6003) - """ - - # NOTE(claudiub): The hypervisor_version will be stored in the database - # as an Integer and it will be used by the scheduler, if required by - # the image property 'img_hv_requested_version'. - # The hypervisor_version will then be converted back to a version - # by splitting the int in groups of 3 digits. - # E.g.: hypervisor_version 6003 is converted to '6.3'. - version = self._hostutils.get_windows_version().split('.') - version = int(version[0]) * 1000 + int(version[1]) - LOG.debug('Windows version: %s ', version) - return version - - def _get_remotefx_gpu_info(self): - total_video_ram = 0 - available_video_ram = 0 - - if CONF.hyperv.enable_remotefx: - gpus = self._hostutils.get_remotefx_gpu_info() - for gpu in gpus: - total_video_ram += int(gpu['total_video_ram']) - available_video_ram += int(gpu['available_video_ram']) - else: - gpus = [] - - return {'total_video_ram': total_video_ram, - 'used_video_ram': total_video_ram - available_video_ram, - 'gpu_info': jsonutils.dumps(gpus)} - - def _get_host_numa_topology(self): - numa_nodes = self._hostutils.get_numa_nodes() - cells = [] - for numa_node in numa_nodes: - # Hyper-V does not support CPU pinning / mempages. - # initializing the rest of the fields. - numa_node.update(pinned_cpus=set(), mempages=[], siblings=[]) - cell = objects.NUMACell(**numa_node) - cells.append(cell) - - return objects.NUMATopology(cells=cells) - - def get_available_resource(self): - """Retrieve resource info. - - This method is called when nova-compute launches, and - as part of a periodic task. 
- - :returns: dictionary describing resources - - """ - LOG.debug('get_available_resource called') - - (total_mem_mb, - free_mem_mb, - used_mem_mb) = self._get_memory_info() - - (total_hdd_gb, - free_hdd_gb, - used_hdd_gb) = self._get_storage_info_gb() - - cpu_info = self._get_cpu_info() - cpu_topology = cpu_info['topology'] - vcpus = (cpu_topology['sockets'] * - cpu_topology['cores'] * - cpu_topology['threads']) - - # NOTE(claudiub): free_hdd_gb only refers to the currently free - # physical storage, it doesn't take into consideration the virtual - # sizes of the VMs' dynamic disks. This means that the VMs' disks can - # expand beyond the free_hdd_gb's value, and instances will still be - # scheduled to this compute node. - dic = {'vcpus': vcpus, - 'memory_mb': total_mem_mb, - 'memory_mb_used': used_mem_mb, - 'local_gb': total_hdd_gb, - 'local_gb_used': used_hdd_gb, - 'disk_available_least': free_hdd_gb, - 'hypervisor_type': "hyperv", - 'hypervisor_version': self._get_hypervisor_version(), - 'hypervisor_hostname': platform.node(), - 'vcpus_used': 0, - 'cpu_info': jsonutils.dumps(cpu_info), - 'supported_instances': [ - (obj_fields.Architecture.I686, - obj_fields.HVType.HYPERV, - obj_fields.VMMode.HVM), - (obj_fields.Architecture.X86_64, - obj_fields.HVType.HYPERV, - obj_fields.VMMode.HVM)], - 'numa_topology': self._get_host_numa_topology()._to_json(), - 'pci_passthrough_devices': self._get_pci_passthrough_devices(), - } - - gpu_info = self._get_remotefx_gpu_info() - dic.update(gpu_info) - return dic - - def _get_pci_passthrough_devices(self): - """Get host PCI devices information. - - Obtains PCI devices information and returns it as a JSON string. - - :returns: a JSON string containing a list of the assignable PCI - devices information. - """ - - pci_devices = self._hostutils.get_pci_passthrough_devices() - - for pci_dev in pci_devices: - # NOTE(claudiub): These fields are required by the PCI tracker. - dev_label = 'label_%(vendor_id)s_%(product_id)s' % { - 'vendor_id': pci_dev['vendor_id'], - 'product_id': pci_dev['product_id']} - - # TODO(claudiub): Find a way to associate the PCI devices with - # the NUMA nodes they are in. - pci_dev.update(dev_type=obj_fields.PciDeviceType.STANDARD, - label=dev_label, - numa_node=None) - - return jsonutils.dumps(pci_devices) - - def host_power_action(self, action): - """Reboots, shuts down or powers up the host.""" - if action in [constants.HOST_POWER_ACTION_SHUTDOWN, - constants.HOST_POWER_ACTION_REBOOT]: - self._hostutils.host_power_action(action) - else: - if action == constants.HOST_POWER_ACTION_STARTUP: - raise NotImplementedError( - _("Host PowerOn is not supported by the Hyper-V driver")) - - def get_host_ip_addr(self): - host_ip = CONF.my_ip - if not host_ip: - # Return the first available address - host_ip = self._hostutils.get_local_ips()[0] - LOG.debug("Host IP address is: %s", host_ip) - return host_ip - - def get_host_uptime(self): - """Returns the host uptime.""" - - tick_count64 = self._hostutils.get_host_tick_count64() - - # format the string to match libvirt driver uptime - # Libvirt uptime returns a combination of the following - # - current host time - # - time since host is up - # - number of logged in users - # - cpu load - # Since the Windows function GetTickCount64 returns only - # the time since the host is up, returning 0s for cpu load - # and number of logged in users. 
- # This is done to ensure the format of the returned - # value is same as in libvirt - return "%s up %s, 0 users, load average: 0, 0, 0" % ( - str(time.strftime("%H:%M:%S")), - str(datetime.timedelta(milliseconds=int(tick_count64)))) - - def update_provider_tree(self, provider_tree, nodename, - allocation_ratios, allocations=None): - resources = self.get_available_resource() - - inventory = { - orc.VCPU: { - 'total': resources['vcpus'], - 'min_unit': 1, - 'max_unit': resources['vcpus'], - 'step_size': 1, - 'allocation_ratio': allocation_ratios[orc.VCPU], - 'reserved': CONF.reserved_host_cpus, - }, - orc.MEMORY_MB: { - 'total': resources['memory_mb'], - 'min_unit': 1, - 'max_unit': resources['memory_mb'], - 'step_size': 1, - 'allocation_ratio': allocation_ratios[orc.MEMORY_MB], - 'reserved': CONF.reserved_host_memory_mb, - }, - # TODO(lpetrut): once #1784020 is fixed, we can skip reporting - # shared storage capacity - orc.DISK_GB: { - 'total': resources['local_gb'], - 'min_unit': 1, - 'max_unit': resources['local_gb'], - 'step_size': 1, - 'allocation_ratio': allocation_ratios[orc.DISK_GB], - 'reserved': compute_utils.convert_mb_to_ceil_gb( - CONF.reserved_host_disk_mb), - }, - } - - provider_tree.update_inventory(nodename, inventory) diff --git a/nova/virt/hyperv/imagecache.py b/nova/virt/hyperv/imagecache.py deleted file mode 100644 index fa5d83c10c09..000000000000 --- a/nova/virt/hyperv/imagecache.py +++ /dev/null @@ -1,249 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Image caching and management. 
-""" -import os -import re - -from os_win import utilsfactory -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units -from oslo_utils import uuidutils - -import nova.conf -from nova import exception -from nova.i18n import _ -from nova import utils -from nova.virt.hyperv import pathutils -from nova.virt import imagecache -from nova.virt import images - -LOG = logging.getLogger(__name__) - -CONF = nova.conf.CONF - - -class ImageCache(imagecache.ImageCacheManager): - def __init__(self): - super(ImageCache, self).__init__() - self._pathutils = pathutils.PathUtils() - self._vhdutils = utilsfactory.get_vhdutils() - - def _get_root_vhd_size_gb(self, instance): - if instance.old_flavor: - return instance.old_flavor.root_gb - else: - return instance.flavor.root_gb - - def _resize_and_cache_vhd(self, instance, vhd_path): - vhd_size = self._vhdutils.get_vhd_size(vhd_path)['VirtualSize'] - - root_vhd_size_gb = self._get_root_vhd_size_gb(instance) - root_vhd_size = root_vhd_size_gb * units.Gi - - root_vhd_internal_size = ( - self._vhdutils.get_internal_vhd_size_by_file_size( - vhd_path, root_vhd_size)) - - if root_vhd_internal_size < vhd_size: - raise exception.FlavorDiskSmallerThanImage( - flavor_size=root_vhd_size, image_size=vhd_size) - if root_vhd_internal_size > vhd_size: - path_parts = os.path.splitext(vhd_path) - resized_vhd_path = '%s_%s%s' % (path_parts[0], - root_vhd_size_gb, - path_parts[1]) - - lock_path = os.path.dirname(resized_vhd_path) - lock_name = "%s-cache.lock" % os.path.basename(resized_vhd_path) - - @utils.synchronized(name=lock_name, external=True, - lock_path=lock_path) - def copy_and_resize_vhd(): - if not self._pathutils.exists(resized_vhd_path): - try: - LOG.debug("Copying VHD %(vhd_path)s to " - "%(resized_vhd_path)s", - {'vhd_path': vhd_path, - 'resized_vhd_path': resized_vhd_path}) - self._pathutils.copyfile(vhd_path, resized_vhd_path) - LOG.debug("Resizing VHD %(resized_vhd_path)s to new " - "size %(root_vhd_size)s", - {'resized_vhd_path': resized_vhd_path, - 'root_vhd_size': root_vhd_size}) - self._vhdutils.resize_vhd(resized_vhd_path, - root_vhd_internal_size, - is_file_max_size=False) - except Exception: - with excutils.save_and_reraise_exception(): - if self._pathutils.exists(resized_vhd_path): - self._pathutils.remove(resized_vhd_path) - - copy_and_resize_vhd() - return resized_vhd_path - - def get_cached_image(self, context, instance, rescue_image_id=None): - image_id = rescue_image_id or instance.image_ref - - base_vhd_dir = self._pathutils.get_base_vhd_dir() - base_vhd_path = os.path.join(base_vhd_dir, image_id) - - lock_name = "%s-cache.lock" % image_id - - @utils.synchronized(name=lock_name, external=True, - lock_path=base_vhd_dir) - def fetch_image_if_not_existing(): - vhd_path = None - for format_ext in ['vhd', 'vhdx']: - test_path = base_vhd_path + '.' + format_ext - if self._pathutils.exists(test_path): - vhd_path = test_path - break - - if not vhd_path: - try: - images.fetch(context, image_id, base_vhd_path) - - format_ext = self._vhdutils.get_vhd_format(base_vhd_path) - vhd_path = base_vhd_path + '.' + format_ext.lower() - self._pathutils.rename(base_vhd_path, vhd_path) - except Exception: - with excutils.save_and_reraise_exception(): - if self._pathutils.exists(base_vhd_path): - self._pathutils.remove(base_vhd_path) - - return vhd_path - - vhd_path = fetch_image_if_not_existing() - - # Note: rescue images are not resized. 
- is_vhd = vhd_path.split('.')[-1].lower() == 'vhd' - if CONF.use_cow_images and is_vhd and not rescue_image_id: - # Resize the base VHD image as it's not possible to resize a - # differencing VHD. This does not apply to VHDX images. - resized_vhd_path = self._resize_and_cache_vhd(instance, vhd_path) - if resized_vhd_path: - return resized_vhd_path - - if rescue_image_id: - self._verify_rescue_image(instance, rescue_image_id, - vhd_path) - - return vhd_path - - def _verify_rescue_image(self, instance, rescue_image_id, - rescue_image_path): - rescue_image_info = self._vhdutils.get_vhd_info(rescue_image_path) - rescue_image_size = rescue_image_info['VirtualSize'] - flavor_disk_size = instance.flavor.root_gb * units.Gi - - if rescue_image_size > flavor_disk_size: - err_msg = _('Using a rescue image bigger than the instance ' - 'flavor disk size is not allowed. ' - 'Rescue image size: %(rescue_image_size)s. ' - 'Flavor disk size:%(flavor_disk_size)s.') % dict( - rescue_image_size=rescue_image_size, - flavor_disk_size=flavor_disk_size) - raise exception.ImageUnacceptable(reason=err_msg, - image_id=rescue_image_id) - - def get_image_details(self, context, instance): - image_id = instance.image_ref - return images.get_info(context, image_id) - - def _age_and_verify_cached_images(self, context, all_instances, base_dir): - for img in self.originals: - if img in self.used_images: - # change the timestamp on the image so as to reflect the last - # time it was used - self._update_image_timestamp(img) - elif CONF.image_cache.remove_unused_base_images: - self._remove_if_old_image(img) - - def _update_image_timestamp(self, image): - backing_files = self._get_image_backing_files(image) - for img in backing_files: - os.utime(img, None) - - def _get_image_backing_files(self, image): - base_file = self._pathutils.get_image_path(image) - if not base_file: - # not vhd or vhdx, ignore. - return [] - - backing_files = [base_file] - resize_re = re.compile('%s_[0-9]+$' % image) - for img in self.unexplained_images: - match = resize_re.match(img) - if match: - backing_files.append(self._pathutils.get_image_path(img)) - - return backing_files - - def _remove_if_old_image(self, image): - backing_files = self._get_image_backing_files(image) - max_age_seconds = ( - CONF.image_cache.remove_unused_original_minimum_age_seconds) - - for img in backing_files: - age_seconds = self._pathutils.get_age_of_file(img) - if age_seconds > max_age_seconds: - LOG.info("Removing old, unused image: %s", img) - self._remove_old_image(img) - - def _remove_old_image(self, image_path): - lock_path = os.path.dirname(image_path) - lock_name = "%s-cache.lock" % os.path.basename(image_path) - - @utils.synchronized(name=lock_name, external=True, - lock_path=lock_path) - def _image_synchronized_remove(): - self._pathutils.remove(image_path) - - _image_synchronized_remove() - - def update(self, context, all_instances): - base_vhd_dir = self._pathutils.get_base_vhd_dir() - - running = self._list_running_instances(context, all_instances) - self.used_images = running['used_images'].keys() - all_files = self._list_base_images(base_vhd_dir) - self.originals = all_files['originals'] - self.unexplained_images = all_files['unexplained_images'] - - self._age_and_verify_cached_images(context, all_instances, - base_vhd_dir) - - def _list_base_images(self, base_dir): - unexplained_images = [] - originals = [] - - for entry in os.listdir(base_dir): - file_name, extension = os.path.splitext(entry) - # extension has a leading '.'. 
E.g.: '.vhdx' - if extension.lstrip('.').lower() not in ['vhd', 'vhdx']: - # File is not an image. Ignore it. - # imagecache will not store images of any other formats. - continue - - if uuidutils.is_uuid_like(file_name): - originals.append(file_name) - else: - unexplained_images.append(file_name) - - return {'unexplained_images': unexplained_images, - 'originals': originals} diff --git a/nova/virt/hyperv/livemigrationops.py b/nova/virt/hyperv/livemigrationops.py deleted file mode 100644 index 1b0f81eae041..000000000000 --- a/nova/virt/hyperv/livemigrationops.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2012 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Management class for live migration VM operations. -""" - -from os_win import utilsfactory -from oslo_log import log as logging - -import nova.conf -from nova import exception -from nova.i18n import _ -from nova.objects import migrate_data as migrate_data_obj -from nova.virt.hyperv import block_device_manager -from nova.virt.hyperv import imagecache -from nova.virt.hyperv import pathutils -from nova.virt.hyperv import serialconsoleops -from nova.virt.hyperv import vmops -from nova.virt.hyperv import volumeops - -LOG = logging.getLogger(__name__) -CONF = nova.conf.CONF - - -class LiveMigrationOps(object): - def __init__(self): - self._livemigrutils = utilsfactory.get_livemigrationutils() - self._pathutils = pathutils.PathUtils() - self._vmops = vmops.VMOps() - self._volumeops = volumeops.VolumeOps() - self._serial_console_ops = serialconsoleops.SerialConsoleOps() - self._imagecache = imagecache.ImageCache() - self._vmutils = utilsfactory.get_vmutils() - self._block_dev_man = block_device_manager.BlockDeviceInfoManager() - - def live_migration(self, context, instance_ref, dest, post_method, - recover_method, block_migration=False, - migrate_data=None): - LOG.debug("live_migration called", instance=instance_ref) - instance_name = instance_ref["name"] - - if migrate_data and 'is_shared_instance_path' in migrate_data: - shared_storage = migrate_data.is_shared_instance_path - else: - shared_storage = ( - self._pathutils.check_remote_instances_dir_shared(dest)) - if migrate_data: - migrate_data.is_shared_instance_path = shared_storage - else: - migrate_data = migrate_data_obj.HyperVLiveMigrateData( - is_shared_instance_path=shared_storage) - - try: - # We must make sure that the console log workers are stopped, - # otherwise we won't be able to delete / move VM log files. - self._serial_console_ops.stop_console_handler(instance_name) - - if not shared_storage: - self._pathutils.copy_vm_console_logs(instance_name, dest) - self._vmops.copy_vm_dvd_disks(instance_name, dest) - - self._livemigrutils.live_migrate_vm( - instance_name, - dest, - migrate_disks=not shared_storage) - except Exception: - LOG.exception("Live migration failed. 
Attempting rollback.", - instance=instance_ref) - recover_method(context, instance_ref, dest, migrate_data) - return - - LOG.debug("Calling live migration post_method for instance: %s", - instance_name) - post_method(context, instance_ref, dest, - block_migration, migrate_data) - - def pre_live_migration(self, context, instance, block_device_info, - network_info): - LOG.debug("pre_live_migration called", instance=instance) - self._livemigrutils.check_live_migration_config() - - if CONF.use_cow_images: - boot_from_volume = self._block_dev_man.is_boot_from_volume( - block_device_info) - if not boot_from_volume and instance.image_ref: - self._imagecache.get_cached_image(context, instance) - - self._volumeops.connect_volumes(block_device_info) - - disk_path_mapping = self._volumeops.get_disk_path_mapping( - block_device_info) - if disk_path_mapping: - # We create a planned VM, ensuring that volumes will remain - # attached after the VM is migrated. - self._livemigrutils.create_planned_vm(instance.name, - instance.host, - disk_path_mapping) - - def post_live_migration(self, context, instance, block_device_info, - migrate_data): - self._volumeops.disconnect_volumes(block_device_info) - - if not migrate_data.is_shared_instance_path: - self._pathutils.get_instance_dir(instance.name, - create_dir=False, - remove_dir=True) - - def post_live_migration_at_destination(self, ctxt, instance_ref, - network_info, block_migration): - LOG.debug("post_live_migration_at_destination called", - instance=instance_ref) - self._vmops.plug_vifs(instance_ref, network_info) - - def check_can_live_migrate_destination(self, ctxt, instance_ref, - src_compute_info, dst_compute_info, - block_migration=False, - disk_over_commit=False): - LOG.debug("check_can_live_migrate_destination called", - instance=instance_ref) - - migrate_data = migrate_data_obj.HyperVLiveMigrateData() - - try: - # The remote instance dir might not exist or other issue to cause - # OSError in check_remote_instances_dir_shared function - migrate_data.is_shared_instance_path = ( - self._pathutils.check_remote_instances_dir_shared( - instance_ref.host)) - except exception.FileNotFound as e: - reason = _('Unavailable instance location: %s') % e - raise exception.MigrationPreCheckError(reason=reason) - return migrate_data - - def cleanup_live_migration_destination_check(self, ctxt, dest_check_data): - LOG.debug("cleanup_live_migration_destination_check called") - - def check_can_live_migrate_source(self, ctxt, instance_ref, - dest_check_data): - LOG.debug("check_can_live_migrate_source called", - instance=instance_ref) - return dest_check_data diff --git a/nova/virt/hyperv/migrationops.py b/nova/virt/hyperv/migrationops.py deleted file mode 100644 index 7e8683900b68..000000000000 --- a/nova/virt/hyperv/migrationops.py +++ /dev/null @@ -1,346 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Management class for migration / resize operations. 
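The live_migration() method above never lets a failed migration propagate: it logs the error, hands control to the caller-supplied recover_method and returns, and only calls post_method when the move succeeded. The callback pattern in isolation, as a hedged sketch (all three callables are placeholders supplied by the caller):

    import logging

    LOG = logging.getLogger(__name__)

    def migrate_with_rollback(do_migrate, post_method, recover_method):
        try:
            do_migrate()
        except Exception:
            # Let the recovery callback roll the instance back on the
            # source host instead of raising to the caller.
            LOG.exception("Live migration failed. Attempting rollback.")
            recover_method()
            return
        post_method()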
-""" -import os - -from os_win import utilsfactory -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import units - -from nova import exception -from nova.i18n import _ -from nova import objects -from nova.virt import configdrive -from nova.virt.hyperv import block_device_manager -from nova.virt.hyperv import constants -from nova.virt.hyperv import imagecache -from nova.virt.hyperv import pathutils -from nova.virt.hyperv import vmops -from nova.virt.hyperv import volumeops - -LOG = logging.getLogger(__name__) - - -class MigrationOps(object): - def __init__(self): - self._vmutils = utilsfactory.get_vmutils() - self._vhdutils = utilsfactory.get_vhdutils() - self._pathutils = pathutils.PathUtils() - self._volumeops = volumeops.VolumeOps() - self._vmops = vmops.VMOps() - self._imagecache = imagecache.ImageCache() - self._block_dev_man = block_device_manager.BlockDeviceInfoManager() - - def _migrate_disk_files(self, instance_name, disk_files, dest): - # TODO(mikal): it would be nice if this method took a full instance, - # because it could then be passed to the log messages below. - - instance_path = self._pathutils.get_instance_dir(instance_name) - dest_path = self._pathutils.get_instance_dir(instance_name, dest) - revert_path = self._pathutils.get_instance_migr_revert_dir( - instance_name, remove_dir=True, create_dir=True) - - shared_storage = (self._pathutils.exists(dest_path) and - self._pathutils.check_dirs_shared_storage( - instance_path, dest_path)) - - try: - if shared_storage: - # Since source and target are the same, we copy the files to - # a temporary location before moving them into place. - # This applies when the migration target is the source host or - # when shared storage is used for the instance files. - dest_path = '%s_tmp' % instance_path - - self._pathutils.check_remove_dir(dest_path) - self._pathutils.makedirs(dest_path) - - for disk_file in disk_files: - LOG.debug('Copying disk "%(disk_file)s" to ' - '"%(dest_path)s"', - {'disk_file': disk_file, 'dest_path': dest_path}) - self._pathutils.copy(disk_file, dest_path) - - self._pathutils.move_folder_files(instance_path, revert_path) - - if shared_storage: - self._pathutils.move_folder_files(dest_path, instance_path) - self._pathutils.rmtree(dest_path) - except Exception: - with excutils.save_and_reraise_exception(): - self._cleanup_failed_disk_migration(instance_path, revert_path, - dest_path) - - def _cleanup_failed_disk_migration(self, instance_path, - revert_path, dest_path): - try: - if dest_path and self._pathutils.exists(dest_path): - self._pathutils.rmtree(dest_path) - if self._pathutils.exists(revert_path): - self._pathutils.move_folder_files(revert_path, instance_path) - self._pathutils.rmtree(revert_path) - except Exception as ex: - # Log and ignore this exception - LOG.exception(ex) - LOG.error("Cannot cleanup migration files") - - def _check_target_flavor(self, instance, flavor): - new_root_gb = flavor.root_gb - curr_root_gb = instance.flavor.root_gb - - if new_root_gb < curr_root_gb: - raise exception.InstanceFaultRollback( - exception.CannotResizeDisk( - reason=_("Cannot resize the root disk to a smaller size. " - "Current size: %(curr_root_gb)s GB. 
Requested " - "size: %(new_root_gb)s GB.") % { - 'curr_root_gb': curr_root_gb, - 'new_root_gb': new_root_gb})) - - def migrate_disk_and_power_off(self, context, instance, dest, - flavor, network_info, - block_device_info=None, timeout=0, - retry_interval=0): - LOG.debug("migrate_disk_and_power_off called", instance=instance) - - self._check_target_flavor(instance, flavor) - - self._vmops.power_off(instance, timeout, retry_interval) - - (disk_files, - volume_drives) = self._vmutils.get_vm_storage_paths(instance.name) - - if disk_files: - self._migrate_disk_files(instance.name, disk_files, dest) - - self._vmops.destroy(instance, network_info, - block_device_info, destroy_disks=False) - - # disk_info is not used - return "" - - def confirm_migration(self, context, migration, instance, network_info): - LOG.debug("confirm_migration called", instance=instance) - - self._pathutils.get_instance_migr_revert_dir(instance.name, - remove_dir=True) - - def _revert_migration_files(self, instance_name): - instance_path = self._pathutils.get_instance_dir( - instance_name, create_dir=False, remove_dir=True) - - revert_path = self._pathutils.get_instance_migr_revert_dir( - instance_name) - self._pathutils.rename(revert_path, instance_path) - - def _check_and_attach_config_drive(self, instance, vm_gen): - if configdrive.required_by(instance): - configdrive_path = self._pathutils.lookup_configdrive_path( - instance.name) - if configdrive_path: - self._vmops.attach_config_drive(instance, configdrive_path, - vm_gen) - else: - raise exception.ConfigDriveNotFound( - instance_uuid=instance.uuid) - - def finish_revert_migration(self, context, instance, network_info, - block_device_info=None, power_on=True): - LOG.debug("finish_revert_migration called", instance=instance) - - instance_name = instance.name - self._revert_migration_files(instance_name) - - image_meta = objects.ImageMeta.from_instance(instance) - vm_gen = self._vmops.get_image_vm_generation(instance.uuid, image_meta) - - self._block_dev_man.validate_and_update_bdi(instance, image_meta, - vm_gen, block_device_info) - root_device = block_device_info['root_disk'] - - if root_device['type'] == constants.DISK: - root_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name) - root_device['path'] = root_vhd_path - if not root_vhd_path: - base_vhd_path = self._pathutils.get_instance_dir(instance_name) - raise exception.DiskNotFound(location=base_vhd_path) - - ephemerals = block_device_info['ephemerals'] - self._check_ephemeral_disks(instance, ephemerals) - - self._vmops.create_instance(instance, network_info, root_device, - block_device_info, vm_gen, image_meta) - - self._check_and_attach_config_drive(instance, vm_gen) - self._vmops.set_boot_order(instance_name, vm_gen, block_device_info) - if power_on: - self._vmops.power_on(instance, network_info=network_info) - - def _merge_base_vhd(self, diff_vhd_path, base_vhd_path): - base_vhd_copy_path = os.path.join(os.path.dirname(diff_vhd_path), - os.path.basename(base_vhd_path)) - try: - LOG.debug('Copying base disk %(base_vhd_path)s to ' - '%(base_vhd_copy_path)s', - {'base_vhd_path': base_vhd_path, - 'base_vhd_copy_path': base_vhd_copy_path}) - self._pathutils.copyfile(base_vhd_path, base_vhd_copy_path) - - LOG.debug("Reconnecting copied base VHD " - "%(base_vhd_copy_path)s and diff " - "VHD %(diff_vhd_path)s", - {'base_vhd_copy_path': base_vhd_copy_path, - 'diff_vhd_path': diff_vhd_path}) - self._vhdutils.reconnect_parent_vhd(diff_vhd_path, - base_vhd_copy_path) - - LOG.debug("Merging differential disk %s 
into its parent.", - diff_vhd_path) - self._vhdutils.merge_vhd(diff_vhd_path) - - # Replace the differential VHD with the merged one - self._pathutils.rename(base_vhd_copy_path, diff_vhd_path) - except Exception: - with excutils.save_and_reraise_exception(): - if self._pathutils.exists(base_vhd_copy_path): - self._pathutils.remove(base_vhd_copy_path) - - def _check_resize_vhd(self, vhd_path, vhd_info, new_size): - curr_size = vhd_info['VirtualSize'] - if new_size < curr_size: - raise exception.CannotResizeDisk( - reason=_("Cannot resize the root disk to a smaller size. " - "Current size: %(curr_root_gb)s GB. Requested " - "size: %(new_root_gb)s GB.") % { - 'curr_root_gb': curr_size / units.Gi, - 'new_root_gb': new_size / units.Gi}) - elif new_size > curr_size: - self._resize_vhd(vhd_path, new_size) - - def _resize_vhd(self, vhd_path, new_size): - if vhd_path.split('.')[-1].lower() == "vhd": - LOG.debug("Getting parent disk info for disk: %s", vhd_path) - base_disk_path = self._vhdutils.get_vhd_parent_path(vhd_path) - if base_disk_path: - # A differential VHD cannot be resized. This limitation - # does not apply to the VHDX format. - self._merge_base_vhd(vhd_path, base_disk_path) - LOG.debug("Resizing disk \"%(vhd_path)s\" to new max " - "size %(new_size)s", - {'vhd_path': vhd_path, 'new_size': new_size}) - self._vhdutils.resize_vhd(vhd_path, new_size) - - def _check_base_disk(self, context, instance, diff_vhd_path, - src_base_disk_path): - base_vhd_path = self._imagecache.get_cached_image(context, instance) - - # If the location of the base host differs between source - # and target hosts we need to reconnect the base disk - if src_base_disk_path.lower() != base_vhd_path.lower(): - LOG.debug("Reconnecting copied base VHD " - "%(base_vhd_path)s and diff " - "VHD %(diff_vhd_path)s", - {'base_vhd_path': base_vhd_path, - 'diff_vhd_path': diff_vhd_path}) - self._vhdutils.reconnect_parent_vhd(diff_vhd_path, - base_vhd_path) - - def finish_migration(self, context, migration, instance, disk_info, - network_info, image_meta, resize_instance=False, - block_device_info=None, power_on=True): - LOG.debug("finish_migration called", instance=instance) - - instance_name = instance.name - vm_gen = self._vmops.get_image_vm_generation(instance.uuid, image_meta) - - self._block_dev_man.validate_and_update_bdi(instance, image_meta, - vm_gen, block_device_info) - root_device = block_device_info['root_disk'] - - if root_device['type'] == constants.DISK: - root_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name) - root_device['path'] = root_vhd_path - if not root_vhd_path: - base_vhd_path = self._pathutils.get_instance_dir(instance_name) - raise exception.DiskNotFound(location=base_vhd_path) - - root_vhd_info = self._vhdutils.get_vhd_info(root_vhd_path) - src_base_disk_path = root_vhd_info.get("ParentPath") - if src_base_disk_path: - self._check_base_disk(context, instance, root_vhd_path, - src_base_disk_path) - - if resize_instance: - new_size = instance.flavor.root_gb * units.Gi - self._check_resize_vhd(root_vhd_path, root_vhd_info, new_size) - - ephemerals = block_device_info['ephemerals'] - self._check_ephemeral_disks(instance, ephemerals, resize_instance) - - self._vmops.create_instance(instance, network_info, root_device, - block_device_info, vm_gen, image_meta) - - self._check_and_attach_config_drive(instance, vm_gen) - self._vmops.set_boot_order(instance_name, vm_gen, block_device_info) - if power_on: - self._vmops.power_on(instance, network_info=network_info) - - def 
_check_ephemeral_disks(self, instance, ephemerals, - resize_instance=False): - instance_name = instance.name - new_eph_gb = instance.get('ephemeral_gb', 0) - - if len(ephemerals) == 1: - # NOTE(claudiub): Resize only if there is one ephemeral. If there - # are more than 1, resizing them can be problematic. This behaviour - # also exists in the libvirt driver and it has to be addressed in - # the future. - ephemerals[0]['size'] = new_eph_gb - elif sum(eph['size'] for eph in ephemerals) != new_eph_gb: - # New ephemeral size is different from the original ephemeral size - # and there are multiple ephemerals. - LOG.warning("Cannot resize multiple ephemeral disks for instance.", - instance=instance) - - for index, eph in enumerate(ephemerals): - eph_name = "eph%s" % index - existing_eph_path = self._pathutils.lookup_ephemeral_vhd_path( - instance_name, eph_name) - - if not existing_eph_path: - eph['format'] = self._vhdutils.get_best_supported_vhd_format() - eph['path'] = self._pathutils.get_ephemeral_vhd_path( - instance_name, eph['format'], eph_name) - if not resize_instance: - # ephemerals should have existed. - raise exception.DiskNotFound(location=eph['path']) - - if eph['size']: - # create ephemerals - self._vmops.create_ephemeral_disk(instance.name, eph) - elif eph['size'] > 0: - # ephemerals exist. resize them. - eph['path'] = existing_eph_path - eph_vhd_info = self._vhdutils.get_vhd_info(eph['path']) - self._check_resize_vhd( - eph['path'], eph_vhd_info, eph['size'] * units.Gi) - else: - # ephemeral new size is 0, remove it. - self._pathutils.remove(existing_eph_path) - eph['path'] = None diff --git a/nova/virt/hyperv/pathutils.py b/nova/virt/hyperv/pathutils.py deleted file mode 100644 index 6879f3f8532c..000000000000 --- a/nova/virt/hyperv/pathutils.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import tempfile -import time - -from os_win.utils import pathutils -from oslo_log import log as logging - -import nova.conf -from nova import exception -from nova.i18n import _ -from nova.virt.hyperv import constants - -LOG = logging.getLogger(__name__) - -CONF = nova.conf.CONF - -ERROR_INVALID_NAME = 123 - -# NOTE(claudiub): part of the pre-existing PathUtils is nova-specific and -# it does not belong in the os-win library. In order to ensure the same -# functionality with the least amount of changes necessary, adding as a mixin -# the os_win.pathutils.PathUtils class into this PathUtils. 
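As the NOTE above explains, the class below reuses the generic file-handling helpers from os-win by subclassing them and layering the Nova-specific path logic on top. The same pattern in a self-contained sketch (BasePathUtils stands in for os_win.utils.pathutils.PathUtils; the method shown is illustrative):

    import os

    class BasePathUtils(object):
        # Stand-in for the generic os-win path helpers.
        def exists(self, path):
            return os.path.exists(path)

    class NovaPathUtils(BasePathUtils):
        # Nova-specific helpers built on top of the generic ones.
        def get_root_vhd_path(self, instance_dir, format_ext):
            return os.path.join(instance_dir, 'root.' + format_ext.lower())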
- - -class PathUtils(pathutils.PathUtils): - - def get_instances_dir(self, remote_server=None): - local_instance_path = os.path.normpath(CONF.instances_path) - - if remote_server and not local_instance_path.startswith(r'\\'): - if CONF.hyperv.instances_path_share: - path = CONF.hyperv.instances_path_share - else: - # Use an administrative share - path = local_instance_path.replace(':', '$') - return ('\\\\%(remote_server)s\\%(path)s' % - {'remote_server': remote_server, 'path': path}) - else: - return local_instance_path - - def _get_instances_sub_dir(self, dir_name, remote_server=None, - create_dir=True, remove_dir=False): - instances_path = self.get_instances_dir(remote_server) - path = os.path.join(instances_path, dir_name) - try: - if remove_dir: - self.check_remove_dir(path) - if create_dir: - self.check_create_dir(path) - return path - except WindowsError as ex: - if ex.winerror == ERROR_INVALID_NAME: - raise exception.AdminRequired(_( - "Cannot access \"%(instances_path)s\", make sure the " - "path exists and that you have the proper permissions. " - "In particular Nova-Compute must not be executed with the " - "builtin SYSTEM account or other accounts unable to " - "authenticate on a remote host.") % - {'instances_path': instances_path}) - raise - - def get_instance_migr_revert_dir(self, instance_name, create_dir=False, - remove_dir=False): - dir_name = '%s_revert' % instance_name - return self._get_instances_sub_dir(dir_name, None, create_dir, - remove_dir) - - def get_instance_dir(self, instance_name, remote_server=None, - create_dir=True, remove_dir=False): - return self._get_instances_sub_dir(instance_name, remote_server, - create_dir, remove_dir) - - def _lookup_vhd_path(self, instance_name, vhd_path_func, - *args, **kwargs): - vhd_path = None - for format_ext in ['vhd', 'vhdx']: - test_path = vhd_path_func(instance_name, format_ext, - *args, **kwargs) - if self.exists(test_path): - vhd_path = test_path - break - return vhd_path - - def lookup_root_vhd_path(self, instance_name, rescue=False): - return self._lookup_vhd_path(instance_name, self.get_root_vhd_path, - rescue) - - def lookup_configdrive_path(self, instance_name, rescue=False): - configdrive_path = None - for format_ext in constants.DISK_FORMAT_MAP: - test_path = self.get_configdrive_path(instance_name, format_ext, - rescue=rescue) - if self.exists(test_path): - configdrive_path = test_path - break - return configdrive_path - - def lookup_ephemeral_vhd_path(self, instance_name, eph_name): - return self._lookup_vhd_path(instance_name, - self.get_ephemeral_vhd_path, - eph_name) - - def get_root_vhd_path(self, instance_name, format_ext, rescue=False): - instance_path = self.get_instance_dir(instance_name) - image_name = 'root' - if rescue: - image_name += '-rescue' - return os.path.join(instance_path, - image_name + '.' + format_ext.lower()) - - def get_configdrive_path(self, instance_name, format_ext, - remote_server=None, rescue=False): - instance_path = self.get_instance_dir(instance_name, remote_server) - configdrive_image_name = 'configdrive' - if rescue: - configdrive_image_name += '-rescue' - return os.path.join(instance_path, - configdrive_image_name + '.' + format_ext.lower()) - - def get_ephemeral_vhd_path(self, instance_name, format_ext, eph_name): - instance_path = self.get_instance_dir(instance_name) - return os.path.join(instance_path, eph_name + '.' 
+ format_ext.lower()) - - def get_base_vhd_dir(self): - return self._get_instances_sub_dir('_base') - - def get_export_dir(self, instance_name): - dir_name = os.path.join('export', instance_name) - return self._get_instances_sub_dir(dir_name, create_dir=True, - remove_dir=True) - - def get_vm_console_log_paths(self, instance_name, remote_server=None): - instance_dir = self.get_instance_dir(instance_name, - remote_server) - console_log_path = os.path.join(instance_dir, 'console.log') - return console_log_path, console_log_path + '.1' - - def copy_vm_console_logs(self, instance_name, dest_host): - local_log_paths = self.get_vm_console_log_paths( - instance_name) - remote_log_paths = self.get_vm_console_log_paths( - instance_name, remote_server=dest_host) - - for local_log_path, remote_log_path in zip(local_log_paths, - remote_log_paths): - if self.exists(local_log_path): - self.copy(local_log_path, remote_log_path) - - def get_image_path(self, image_name): - # Note: it is possible that the path doesn't exist - base_dir = self.get_base_vhd_dir() - for ext in ['vhd', 'vhdx']: - file_path = os.path.join(base_dir, - image_name + '.' + ext.lower()) - if self.exists(file_path): - return file_path - return None - - def get_age_of_file(self, file_name): - return time.time() - os.path.getmtime(file_name) - - def check_dirs_shared_storage(self, src_dir, dest_dir): - # Check if shared storage is being used by creating a temporary - # file at the destination path and checking if it exists at the - # source path. - LOG.debug("Checking if %(src_dir)s and %(dest_dir)s point " - "to the same location.", - dict(src_dir=src_dir, dest_dir=dest_dir)) - - try: - with tempfile.NamedTemporaryFile(dir=dest_dir) as tmp_file: - src_path = os.path.join(src_dir, - os.path.basename(tmp_file.name)) - shared_storage = os.path.exists(src_path) - except OSError as e: - raise exception.FileNotFound(str(e)) - - return shared_storage - - def check_remote_instances_dir_shared(self, dest): - # Checks if the instances dir from a remote host points - # to the same storage location as the local instances dir. - local_inst_dir = self.get_instances_dir() - remote_inst_dir = self.get_instances_dir(dest) - return self.check_dirs_shared_storage(local_inst_dir, - remote_inst_dir) diff --git a/nova/virt/hyperv/rdpconsoleops.py b/nova/virt/hyperv/rdpconsoleops.py deleted file mode 100644 index edfc1e1fa1b7..000000000000 --- a/nova/virt/hyperv/rdpconsoleops.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
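The check_dirs_shared_storage() helper above detects shared storage by dropping a temporary file into the destination directory and checking whether the same file name becomes visible under the source directory. The probe, reduced to a stdlib-only sketch (the function name is illustrative and the error handling of the original is omitted):

    import os
    import tempfile

    def dirs_share_storage(src_dir, dest_dir):
        # If a file created in dest_dir is visible through src_dir, both
        # paths resolve to the same storage location.
        with tempfile.NamedTemporaryFile(dir=dest_dir) as tmp_file:
            probe = os.path.join(src_dir, os.path.basename(tmp_file.name))
            return os.path.exists(probe)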
- -from os_win import utilsfactory -from oslo_log import log as logging - -from nova.console import type as ctype -from nova.virt.hyperv import hostops - -LOG = logging.getLogger(__name__) - - -class RDPConsoleOps(object): - def __init__(self): - self._hostops = hostops.HostOps() - self._vmutils = utilsfactory.get_vmutils() - self._rdpconsoleutils = utilsfactory.get_rdpconsoleutils() - - def get_rdp_console(self, instance): - LOG.debug("get_rdp_console called", instance=instance) - host = self._hostops.get_host_ip_addr() - port = self._rdpconsoleutils.get_rdp_console_port() - vm_id = self._vmutils.get_vm_id(instance.name) - - LOG.debug("RDP console: %(host)s:%(port)s, %(vm_id)s", - {"host": host, "port": port, "vm_id": vm_id}) - - return ctype.ConsoleRDP( - host=host, port=port, internal_access_path=vm_id) diff --git a/nova/virt/hyperv/serialconsolehandler.py b/nova/virt/hyperv/serialconsolehandler.py deleted file mode 100644 index 81cf0a9dbfb3..000000000000 --- a/nova/virt/hyperv/serialconsolehandler.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from eventlet import patcher -from os_win.utils.io import ioutils -from os_win import utilsfactory -from oslo_config import cfg -from oslo_log import log as logging - -from nova.console import serial as serial_console -from nova.console import type as ctype -from nova import exception -from nova.i18n import _ -from nova.virt.hyperv import constants -from nova.virt.hyperv import pathutils -from nova.virt.hyperv import serialproxy - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - -threading = patcher.original('threading') - - -class SerialConsoleHandler(object): - """Handles serial console ops related to a given instance.""" - - def __init__(self, instance_name): - self._vmutils = utilsfactory.get_vmutils() - self._pathutils = pathutils.PathUtils() - - self._instance_name = instance_name - self._log_path = self._pathutils.get_vm_console_log_paths( - self._instance_name)[0] - - self._client_connected = None - self._input_queue = None - self._output_queue = None - - self._serial_proxy = None - self._workers = [] - - def start(self): - self._setup_handlers() - - for worker in self._workers: - worker.start() - - def stop(self): - for worker in self._workers: - worker.stop() - - if self._serial_proxy: - serial_console.release_port(self._listen_host, - self._listen_port) - - def _setup_handlers(self): - if CONF.serial_console.enabled: - self._setup_serial_proxy_handler() - - self._setup_named_pipe_handlers() - - def _setup_serial_proxy_handler(self): - self._listen_host = ( - CONF.serial_console.proxyclient_address) - self._listen_port = serial_console.acquire_port( - self._listen_host) - - LOG.info('Initializing serial proxy on ' - '%(addr)s:%(port)s, handling connections ' - 'to instance %(instance_name)s.', - {'addr': self._listen_host, - 'port': self._listen_port, - 'instance_name': self._instance_name}) - - # Use this event in order to manage - 
# pending queue operations. - self._client_connected = threading.Event() - self._input_queue = ioutils.IOQueue( - client_connected=self._client_connected) - self._output_queue = ioutils.IOQueue( - client_connected=self._client_connected) - - self._serial_proxy = serialproxy.SerialProxy( - self._instance_name, self._listen_host, - self._listen_port, self._input_queue, - self._output_queue, self._client_connected) - - self._workers.append(self._serial_proxy) - - def _setup_named_pipe_handlers(self): - # At most 2 named pipes will be used to access the vm serial ports. - # - # The named pipe having the 'ro' suffix will be used only for logging - # while the 'rw' pipe will be used for interactive sessions, logging - # only when there is no 'ro' pipe. - serial_port_mapping = self._get_vm_serial_port_mapping() - log_rw_pipe_output = not serial_port_mapping.get( - constants.SERIAL_PORT_TYPE_RO) - - for pipe_type, pipe_path in serial_port_mapping.items(): - enable_logging = (pipe_type == constants.SERIAL_PORT_TYPE_RO or - log_rw_pipe_output) - handler = self._get_named_pipe_handler( - pipe_path, - pipe_type=pipe_type, - enable_logging=enable_logging) - self._workers.append(handler) - - def _get_named_pipe_handler(self, pipe_path, pipe_type, - enable_logging): - kwargs = {} - if pipe_type == constants.SERIAL_PORT_TYPE_RW: - kwargs = {'input_queue': self._input_queue, - 'output_queue': self._output_queue, - 'connect_event': self._client_connected} - if enable_logging: - kwargs['log_file'] = self._log_path - - handler = utilsfactory.get_named_pipe_handler(pipe_path, **kwargs) - return handler - - def _get_vm_serial_port_mapping(self): - serial_port_conns = self._vmutils.get_vm_serial_port_connections( - self._instance_name) - - if not serial_port_conns: - err_msg = _("No suitable serial port pipe was found " - "for instance %(instance_name)s") - raise exception.NovaException( - err_msg % {'instance_name': self._instance_name}) - - serial_port_mapping = {} - # At the moment, we tag the pipes by using a pipe path suffix - # as we can't use the serial port ElementName attribute because of - # a Hyper-V bug. - for pipe_path in serial_port_conns: - # expected pipe_path: - # '\\.\pipe\fc1bcc91-c7d3-4116-a210-0cd151e019cd_rw' - port_type = pipe_path[-2:] - if port_type in [constants.SERIAL_PORT_TYPE_RO, - constants.SERIAL_PORT_TYPE_RW]: - serial_port_mapping[port_type] = pipe_path - else: - serial_port_mapping[constants.SERIAL_PORT_TYPE_RW] = pipe_path - - return serial_port_mapping - - def get_serial_console(self): - if not CONF.serial_console.enabled: - raise exception.ConsoleTypeUnavailable(console_type='serial') - return ctype.ConsoleSerial(host=self._listen_host, - port=self._listen_port) diff --git a/nova/virt/hyperv/serialconsoleops.py b/nova/virt/hyperv/serialconsoleops.py deleted file mode 100644 index f5ea6d6063be..000000000000 --- a/nova/virt/hyperv/serialconsoleops.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2015 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import functools -import os - -from os_win import utilsfactory -from oslo_log import log as logging - -from nova import exception -from nova import utils -from nova.virt.hyperv import pathutils -from nova.virt.hyperv import serialconsolehandler - -LOG = logging.getLogger(__name__) - -_console_handlers = {} - - -def instance_synchronized(func): - @functools.wraps(func) - def wrapper(self, instance_name, *args, **kwargs): - @utils.synchronized(instance_name) - def inner(): - return func(self, instance_name, *args, **kwargs) - return inner() - return wrapper - - -class SerialConsoleOps(object): - def __init__(self): - self._vmutils = utilsfactory.get_vmutils() - self._pathutils = pathutils.PathUtils() - - @instance_synchronized - def start_console_handler(self, instance_name): - # Cleanup existing workers. - self.stop_console_handler_unsync(instance_name) - handler = None - - try: - handler = serialconsolehandler.SerialConsoleHandler( - instance_name) - handler.start() - _console_handlers[instance_name] = handler - except Exception as exc: - LOG.error('Instance %(instance_name)s serial console handler ' - 'could not start. Exception %(exc)s', - {'instance_name': instance_name, - 'exc': exc}) - if handler: - handler.stop() - - @instance_synchronized - def stop_console_handler(self, instance_name): - self.stop_console_handler_unsync(instance_name) - - def stop_console_handler_unsync(self, instance_name): - handler = _console_handlers.get(instance_name) - if handler: - LOG.info("Stopping instance %(instance_name)s " - "serial console handler.", - {'instance_name': instance_name}) - handler.stop() - del _console_handlers[instance_name] - - @instance_synchronized - def get_serial_console(self, instance_name): - handler = _console_handlers.get(instance_name) - if not handler: - raise exception.ConsoleTypeUnavailable(console_type='serial') - return handler.get_serial_console() - - @instance_synchronized - def get_console_output(self, instance_name): - console_log_paths = self._pathutils.get_vm_console_log_paths( - instance_name) - - try: - log = b'' - # Start with the oldest console log file. - for log_path in reversed(console_log_paths): - if os.path.exists(log_path): - with open(log_path, 'rb') as fp: - log += fp.read() - return log - except IOError as err: - raise exception.ConsoleLogOutputException( - instance_id=instance_name, reason=str(err)) - - def start_console_handlers(self): - active_instances = self._vmutils.get_active_instances() - for instance_name in active_instances: - instance_path = self._pathutils.get_instance_dir(instance_name) - - # Skip instances that are not created by Nova - if not os.path.exists(instance_path): - continue - - self.start_console_handler(instance_name) diff --git a/nova/virt/hyperv/serialproxy.py b/nova/virt/hyperv/serialproxy.py deleted file mode 100644 index d12fb8bf6e85..000000000000 --- a/nova/virt/hyperv/serialproxy.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import functools -import socket - -from eventlet import patcher - -from nova import exception -from nova.i18n import _ -from nova.virt.hyperv import constants - -# Note(lpetrut): Eventlet greenpipes are not supported on Windows. The named -# pipe handlers implemented in os-win use Windows API calls which can block -# the whole thread. In order to avoid this, those workers run in separate -# 'native' threads. -# -# As this proxy communicates with those workers via queues, the serial console -# proxy workers have to run in 'native' threads as well. -threading = patcher.original('threading') - - -def handle_socket_errors(func): - @functools.wraps(func) - def wrapper(self, *args, **kwargs): - try: - return func(self, *args, **kwargs) - except socket.error: - self._client_connected.clear() - return wrapper - - -class SerialProxy(threading.Thread): - def __init__(self, instance_name, addr, port, input_queue, - output_queue, client_connected): - super(SerialProxy, self).__init__() - self.daemon = True - - self._instance_name = instance_name - self._addr = addr - self._port = port - self._conn = None - - self._input_queue = input_queue - self._output_queue = output_queue - self._client_connected = client_connected - self._stopped = threading.Event() - - def _setup_socket(self): - try: - self._sock = socket.socket(socket.AF_INET, - socket.SOCK_STREAM) - self._sock.setsockopt(socket.SOL_SOCKET, - socket.SO_REUSEADDR, - 1) - self._sock.bind((self._addr, self._port)) - self._sock.listen(1) - except socket.error as err: - self._sock.close() - msg = (_('Failed to initialize serial proxy on ' - '%(addr)s:%(port)s, handling connections ' - 'to instance %(instance_name)s. 
Error: %(error)s') % - {'addr': self._addr, - 'port': self._port, - 'instance_name': self._instance_name, - 'error': err}) - raise exception.NovaException(msg) - - def stop(self): - self._stopped.set() - self._client_connected.clear() - if self._conn: - self._conn.shutdown(socket.SHUT_RDWR) - self._conn.close() - self._sock.close() - - def run(self): - self._setup_socket() - while not self._stopped.isSet(): - self._accept_conn() - - @handle_socket_errors - def _accept_conn(self): - self._conn, client_addr = self._sock.accept() - self._client_connected.set() - - workers = [] - for job in [self._get_data, self._send_data]: - worker = threading.Thread(target=job) - worker.daemon = True - worker.start() - workers.append(worker) - - for worker in workers: - worker_running = (worker.is_alive() and - worker is not threading.current_thread()) - if worker_running: - worker.join() - - self._conn.close() - self._conn = None - - @handle_socket_errors - def _get_data(self): - while self._client_connected.isSet(): - data = self._conn.recv(constants.SERIAL_CONSOLE_BUFFER_SIZE) - if not data: - self._client_connected.clear() - return - self._input_queue.put(data) - - @handle_socket_errors - def _send_data(self): - while self._client_connected.isSet(): - data = self._output_queue.get_burst() - if data: - self._conn.sendall(data) diff --git a/nova/virt/hyperv/snapshotops.py b/nova/virt/hyperv/snapshotops.py deleted file mode 100644 index 63f9e8d708bf..000000000000 --- a/nova/virt/hyperv/snapshotops.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright 2012 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Management class for VM snapshot operations. 
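The handle_socket_errors decorator used by SerialProxy above turns a dropped client connection into a normal exit path: the wrapped worker method simply clears the "client connected" event instead of raising. A self-contained sketch of that pattern (the Worker class is illustrative, not part of Nova):

    import functools
    import socket
    import threading

    def handle_socket_errors(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except socket.error:
                # Treat a broken connection as "client went away".
                self._client_connected.clear()
        return wrapper

    class Worker(object):
        def __init__(self):
            self._client_connected = threading.Event()
            self._client_connected.set()

        @handle_socket_errors
        def read_once(self, conn):
            return conn.recv(1024)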
-""" -import os - -from os_win import utilsfactory -from oslo_log import log as logging - -from nova.compute import task_states -from nova.image import glance -from nova.virt.hyperv import pathutils - -LOG = logging.getLogger(__name__) - - -class SnapshotOps(object): - def __init__(self): - self._pathutils = pathutils.PathUtils() - self._vmutils = utilsfactory.get_vmutils() - self._vhdutils = utilsfactory.get_vhdutils() - - def _save_glance_image(self, context, image_id, image_vhd_path): - (glance_image_service, - image_id) = glance.get_remote_image_service(context, image_id) - image_metadata = {"disk_format": "vhd", - "container_format": "bare"} - with self._pathutils.open(image_vhd_path, 'rb') as f: - glance_image_service.update(context, image_id, image_metadata, f, - purge_props=False) - - def snapshot(self, context, instance, image_id, update_task_state): - """Create snapshot from a running VM instance.""" - instance_name = instance.name - - LOG.debug("Creating snapshot for instance %s", instance_name) - snapshot_path = self._vmutils.take_vm_snapshot(instance_name) - update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD) - - export_dir = None - - try: - src_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name) - - LOG.debug("Getting info for VHD %s", src_vhd_path) - src_base_disk_path = self._vhdutils.get_vhd_parent_path( - src_vhd_path) - - export_dir = self._pathutils.get_export_dir(instance_name) - - dest_vhd_path = os.path.join(export_dir, os.path.basename( - src_vhd_path)) - LOG.debug('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s', - {'src_vhd_path': src_vhd_path, - 'dest_vhd_path': dest_vhd_path}) - self._pathutils.copyfile(src_vhd_path, dest_vhd_path) - - image_vhd_path = None - if not src_base_disk_path: - image_vhd_path = dest_vhd_path - else: - basename = os.path.basename(src_base_disk_path) - dest_base_disk_path = os.path.join(export_dir, basename) - LOG.debug('Copying base disk %(src_vhd_path)s to ' - '%(dest_base_disk_path)s', - {'src_vhd_path': src_vhd_path, - 'dest_base_disk_path': dest_base_disk_path}) - self._pathutils.copyfile(src_base_disk_path, - dest_base_disk_path) - - LOG.debug("Reconnecting copied base VHD " - "%(dest_base_disk_path)s and diff " - "VHD %(dest_vhd_path)s", - {'dest_base_disk_path': dest_base_disk_path, - 'dest_vhd_path': dest_vhd_path}) - self._vhdutils.reconnect_parent_vhd(dest_vhd_path, - dest_base_disk_path) - - LOG.debug("Merging diff disk %s into its parent.", - dest_vhd_path) - self._vhdutils.merge_vhd(dest_vhd_path) - image_vhd_path = dest_base_disk_path - - LOG.debug("Updating Glance image %(image_id)s with content from " - "merged disk %(image_vhd_path)s", - {'image_id': image_id, 'image_vhd_path': image_vhd_path}) - update_task_state(task_state=task_states.IMAGE_UPLOADING, - expected_state=task_states.IMAGE_PENDING_UPLOAD) - self._save_glance_image(context, image_id, image_vhd_path) - - LOG.debug("Snapshot image %(image_id)s updated for VM " - "%(instance_name)s", - {'image_id': image_id, 'instance_name': instance_name}) - finally: - try: - LOG.debug("Removing snapshot %s", image_id) - self._vmutils.remove_vm_snapshot(snapshot_path) - except Exception: - LOG.exception('Failed to remove snapshot for VM %s', - instance_name, instance=instance) - if export_dir: - LOG.debug('Removing directory: %s', export_dir) - self._pathutils.rmtree(export_dir) diff --git a/nova/virt/hyperv/vif.py b/nova/virt/hyperv/vif.py deleted file mode 100644 index 5cff9cbaade4..000000000000 --- a/nova/virt/hyperv/vif.py +++ /dev/null @@ -1,63 
+0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# Copyright 2013 Pedro Navarro Perez -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os_vif -from os_win import utilsfactory - -import nova.conf -from nova import exception -from nova.i18n import _ -from nova.network import model -from nova.network import os_vif_util - -CONF = nova.conf.CONF - - -class HyperVVIFDriver(object): - def __init__(self): - self._netutils = utilsfactory.get_networkutils() - - def plug(self, instance, vif): - vif_type = vif['type'] - if vif_type == model.VIF_TYPE_HYPERV: - # neutron takes care of plugging the port - pass - elif vif_type == model.VIF_TYPE_OVS: - vif = os_vif_util.nova_to_osvif_vif(vif) - instance = os_vif_util.nova_to_osvif_instance(instance) - - # NOTE(claudiub): the vNIC has to be connected to a vSwitch - # before the ovs port is created. - self._netutils.connect_vnic_to_vswitch(CONF.hyperv.vswitch_name, - vif.id) - os_vif.plug(vif, instance) - else: - reason = _("Failed to plug virtual interface: " - "unexpected vif_type=%s") % vif_type - raise exception.VirtualInterfacePlugException(reason) - - def unplug(self, instance, vif): - vif_type = vif['type'] - if vif_type == model.VIF_TYPE_HYPERV: - # neutron takes care of unplugging the port - pass - elif vif_type == model.VIF_TYPE_OVS: - vif = os_vif_util.nova_to_osvif_vif(vif) - instance = os_vif_util.nova_to_osvif_instance(instance) - os_vif.unplug(vif, instance) - else: - reason = _("unexpected vif_type=%s") % vif_type - raise exception.VirtualInterfaceUnplugException(reason=reason) diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py deleted file mode 100644 index 7a6695b6eff0..000000000000 --- a/nova/virt/hyperv/vmops.py +++ /dev/null @@ -1,1127 +0,0 @@ -# Copyright (c) 2010 Cloud.com, Inc -# Copyright 2012 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Management class for basic VM operations. 
-""" -import contextlib -import functools -import os -import time - -from eventlet import timeout as etimeout -from os_win import constants as os_win_const -from os_win import exceptions as os_win_exc -from os_win import utilsfactory -from oslo_concurrency import processutils -from oslo_log import log as logging -from oslo_service import loopingcall -from oslo_utils import excutils -from oslo_utils import fileutils -from oslo_utils import units -from oslo_utils import uuidutils - -from nova.api.metadata import base as instance_metadata -from nova.compute import vm_states -import nova.conf -from nova import exception -from nova.i18n import _ -from nova import objects -from nova.objects import fields -from nova import version -from nova.virt import configdrive -from nova.virt import hardware -from nova.virt.hyperv import block_device_manager -from nova.virt.hyperv import constants -from nova.virt.hyperv import imagecache -from nova.virt.hyperv import pathutils -from nova.virt.hyperv import serialconsoleops -from nova.virt.hyperv import vif as vif_utils -from nova.virt.hyperv import volumeops - -LOG = logging.getLogger(__name__) - - -CONF = nova.conf.CONF - -SHUTDOWN_TIME_INCREMENT = 5 -REBOOT_TYPE_SOFT = 'SOFT' -REBOOT_TYPE_HARD = 'HARD' - -VM_GENERATIONS = { - constants.IMAGE_PROP_VM_GEN_1: constants.VM_GEN_1, - constants.IMAGE_PROP_VM_GEN_2: constants.VM_GEN_2 -} - -VM_GENERATIONS_CONTROLLER_TYPES = { - constants.VM_GEN_1: constants.CTRL_TYPE_IDE, - constants.VM_GEN_2: constants.CTRL_TYPE_SCSI -} - - -def check_admin_permissions(function): - @functools.wraps(function) - def wrapper(self, *args, **kwds): - - # Make sure the windows account has the required admin permissions. - self._vmutils.check_admin_permissions() - return function(self, *args, **kwds) - return wrapper - - -class VMOps(object): - # The console log is stored in two files, each should have at most half of - # the maximum console log size. 
- _MAX_CONSOLE_LOG_FILE_SIZE = units.Mi / 2 - _ROOT_DISK_CTRL_ADDR = 0 - - def __init__(self, virtapi=None): - self._virtapi = virtapi - self._vmutils = utilsfactory.get_vmutils() - self._metricsutils = utilsfactory.get_metricsutils() - self._vhdutils = utilsfactory.get_vhdutils() - self._hostutils = utilsfactory.get_hostutils() - self._migrutils = utilsfactory.get_migrationutils() - self._pathutils = pathutils.PathUtils() - self._volumeops = volumeops.VolumeOps() - self._imagecache = imagecache.ImageCache() - self._serial_console_ops = serialconsoleops.SerialConsoleOps() - self._block_dev_man = ( - block_device_manager.BlockDeviceInfoManager()) - self._vif_driver = vif_utils.HyperVVIFDriver() - - def list_instance_uuids(self): - instance_uuids = [] - for (instance_name, notes) in self._vmutils.list_instance_notes(): - if notes and uuidutils.is_uuid_like(notes[0]): - instance_uuids.append(str(notes[0])) - else: - LOG.debug("Notes not found or not resembling a GUID for " - "instance: %s", instance_name) - return instance_uuids - - def list_instances(self): - return self._vmutils.list_instances() - - def get_info(self, instance): - """Get information about the VM.""" - LOG.debug("get_info called for instance", instance=instance) - - instance_name = instance.name - if not self._vmutils.vm_exists(instance_name): - raise exception.InstanceNotFound(instance_id=instance.uuid) - - info = self._vmutils.get_vm_summary_info(instance_name) - - state = constants.HYPERV_POWER_STATE[info['EnabledState']] - return hardware.InstanceInfo(state=state) - - def _create_root_device(self, context, instance, root_disk_info, vm_gen): - path = None - if root_disk_info['type'] == constants.DISK: - path = self._create_root_vhd(context, instance) - self.check_vm_image_type(instance.uuid, vm_gen, path) - root_disk_info['path'] = path - - def _create_root_vhd(self, context, instance, rescue_image_id=None): - is_rescue_vhd = rescue_image_id is not None - - base_vhd_path = self._imagecache.get_cached_image(context, instance, - rescue_image_id) - base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path) - base_vhd_size = base_vhd_info['VirtualSize'] - format_ext = base_vhd_path.split('.')[-1] - root_vhd_path = self._pathutils.get_root_vhd_path(instance.name, - format_ext, - is_rescue_vhd) - root_vhd_size = instance.flavor.root_gb * units.Gi - - try: - if CONF.use_cow_images: - LOG.debug("Creating differencing VHD. Parent: " - "%(base_vhd_path)s, Target: %(root_vhd_path)s", - {'base_vhd_path': base_vhd_path, - 'root_vhd_path': root_vhd_path}, - instance=instance) - self._vhdutils.create_differencing_vhd(root_vhd_path, - base_vhd_path) - vhd_type = self._vhdutils.get_vhd_format(base_vhd_path) - if vhd_type == constants.DISK_FORMAT_VHD: - # The base image has already been resized. As differencing - # vhdx images support it, the root image will be resized - # instead if needed. 
- return root_vhd_path - else: - LOG.debug("Copying VHD image %(base_vhd_path)s to target: " - "%(root_vhd_path)s", - {'base_vhd_path': base_vhd_path, - 'root_vhd_path': root_vhd_path}, - instance=instance) - self._pathutils.copyfile(base_vhd_path, root_vhd_path) - - root_vhd_internal_size = ( - self._vhdutils.get_internal_vhd_size_by_file_size( - base_vhd_path, root_vhd_size)) - - if not is_rescue_vhd and self._is_resize_needed( - root_vhd_path, base_vhd_size, - root_vhd_internal_size, instance): - self._vhdutils.resize_vhd(root_vhd_path, - root_vhd_internal_size, - is_file_max_size=False) - except Exception: - with excutils.save_and_reraise_exception(): - if self._pathutils.exists(root_vhd_path): - self._pathutils.remove(root_vhd_path) - - return root_vhd_path - - def _is_resize_needed(self, vhd_path, old_size, new_size, instance): - if new_size < old_size: - raise exception.FlavorDiskSmallerThanImage( - flavor_size=new_size, image_size=old_size) - elif new_size > old_size: - LOG.debug("Resizing VHD %(vhd_path)s to new " - "size %(new_size)s", - {'new_size': new_size, - 'vhd_path': vhd_path}, - instance=instance) - return True - return False - - def _create_ephemerals(self, instance, ephemerals): - for index, eph in enumerate(ephemerals): - eph['format'] = self._vhdutils.get_best_supported_vhd_format() - eph_name = "eph%s" % index - eph['path'] = self._pathutils.get_ephemeral_vhd_path( - instance.name, eph['format'], eph_name) - self.create_ephemeral_disk(instance.name, eph) - - def create_ephemeral_disk(self, instance_name, eph_info): - self._vhdutils.create_dynamic_vhd(eph_info['path'], - eph_info['size'] * units.Gi) - - @staticmethod - def _get_vif_metadata(context, instance_id): - vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context, - instance_id) - vif_metadata = [] - for vif in vifs: - if 'tag' in vif and vif.tag: - device = objects.NetworkInterfaceMetadata( - mac=vif.address, - bus=objects.PCIDeviceBus(), - tags=[vif.tag]) - vif_metadata.append(device) - - return vif_metadata - - def _save_device_metadata(self, context, instance, block_device_info): - """Builds a metadata object for instance devices, that maps the user - provided tag to the hypervisor assigned device address. - """ - metadata = [] - - metadata.extend(self._get_vif_metadata(context, instance.uuid)) - if block_device_info: - metadata.extend(self._block_dev_man.get_bdm_metadata( - context, instance, block_device_info)) - - if metadata: - instance.device_metadata = objects.InstanceDeviceMetadata( - devices=metadata) - - def set_boot_order(self, instance_name, vm_gen, block_device_info): - boot_order = self._block_dev_man.get_boot_order( - vm_gen, block_device_info) - LOG.debug("Setting boot order for instance: %(instance_name)s: " - "%(boot_order)s", {'instance_name': instance_name, - 'boot_order': boot_order}) - - self._vmutils.set_boot_order(instance_name, boot_order) - - @check_admin_permissions - def spawn(self, context, instance, image_meta, injected_files, - admin_password, network_info, block_device_info=None): - """Create a new VM and start it.""" - LOG.info("Spawning new instance", instance=instance) - - instance_name = instance.name - if self._vmutils.vm_exists(instance_name): - raise exception.InstanceExists(name=instance_name) - - # Make sure we're starting with a clean slate. 
- self._delete_disk_files(instance_name) - - vm_gen = self.get_image_vm_generation(instance.uuid, image_meta) - - self._block_dev_man.validate_and_update_bdi( - instance, image_meta, vm_gen, block_device_info) - root_device = block_device_info['root_disk'] - self._create_root_device(context, instance, root_device, vm_gen) - self._create_ephemerals(instance, block_device_info['ephemerals']) - - try: - with self.wait_vif_plug_events(instance, network_info): - # waiting will occur after the instance is created. - self.create_instance(instance, network_info, root_device, - block_device_info, vm_gen, image_meta) - # This is supported starting from OVS version 2.5 - self.plug_vifs(instance, network_info) - - self._save_device_metadata(context, instance, block_device_info) - - if configdrive.required_by(instance): - configdrive_path = self._create_config_drive(context, - instance, - injected_files, - admin_password, - network_info) - - self.attach_config_drive(instance, configdrive_path, vm_gen) - self.set_boot_order(instance.name, vm_gen, block_device_info) - # vifs are already plugged in at this point. We waited on the vif - # plug event previously when we created the instance. Skip the - # plug vifs during power on in this case - self.power_on(instance, - network_info=network_info, - should_plug_vifs=False) - except Exception: - with excutils.save_and_reraise_exception(): - self.destroy(instance, network_info, block_device_info) - - @contextlib.contextmanager - def wait_vif_plug_events(self, instance, network_info): - timeout = CONF.vif_plugging_timeout - - try: - # NOTE(claudiub): async calls to bind the neutron ports will be - # done when network_info is being accessed. - events = self._get_neutron_events(network_info) - with self._virtapi.wait_for_instance_event( - instance, events, deadline=timeout, - error_callback=self._neutron_failed_callback): - yield - except etimeout.Timeout: - # We never heard from Neutron - LOG.warning('Timeout waiting for vif plugging callback for ' - 'instance.', instance=instance) - if CONF.vif_plugging_is_fatal: - raise exception.VirtualInterfaceCreateException() - except exception.PortBindingFailed: - LOG.warning( - "Neutron failed to bind a port to this host. Make sure that " - "an L2 agent is alive and registered from this node (neutron " - "Open vSwitch agent or Hyper-V agent), or make sure that " - "neutron is configured with a mechanism driver that is able " - "to bind ports to this host (OVN). If you are using neutron " - "Hyper-V agent, make sure that networking-hyperv is installed " - "on the neutron controller, and that the neutron-server was " - "configured to use the 'hyperv' mechanism_driver.") - raise - - def _neutron_failed_callback(self, event_name, instance): - LOG.error('Neutron Reported failure on event %s', - event_name, instance=instance) - if CONF.vif_plugging_is_fatal: - raise exception.VirtualInterfaceCreateException() - - def _get_neutron_events(self, network_info): - # NOTE(danms): We need to collect any VIFs that are currently - # down that we expect a down->up event for. Anything that is - # already up will not undergo that transition, and for - # anything that might be stale (cache-wise) assume it's - # already up so we don't block on it. 
- if CONF.vif_plugging_timeout: - return [('network-vif-plugged', vif['id']) - for vif in network_info if vif.get('active') is False] - - return [] - - def create_instance(self, instance, network_info, root_device, - block_device_info, vm_gen, image_meta): - instance_name = instance.name - instance_path = os.path.join(CONF.instances_path, instance_name) - secure_boot_enabled = self._requires_secure_boot(instance, image_meta, - vm_gen) - - memory_per_numa_node, cpus_per_numa_node = ( - self._get_instance_vnuma_config(instance, image_meta)) - - if memory_per_numa_node: - LOG.debug("Instance requires vNUMA topology. Host's NUMA spanning " - "has to be disabled in order for the instance to " - "benefit from it.", instance=instance) - if CONF.hyperv.dynamic_memory_ratio > 1.0: - LOG.warning( - "Instance vNUMA topology requested, but dynamic memory " - "ratio is higher than 1.0 in nova.conf. Ignoring dynamic " - "memory ratio option.", instance=instance) - dynamic_memory_ratio = 1.0 - vnuma_enabled = True - else: - dynamic_memory_ratio = CONF.hyperv.dynamic_memory_ratio - vnuma_enabled = False - - if instance.pci_requests.requests: - # NOTE(claudiub): if the instance requires PCI devices, its - # host shutdown action MUST be shutdown. - host_shutdown_action = os_win_const.HOST_SHUTDOWN_ACTION_SHUTDOWN - else: - host_shutdown_action = None - - self._vmutils.create_vm(instance_name, - vnuma_enabled, - vm_gen, - instance_path, - [instance.uuid]) - - self._vmutils.update_vm(instance_name, - instance.flavor.memory_mb, - memory_per_numa_node, - instance.flavor.vcpus, - cpus_per_numa_node, - CONF.hyperv.limit_cpu_features, - dynamic_memory_ratio, - host_shutdown_action=host_shutdown_action, - chassis_asset_tag=version.product_string()) - - self._configure_remotefx(instance, vm_gen) - - self._vmutils.create_scsi_controller(instance_name) - self._attach_root_device(instance_name, root_device) - self._attach_ephemerals(instance_name, block_device_info['ephemerals']) - self._volumeops.attach_volumes( - block_device_info['block_device_mapping'], instance_name) - - # For the moment, we use COM port 1 when getting the serial console - # log as well as interactive sessions. In the future, the way in which - # we consume instance serial ports may become configurable. - # - # Note that Hyper-V instances will always have 2 COM ports - serial_ports = { - constants.DEFAULT_SERIAL_CONSOLE_PORT: - constants.SERIAL_PORT_TYPE_RW} - self._create_vm_com_port_pipes(instance, serial_ports) - - for vif in network_info: - LOG.debug('Creating nic for instance', instance=instance) - self._vmutils.create_nic(instance_name, - vif['id'], - vif['address']) - - if CONF.hyperv.enable_instance_metrics_collection: - self._metricsutils.enable_vm_metrics_collection(instance_name) - - self._set_instance_disk_qos_specs(instance) - - if secure_boot_enabled: - certificate_required = self._requires_certificate(image_meta) - self._vmutils.enable_secure_boot( - instance.name, msft_ca_required=certificate_required) - - self._attach_pci_devices(instance) - - def _attach_pci_devices(self, instance): - for pci_request in instance.pci_requests.requests: - spec = pci_request.spec[0] - for counter in range(pci_request.count): - self._vmutils.add_pci_device(instance.name, - spec['vendor_id'], - spec['product_id']) - - def _get_instance_vnuma_config(self, instance, image_meta): - """Returns the appropriate NUMA configuration for Hyper-V instances, - given the desired instance NUMA topology. 
- - :param instance: instance containing the flavor and it's extra_specs, - where the NUMA topology is defined. - :param image_meta: image's metadata, containing properties related to - the instance's NUMA topology. - :returns: memory amount and number of vCPUs per NUMA node or - (None, None), if instance NUMA topology was not requested. - :raises exception.InstanceUnacceptable: - If the given instance NUMA topology is not possible on Hyper-V, or - if CPU pinning is required. - """ - instance_topology = hardware.numa_get_constraints(instance.flavor, - image_meta) - if not instance_topology: - # instance NUMA topology was not requested. - return None, None - - memory_per_numa_node = instance_topology.cells[0].memory - cpus_per_numa_node = len(instance_topology.cells[0].cpuset) - - # TODO(stephenfin): We can avoid this check entirely if we rely on the - # 'supports_pcpus' driver capability (via a trait), but we need to drop - # support for the legacy 'vcpu_pin_set' path in the libvirt driver - # first - if instance_topology.cpu_policy not in ( - None, fields.CPUAllocationPolicy.SHARED, - ): - raise exception.InstanceUnacceptable( - reason=_("Hyper-V does not support CPU pinning."), - instance_id=instance.uuid) - - # validate that the requested NUMA topology is not asymmetric. - # e.g.: it should be like: (X cpus, X cpus, Y cpus), where X == Y. - # same with memory. - for cell in instance_topology.cells: - if len(cell.cpuset) != cpus_per_numa_node: - reason = _("Hyper-V does not support NUMA topologies with " - "uneven number of processors. (%(a)s != %(b)s)") % { - 'a': len(cell.cpuset), 'b': cpus_per_numa_node} - raise exception.InstanceUnacceptable(reason=reason, - instance_id=instance.uuid) - if cell.memory != memory_per_numa_node: - reason = _("Hyper-V does not support NUMA topologies with " - "uneven amounts of memory. (%(a)s != %(b)s)") % { - 'a': cell.memory, 'b': memory_per_numa_node} - raise exception.InstanceUnacceptable(reason=reason, - instance_id=instance.uuid) - - return memory_per_numa_node, cpus_per_numa_node - - def _configure_remotefx(self, instance, vm_gen): - extra_specs = instance.flavor.extra_specs - remotefx_max_resolution = extra_specs.get( - constants.FLAVOR_ESPEC_REMOTEFX_RES) - if not remotefx_max_resolution: - # RemoteFX not required. 
- return - - if not CONF.hyperv.enable_remotefx: - raise exception.InstanceUnacceptable( - _("enable_remotefx configuration option needs to be set to " - "True in order to use RemoteFX.")) - - if not self._hostutils.check_server_feature( - self._hostutils.FEATURE_RDS_VIRTUALIZATION): - raise exception.InstanceUnacceptable( - _("The RDS-Virtualization feature must be installed in order " - "to use RemoteFX.")) - - if not self._vmutils.vm_gen_supports_remotefx(vm_gen): - raise exception.InstanceUnacceptable( - _("RemoteFX is not supported on generation %s virtual " - "machines on this version of Windows.") % vm_gen) - - instance_name = instance.name - LOG.debug('Configuring RemoteFX for instance: %s', instance_name) - - remotefx_monitor_count = int(extra_specs.get( - constants.FLAVOR_ESPEC_REMOTEFX_MONITORS) or 1) - remotefx_vram = extra_specs.get( - constants.FLAVOR_ESPEC_REMOTEFX_VRAM) - vram_bytes = int(remotefx_vram) * units.Mi if remotefx_vram else None - - self._vmutils.enable_remotefx_video_adapter( - instance_name, - remotefx_monitor_count, - remotefx_max_resolution, - vram_bytes) - - def _attach_root_device(self, instance_name, root_dev_info): - if root_dev_info['type'] == constants.VOLUME: - self._volumeops.attach_volume(root_dev_info['connection_info'], - instance_name, - disk_bus=root_dev_info['disk_bus']) - else: - self._attach_drive(instance_name, root_dev_info['path'], - root_dev_info['drive_addr'], - root_dev_info['ctrl_disk_addr'], - root_dev_info['disk_bus'], - root_dev_info['type']) - - def _attach_ephemerals(self, instance_name, ephemerals): - for eph in ephemerals: - # if an ephemeral doesn't have a path, it might have been removed - # during resize. - if eph.get('path'): - self._attach_drive( - instance_name, eph['path'], eph['drive_addr'], - eph['ctrl_disk_addr'], eph['disk_bus'], - constants.BDI_DEVICE_TYPE_TO_DRIVE_TYPE[ - eph['device_type']]) - - def _attach_drive(self, instance_name, path, drive_addr, ctrl_disk_addr, - controller_type, drive_type=constants.DISK): - if controller_type == constants.CTRL_TYPE_SCSI: - self._vmutils.attach_scsi_drive(instance_name, path, drive_type) - else: - self._vmutils.attach_ide_drive(instance_name, path, drive_addr, - ctrl_disk_addr, drive_type) - - def get_image_vm_generation(self, instance_id, image_meta): - default_vm_gen = self._hostutils.get_default_vm_generation() - image_prop_vm = image_meta.properties.get('hw_machine_type', - default_vm_gen) - if image_prop_vm not in self._hostutils.get_supported_vm_types(): - reason = _('Requested VM Generation %s is not supported on ' - 'this OS.') % image_prop_vm - raise exception.InstanceUnacceptable(instance_id=instance_id, - reason=reason) - - return VM_GENERATIONS[image_prop_vm] - - def check_vm_image_type(self, instance_id, vm_gen, root_vhd_path): - if (vm_gen != constants.VM_GEN_1 and root_vhd_path and - self._vhdutils.get_vhd_format( - root_vhd_path) == constants.DISK_FORMAT_VHD): - reason = _('Requested VM Generation %s, but provided VHD ' - 'instead of VHDX.') % vm_gen - raise exception.InstanceUnacceptable(instance_id=instance_id, - reason=reason) - - def _requires_certificate(self, image_meta): - os_type = image_meta.properties.get('os_type') - if os_type == fields.OSType.WINDOWS: - return False - return True - - def _requires_secure_boot(self, instance, image_meta, vm_gen): - """Checks whether the given instance requires Secure Boot. - - Secure Boot feature will be enabled by setting the "os_secure_boot" - image property or the "os:secure_boot" flavor extra spec to required. 
- - :raises exception.InstanceUnacceptable: if the given image_meta has - no os_type property set, or if the image property value and the - flavor extra spec value are conflicting, or if Secure Boot is - required, but the instance's VM generation is 1. - """ - img_secure_boot = image_meta.properties.get('os_secure_boot') - flavor_secure_boot = instance.flavor.extra_specs.get( - constants.FLAVOR_SPEC_SECURE_BOOT) - - requires_sb = False - conflicting_values = False - - if flavor_secure_boot == fields.SecureBoot.REQUIRED: - requires_sb = True - if img_secure_boot == fields.SecureBoot.DISABLED: - conflicting_values = True - elif img_secure_boot == fields.SecureBoot.REQUIRED: - requires_sb = True - if flavor_secure_boot == fields.SecureBoot.DISABLED: - conflicting_values = True - - if conflicting_values: - reason = _( - "Conflicting image metadata property and flavor extra_specs " - "values: os_secure_boot (%(image_secure_boot)s) / " - "os:secure_boot (%(flavor_secure_boot)s)") % { - 'image_secure_boot': img_secure_boot, - 'flavor_secure_boot': flavor_secure_boot} - raise exception.InstanceUnacceptable(instance_id=instance.uuid, - reason=reason) - - if requires_sb: - if vm_gen != constants.VM_GEN_2: - reason = _('Secure boot requires generation 2 VM.') - raise exception.InstanceUnacceptable(instance_id=instance.uuid, - reason=reason) - - os_type = image_meta.properties.get('os_type') - if not os_type: - reason = _('For secure boot, os_type must be specified in ' - 'image properties.') - raise exception.InstanceUnacceptable(instance_id=instance.uuid, - reason=reason) - return requires_sb - - def _create_config_drive(self, context, instance, injected_files, - admin_password, network_info, rescue=False): - if CONF.config_drive_format != 'iso9660': - raise exception.ConfigDriveUnsupportedFormat( - format=CONF.config_drive_format) - - LOG.info('Using config drive for instance', instance=instance) - - extra_md = {} - if admin_password and CONF.hyperv.config_drive_inject_password: - extra_md['admin_pass'] = admin_password - - inst_md = instance_metadata.InstanceMetadata( - instance, content=injected_files, extra_md=extra_md, - network_info=network_info) - - configdrive_path_iso = self._pathutils.get_configdrive_path( - instance.name, constants.DVD_FORMAT, rescue=rescue) - LOG.info('Creating config drive at %(path)s', - {'path': configdrive_path_iso}, instance=instance) - - with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb: - try: - cdb.make_drive(configdrive_path_iso) - except processutils.ProcessExecutionError as e: - with excutils.save_and_reraise_exception(): - LOG.error('Creating config drive failed with ' - 'error: %s', e, instance=instance) - - if not CONF.hyperv.config_drive_cdrom: - configdrive_path = self._pathutils.get_configdrive_path( - instance.name, constants.DISK_FORMAT_VHD, rescue=rescue) - processutils.execute(CONF.hyperv.qemu_img_cmd, - 'convert', - '-f', - 'raw', - '-O', - 'vpc', - configdrive_path_iso, - configdrive_path, - attempts=1) - self._pathutils.remove(configdrive_path_iso) - else: - configdrive_path = configdrive_path_iso - - return configdrive_path - - def attach_config_drive(self, instance, configdrive_path, vm_gen): - configdrive_ext = configdrive_path[(configdrive_path.rfind('.') + 1):] - # Do the attach here and if there is a certain file format that isn't - # supported in constants.DISK_FORMAT_MAP then bomb out. 
- try: - drive_type = constants.DISK_FORMAT_MAP[configdrive_ext] - controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen] - self._attach_drive(instance.name, configdrive_path, 1, 0, - controller_type, drive_type) - except KeyError: - raise exception.InvalidDiskFormat(disk_format=configdrive_ext) - - def _detach_config_drive(self, instance_name, rescue=False, delete=False): - configdrive_path = self._pathutils.lookup_configdrive_path( - instance_name, rescue=rescue) - - if configdrive_path: - self._vmutils.detach_vm_disk(instance_name, - configdrive_path, - is_physical=False) - if delete: - self._pathutils.remove(configdrive_path) - - @serialconsoleops.instance_synchronized - def _delete_disk_files(self, instance_name): - # We want to avoid the situation in which serial console workers - # are started while we perform this operation, preventing us from - # deleting the instance log files (bug #1556189). This can happen - # due to delayed instance lifecycle events. - # - # The unsynchronized method is being used to avoid a deadlock. - self._serial_console_ops.stop_console_handler_unsync(instance_name) - self._pathutils.get_instance_dir(instance_name, - create_dir=False, - remove_dir=True) - - def destroy(self, instance, network_info, block_device_info, - destroy_disks=True): - instance_name = instance.name - LOG.info("Got request to destroy instance", instance=instance) - try: - if self._vmutils.vm_exists(instance_name): - - # Stop the VM first. - self._vmutils.stop_vm_jobs(instance_name) - self.power_off(instance) - self._vmutils.destroy_vm(instance_name) - elif self._migrutils.planned_vm_exists(instance_name): - self._migrutils.destroy_existing_planned_vm(instance_name) - else: - LOG.debug("Instance not found", instance=instance) - - # NOTE(claudiub): The vifs should be unplugged and the volumes - # should be disconnected even if the VM doesn't exist anymore, - # so they are not leaked. - self.unplug_vifs(instance, network_info) - self._volumeops.disconnect_volumes(block_device_info, force=True) - - if destroy_disks: - self._delete_disk_files(instance_name) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception('Failed to destroy instance: %s', instance_name) - - def reboot(self, instance, network_info, reboot_type): - """Reboot the specified instance.""" - LOG.debug("Rebooting instance", instance=instance) - - if reboot_type == REBOOT_TYPE_SOFT: - if self._soft_shutdown(instance): - self.power_on(instance, network_info=network_info) - return - - self._set_vm_state(instance, - os_win_const.HYPERV_VM_STATE_REBOOT) - - def _soft_shutdown(self, instance, - timeout=CONF.hyperv.wait_soft_reboot_seconds, - retry_interval=SHUTDOWN_TIME_INCREMENT): - """Perform a soft shutdown on the VM. - - :return: True if the instance was shutdown within time limit, - False otherwise. - """ - LOG.debug("Performing Soft shutdown on instance", instance=instance) - - while timeout > 0: - # Perform a soft shutdown on the instance. - # Wait maximum timeout for the instance to be shutdown. - # If it was not shutdown, retry until it succeeds or a maximum of - # time waited is equal to timeout. 
- wait_time = min(retry_interval, timeout) - try: - LOG.debug("Soft shutdown instance, timeout remaining: %d", - timeout, instance=instance) - self._vmutils.soft_shutdown_vm(instance.name) - if self._wait_for_power_off(instance.name, wait_time): - LOG.info("Soft shutdown succeeded.", - instance=instance) - return True - except os_win_exc.HyperVException as e: - # Exception is raised when trying to shutdown the instance - # while it is still booting. - LOG.debug("Soft shutdown failed: %s", e, instance=instance) - time.sleep(wait_time) - - timeout -= retry_interval - - LOG.warning("Timed out while waiting for soft shutdown.", - instance=instance) - return False - - def pause(self, instance): - """Pause VM instance.""" - LOG.debug("Pause instance", instance=instance) - self._set_vm_state(instance, - os_win_const.HYPERV_VM_STATE_PAUSED) - - def unpause(self, instance): - """Unpause paused VM instance.""" - LOG.debug("Unpause instance", instance=instance) - self._set_vm_state(instance, - os_win_const.HYPERV_VM_STATE_ENABLED) - - def suspend(self, instance): - """Suspend the specified instance.""" - LOG.debug("Suspend instance", instance=instance) - self._set_vm_state(instance, - os_win_const.HYPERV_VM_STATE_SUSPENDED) - - def resume(self, instance): - """Resume the suspended VM instance.""" - LOG.debug("Resume instance", instance=instance) - self._set_vm_state(instance, - os_win_const.HYPERV_VM_STATE_ENABLED) - - def power_off(self, instance, timeout=0, retry_interval=0): - """Power off the specified instance.""" - LOG.debug("Power off instance", instance=instance) - - # We must make sure that the console log workers are stopped, - # otherwise we won't be able to delete or move the VM log files. - self._serial_console_ops.stop_console_handler(instance.name) - - if retry_interval <= 0: - retry_interval = SHUTDOWN_TIME_INCREMENT - - try: - if timeout and self._soft_shutdown(instance, - timeout, - retry_interval): - return - - self._set_vm_state(instance, - os_win_const.HYPERV_VM_STATE_DISABLED) - except os_win_exc.HyperVVMNotFoundException: - # The manager can call the stop API after receiving instance - # power off events. If this is triggered when the instance - # is being deleted, it might attempt to power off an unexisting - # instance. We'll just pass in this case. - LOG.debug("Instance not found. 
Skipping power off", - instance=instance) - - def power_on(self, instance, block_device_info=None, network_info=None, - should_plug_vifs=True): - """Power on the specified instance.""" - LOG.debug("Power on instance", instance=instance) - - if block_device_info: - self._volumeops.fix_instance_volume_disk_paths(instance.name, - block_device_info) - - if should_plug_vifs: - self.plug_vifs(instance, network_info) - self._set_vm_state(instance, os_win_const.HYPERV_VM_STATE_ENABLED) - - def _set_vm_state(self, instance, req_state): - instance_name = instance.name - - try: - self._vmutils.set_vm_state(instance_name, req_state) - - LOG.debug("Successfully changed state of VM %(instance_name)s" - " to: %(req_state)s", {'instance_name': instance_name, - 'req_state': req_state}) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error("Failed to change vm state of %(instance_name)s" - " to %(req_state)s", - {'instance_name': instance_name, - 'req_state': req_state}) - - def _get_vm_state(self, instance_name): - summary_info = self._vmutils.get_vm_summary_info(instance_name) - return summary_info['EnabledState'] - - def _wait_for_power_off(self, instance_name, time_limit): - """Waiting for a VM to be in a disabled state. - - :return: True if the instance is shutdown within time_limit, - False otherwise. - """ - - desired_vm_states = [os_win_const.HYPERV_VM_STATE_DISABLED] - - def _check_vm_status(instance_name): - if self._get_vm_state(instance_name) in desired_vm_states: - raise loopingcall.LoopingCallDone() - - periodic_call = loopingcall.FixedIntervalLoopingCall(_check_vm_status, - instance_name) - - try: - # add a timeout to the periodic call. - periodic_call.start(interval=SHUTDOWN_TIME_INCREMENT) - etimeout.with_timeout(time_limit, periodic_call.wait) - except etimeout.Timeout: - # VM did not shutdown in the expected time_limit. - return False - finally: - # stop the periodic call, in case of exceptions or Timeout. - periodic_call.stop() - - return True - - def resume_state_on_host_boot(self, context, instance, network_info, - block_device_info=None): - """Resume guest state when a host is booted.""" - self.power_on(instance, block_device_info, network_info) - - def _create_vm_com_port_pipes(self, instance, serial_ports): - for port_number, port_type in serial_ports.items(): - pipe_path = r'\\.\pipe\%s_%s' % (instance.uuid, port_type) - self._vmutils.set_vm_serial_port_connection( - instance.name, port_number, pipe_path) - - def copy_vm_dvd_disks(self, vm_name, dest_host): - dvd_disk_paths = self._vmutils.get_vm_dvd_disk_paths(vm_name) - dest_path = self._pathutils.get_instance_dir( - vm_name, remote_server=dest_host) - for path in dvd_disk_paths: - self._pathutils.copyfile(path, dest_path) - - def plug_vifs(self, instance, network_info): - if network_info: - for vif in network_info: - self._vif_driver.plug(instance, vif) - - def unplug_vifs(self, instance, network_info): - if network_info: - for vif in network_info: - self._vif_driver.unplug(instance, vif) - - def _check_hotplug_available(self, instance): - """Check whether attaching an interface is possible for the given - instance. - - :returns: True if attaching / detaching interfaces is possible for the - given instance. - """ - vm_state = self._get_vm_state(instance.name) - if vm_state == os_win_const.HYPERV_VM_STATE_DISABLED: - # can attach / detach interface to stopped VMs. - return True - - if not self._hostutils.check_min_windows_version(10, 0): - # TODO(claudiub): add set log level to error after string freeze. 
- LOG.debug("vNIC hot plugging is supported only in newer " - "versions than Windows Hyper-V / Server 2012 R2.") - return False - - if (self._vmutils.get_vm_generation(instance.name) == - constants.VM_GEN_1): - # TODO(claudiub): add set log level to error after string freeze. - LOG.debug("Cannot hot plug vNIC to a first generation VM.", - instance=instance) - return False - - return True - - def attach_interface(self, instance, vif): - if not self._check_hotplug_available(instance): - raise exception.InterfaceAttachFailed(instance_uuid=instance.uuid) - - LOG.debug('Attaching vif: %s', vif['id'], instance=instance) - self._vmutils.create_nic(instance.name, vif['id'], vif['address']) - self._vif_driver.plug(instance, vif) - - def detach_interface(self, instance, vif): - try: - if not self._check_hotplug_available(instance): - raise exception.InterfaceDetachFailed( - instance_uuid=instance.uuid) - - LOG.debug('Detaching vif: %s', vif['id'], instance=instance) - self._vif_driver.unplug(instance, vif) - self._vmutils.destroy_nic(instance.name, vif['id']) - except os_win_exc.HyperVVMNotFoundException: - # TODO(claudiub): add set log level to error after string freeze. - LOG.debug("Instance not found during detach interface. It " - "might have been destroyed beforehand.", - instance=instance) - raise exception.InterfaceDetachFailed(instance_uuid=instance.uuid) - - def rescue_instance(self, context, instance, network_info, image_meta, - rescue_password): - try: - self._rescue_instance(context, instance, network_info, - image_meta, rescue_password) - except Exception as exc: - with excutils.save_and_reraise_exception(): - LOG.error("Instance rescue failed. Exception: %(exc)s. " - "Attempting to unrescue the instance.", - {'exc': exc}, instance=instance) - self.unrescue_instance(instance) - - def _rescue_instance(self, context, instance, network_info, image_meta, - rescue_password): - rescue_image_id = image_meta.id or instance.image_ref - rescue_vhd_path = self._create_root_vhd( - context, instance, rescue_image_id=rescue_image_id) - - rescue_vm_gen = self.get_image_vm_generation(instance.uuid, - image_meta) - vm_gen = self._vmutils.get_vm_generation(instance.name) - if rescue_vm_gen != vm_gen: - err_msg = _('The requested rescue image requires a different VM ' - 'generation than the actual rescued instance. ' - 'Rescue image VM generation: %(rescue_vm_gen)s. ' - 'Rescued instance VM generation: %(vm_gen)s.') % dict( - rescue_vm_gen=rescue_vm_gen, - vm_gen=vm_gen) - raise exception.ImageUnacceptable(reason=err_msg, - image_id=rescue_image_id) - - root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name) - if not root_vhd_path: - err_msg = _('Instance root disk image could not be found. 
' - 'Rescuing instances booted from volume is ' - 'not supported.') - raise exception.InstanceNotRescuable(reason=err_msg, - instance_id=instance.uuid) - - controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen] - - self._vmutils.detach_vm_disk(instance.name, root_vhd_path, - is_physical=False) - self._attach_drive(instance.name, rescue_vhd_path, 0, - self._ROOT_DISK_CTRL_ADDR, controller_type) - self._vmutils.attach_scsi_drive(instance.name, root_vhd_path, - drive_type=constants.DISK) - - if configdrive.required_by(instance): - self._detach_config_drive(instance.name) - rescue_configdrive_path = self._create_config_drive( - context, - instance, - injected_files=None, - admin_password=rescue_password, - network_info=network_info, - rescue=True) - self.attach_config_drive(instance, rescue_configdrive_path, - vm_gen) - - self.power_on(instance) - - def unrescue_instance(self, instance): - self.power_off(instance) - - root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name) - rescue_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name, - rescue=True) - - if (instance.vm_state == vm_states.RESCUED and - not (rescue_vhd_path and root_vhd_path)): - err_msg = _('Missing instance root and/or rescue image. ' - 'The instance cannot be unrescued.') - raise exception.InstanceNotRescuable(reason=err_msg, - instance_id=instance.uuid) - - vm_gen = self._vmutils.get_vm_generation(instance.name) - controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen] - - self._vmutils.detach_vm_disk(instance.name, root_vhd_path, - is_physical=False) - if rescue_vhd_path: - self._vmutils.detach_vm_disk(instance.name, rescue_vhd_path, - is_physical=False) - fileutils.delete_if_exists(rescue_vhd_path) - self._attach_drive(instance.name, root_vhd_path, 0, - self._ROOT_DISK_CTRL_ADDR, controller_type) - - self._detach_config_drive(instance.name, rescue=True, delete=True) - - # Reattach the configdrive, if exists and not already attached. 
- configdrive_path = self._pathutils.lookup_configdrive_path( - instance.name) - if configdrive_path and not self._vmutils.is_disk_attached( - configdrive_path, is_physical=False): - self.attach_config_drive(instance, configdrive_path, vm_gen) - - self.power_on(instance) - - def _set_instance_disk_qos_specs(self, instance): - quota_specs = self._get_scoped_flavor_extra_specs(instance, 'quota') - - disk_total_bytes_sec = int( - quota_specs.get('disk_total_bytes_sec') or 0) - disk_total_iops_sec = int( - quota_specs.get('disk_total_iops_sec') or - self._volumeops.bytes_per_sec_to_iops(disk_total_bytes_sec)) - - if disk_total_iops_sec: - local_disks = self._get_instance_local_disks(instance.name) - for disk_path in local_disks: - self._vmutils.set_disk_qos_specs(disk_path, - disk_total_iops_sec) - - def _get_instance_local_disks(self, instance_name): - instance_path = self._pathutils.get_instance_dir(instance_name) - instance_disks = self._vmutils.get_vm_storage_paths(instance_name)[0] - local_disks = [disk_path for disk_path in instance_disks - if instance_path in disk_path] - return local_disks - - def _get_scoped_flavor_extra_specs(self, instance, scope): - extra_specs = instance.flavor.extra_specs or {} - filtered_specs = {} - for spec, value in extra_specs.items(): - if ':' in spec: - _scope, key = spec.split(':') - if _scope == scope: - filtered_specs[key] = value - return filtered_specs diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py deleted file mode 100644 index d2bfed2441ea..000000000000 --- a/nova/virt/hyperv/volumeops.py +++ /dev/null @@ -1,378 +0,0 @@ -# Copyright 2012 Pedro Navarro Perez -# Copyright 2013 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Management class for Storage-related functions (attach, detach, etc). 
-""" -import time - -from os_brick.initiator import connector -from os_win import utilsfactory -from oslo_log import log as logging -from oslo_utils import strutils - -import nova.conf -from nova import exception -from nova.i18n import _ -from nova import utils -from nova.virt import driver -from nova.virt.hyperv import constants - -LOG = logging.getLogger(__name__) - -CONF = nova.conf.CONF - - -class VolumeOps(object): - """Management class for Volume-related tasks - """ - - def __init__(self): - self._vmutils = utilsfactory.get_vmutils() - self._default_root_device = 'vda' - self.volume_drivers = { - constants.STORAGE_PROTOCOL_SMBFS: SMBFSVolumeDriver(), - constants.STORAGE_PROTOCOL_ISCSI: ISCSIVolumeDriver(), - constants.STORAGE_PROTOCOL_FC: FCVolumeDriver(), - constants.STORAGE_PROTOCOL_RBD: RBDVolumeDriver()} - - def _get_volume_driver(self, connection_info): - driver_type = connection_info.get('driver_volume_type') - if driver_type not in self.volume_drivers: - raise exception.VolumeDriverNotFound(driver_type=driver_type) - return self.volume_drivers[driver_type] - - def attach_volumes(self, volumes, instance_name): - for vol in volumes: - self.attach_volume(vol['connection_info'], instance_name) - - def disconnect_volumes(self, block_device_info, force=False): - mapping = driver.block_device_info_get_mapping(block_device_info) - for vol in mapping: - self.disconnect_volume(vol['connection_info'], force=force) - - def attach_volume(self, connection_info, instance_name, - disk_bus=constants.CTRL_TYPE_SCSI): - tries_left = CONF.hyperv.volume_attach_retry_count + 1 - - while tries_left: - try: - self._attach_volume(connection_info, - instance_name, - disk_bus) - break - except Exception as ex: - tries_left -= 1 - if not tries_left: - LOG.exception( - "Failed to attach volume %(connection_info)s " - "to instance %(instance_name)s.", - {'connection_info': - strutils.mask_dict_password(connection_info), - 'instance_name': instance_name}) - - self.disconnect_volume(connection_info) - raise exception.VolumeAttachFailed( - volume_id=connection_info['serial'], - reason=ex) - else: - LOG.warning( - "Failed to attach volume %(connection_info)s " - "to instance %(instance_name)s. 
" - "Tries left: %(tries_left)s.", - {'connection_info': strutils.mask_dict_password( - connection_info), - 'instance_name': instance_name, - 'tries_left': tries_left}) - - time.sleep(CONF.hyperv.volume_attach_retry_interval) - - def _attach_volume(self, connection_info, instance_name, - disk_bus=constants.CTRL_TYPE_SCSI): - LOG.debug( - "Attaching volume: %(connection_info)s to %(instance_name)s", - {'connection_info': strutils.mask_dict_password(connection_info), - 'instance_name': instance_name}) - volume_driver = self._get_volume_driver(connection_info) - volume_driver.attach_volume(connection_info, - instance_name, - disk_bus) - - qos_specs = connection_info['data'].get('qos_specs') or {} - if qos_specs: - volume_driver.set_disk_qos_specs(connection_info, - qos_specs) - - def disconnect_volume(self, connection_info, force=False): - volume_driver = self._get_volume_driver(connection_info) - volume_driver.disconnect_volume(connection_info, force=force) - - def detach_volume(self, connection_info, instance_name): - LOG.debug("Detaching volume: %(connection_info)s " - "from %(instance_name)s", - {'connection_info': strutils.mask_dict_password( - connection_info), - 'instance_name': instance_name}) - volume_driver = self._get_volume_driver(connection_info) - volume_driver.detach_volume(connection_info, instance_name) - volume_driver.disconnect_volume(connection_info) - - def fix_instance_volume_disk_paths(self, instance_name, block_device_info): - # Mapping containing the current disk paths for each volume. - actual_disk_mapping = self.get_disk_path_mapping(block_device_info) - if not actual_disk_mapping: - return - - # Mapping containing virtual disk resource path and the physical - # disk path for each volume serial number. The physical path - # associated with this resource may not be the right one, - # as physical disk paths can get swapped after host reboots. - vm_disk_mapping = self._vmutils.get_vm_physical_disk_mapping( - instance_name) - - for serial, vm_disk in vm_disk_mapping.items(): - actual_disk_path = actual_disk_mapping[serial] - if vm_disk['mounted_disk_path'] != actual_disk_path: - self._vmutils.set_disk_host_res(vm_disk['resource_path'], - actual_disk_path) - - def get_volume_connector(self): - # NOTE(lpetrut): the Windows os-brick connectors - # do not use a root helper. - conn = connector.get_connector_properties( - root_helper=None, - my_ip=CONF.my_block_storage_ip, - multipath=CONF.hyperv.use_multipath_io, - enforce_multipath=True, - host=CONF.host) - return conn - - def connect_volumes(self, block_device_info): - mapping = driver.block_device_info_get_mapping(block_device_info) - for vol in mapping: - connection_info = vol['connection_info'] - volume_driver = self._get_volume_driver(connection_info) - volume_driver.connect_volume(connection_info) - - def get_disk_path_mapping(self, block_device_info): - block_mapping = driver.block_device_info_get_mapping(block_device_info) - disk_path_mapping = {} - for vol in block_mapping: - connection_info = vol['connection_info'] - disk_serial = connection_info['serial'] - - disk_path = self.get_disk_resource_path(connection_info) - disk_path_mapping[disk_serial] = disk_path - return disk_path_mapping - - def get_disk_resource_path(self, connection_info): - volume_driver = self._get_volume_driver(connection_info) - return volume_driver.get_disk_resource_path(connection_info) - - @staticmethod - def bytes_per_sec_to_iops(no_bytes): - # Hyper-v uses normalized IOPS (8 KB increments) - # as IOPS allocation units. 
- return ( - (no_bytes + constants.IOPS_BASE_SIZE - 1) // - constants.IOPS_BASE_SIZE) - - @staticmethod - def validate_qos_specs(qos_specs, supported_qos_specs): - unsupported_specs = set(qos_specs.keys()).difference( - supported_qos_specs) - if unsupported_specs: - LOG.warning('Got unsupported QoS specs: ' - '%(unsupported_specs)s. ' - 'Supported qos specs: %(supported_qos_specs)s', - {'unsupported_specs': unsupported_specs, - 'supported_qos_specs': supported_qos_specs}) - - -class BaseVolumeDriver(object): - _is_block_dev = True - _protocol = None - _extra_connector_args = {} - - def __init__(self): - self._conn = None - self._diskutils = utilsfactory.get_diskutils() - self._vmutils = utilsfactory.get_vmutils() - self._migrutils = utilsfactory.get_migrationutils() - - @property - def _connector(self): - if not self._conn: - scan_attempts = CONF.hyperv.mounted_disk_query_retry_count - scan_interval = CONF.hyperv.mounted_disk_query_retry_interval - - self._conn = connector.InitiatorConnector.factory( - protocol=self._protocol, - root_helper=None, - use_multipath=CONF.hyperv.use_multipath_io, - device_scan_attempts=scan_attempts, - device_scan_interval=scan_interval, - **self._extra_connector_args) - return self._conn - - def connect_volume(self, connection_info): - return self._connector.connect_volume(connection_info['data']) - - def disconnect_volume(self, connection_info, force=False): - self._connector.disconnect_volume(connection_info['data'], force=force) - - def get_disk_resource_path(self, connection_info): - disk_paths = self._connector.get_volume_paths(connection_info['data']) - if not disk_paths: - vol_id = connection_info['serial'] - err_msg = _("Could not find disk path. Volume id: %s") - raise exception.DiskNotFound(err_msg % vol_id) - - return self._get_disk_res_path(disk_paths[0]) - - def _get_disk_res_path(self, disk_path): - if self._is_block_dev: - # We need the Msvm_DiskDrive resource path as this - # will be used when the disk is attached to an instance. - disk_number = self._diskutils.get_device_number_from_device_name( - disk_path) - disk_res_path = self._vmutils.get_mounted_disk_by_drive_number( - disk_number) - else: - disk_res_path = disk_path - return disk_res_path - - def attach_volume(self, connection_info, instance_name, - disk_bus=constants.CTRL_TYPE_SCSI): - dev_info = self.connect_volume(connection_info) - - serial = connection_info['serial'] - disk_path = self._get_disk_res_path(dev_info['path']) - ctrller_path, slot = self._get_disk_ctrl_and_slot(instance_name, - disk_bus) - if self._is_block_dev: - # We need to tag physical disk resources with the volume - # serial number, in order to be able to retrieve them - # during live migration. 
- self._vmutils.attach_volume_to_controller(instance_name, - ctrller_path, - slot, - disk_path, - serial=serial) - else: - self._vmutils.attach_drive(instance_name, - disk_path, - ctrller_path, - slot) - - def detach_volume(self, connection_info, instance_name): - if self._migrutils.planned_vm_exists(instance_name): - LOG.warning("Instance %s is a Planned VM, cannot detach " - "volumes from it.", instance_name) - return - - disk_path = self.get_disk_resource_path(connection_info) - - LOG.debug("Detaching disk %(disk_path)s " - "from instance: %(instance_name)s", - dict(disk_path=disk_path, - instance_name=instance_name)) - self._vmutils.detach_vm_disk(instance_name, disk_path, - is_physical=self._is_block_dev) - - def _get_disk_ctrl_and_slot(self, instance_name, disk_bus): - if disk_bus == constants.CTRL_TYPE_IDE: - # Find the IDE controller for the vm. - ctrller_path = self._vmutils.get_vm_ide_controller( - instance_name, 0) - # Attaching to the first slot - slot = 0 - else: - # Find the SCSI controller for the vm - ctrller_path = self._vmutils.get_vm_scsi_controller( - instance_name) - slot = self._vmutils.get_free_controller_slot(ctrller_path) - return ctrller_path, slot - - def set_disk_qos_specs(self, connection_info, disk_qos_specs): - LOG.info("The %(protocol)s Hyper-V volume driver " - "does not support QoS. Ignoring QoS specs.", - dict(protocol=self._protocol)) - - -class ISCSIVolumeDriver(BaseVolumeDriver): - _is_block_dev = True - _protocol = constants.STORAGE_PROTOCOL_ISCSI - - def __init__(self, *args, **kwargs): - self._extra_connector_args = dict( - initiator_list=CONF.hyperv.iscsi_initiator_list) - - super(ISCSIVolumeDriver, self).__init__(*args, **kwargs) - - -class SMBFSVolumeDriver(BaseVolumeDriver): - _is_block_dev = False - _protocol = constants.STORAGE_PROTOCOL_SMBFS - _extra_connector_args = dict(local_path_for_loopback=True) - - def export_path_synchronized(f): - def wrapper(inst, connection_info, *args, **kwargs): - export_path = inst._get_export_path(connection_info) - - @utils.synchronized(export_path) - def inner(): - return f(inst, connection_info, *args, **kwargs) - return inner() - return wrapper - - def _get_export_path(self, connection_info): - return connection_info['data']['export'].replace('/', '\\') - - @export_path_synchronized - def attach_volume(self, *args, **kwargs): - super(SMBFSVolumeDriver, self).attach_volume(*args, **kwargs) - - @export_path_synchronized - def disconnect_volume(self, *args, **kwargs): - # We synchronize those operations based on the share path in order to - # avoid the situation when a SMB share is unmounted while a volume - # exported by it is about to be attached to an instance. 
-        super(SMBFSVolumeDriver, self).disconnect_volume(*args, **kwargs) - -    def set_disk_qos_specs(self, connection_info, qos_specs): -        supported_qos_specs = ['total_iops_sec', 'total_bytes_sec'] -        VolumeOps.validate_qos_specs(qos_specs, supported_qos_specs) - -        total_bytes_sec = int(qos_specs.get('total_bytes_sec') or 0) -        total_iops_sec = int(qos_specs.get('total_iops_sec') or -                             VolumeOps.bytes_per_sec_to_iops( -                                 total_bytes_sec)) - -        if total_iops_sec: -            disk_path = self.get_disk_resource_path(connection_info) -            self._vmutils.set_disk_qos_specs(disk_path, total_iops_sec) - - -class FCVolumeDriver(BaseVolumeDriver): -    _is_block_dev = True -    _protocol = constants.STORAGE_PROTOCOL_FC - - -class RBDVolumeDriver(BaseVolumeDriver): -    _is_block_dev = True -    _protocol = constants.STORAGE_PROTOCOL_RBD -    _extra_connector_args = dict(do_local_attach=True) diff --git a/releasenotes/notes/remove-hyperv-94d5bfd8a539fe9f.yaml b/releasenotes/notes/remove-hyperv-94d5bfd8a539fe9f.yaml new file mode 100644 index 000000000000..0bee041146e2 --- /dev/null +++ b/releasenotes/notes/remove-hyperv-94d5bfd8a539fe9f.yaml @@ -0,0 +1,29 @@ +--- +upgrade: + - | + The ``HyperV`` virt driver has been removed. It was deprecated in the + Nova 27.2.0 (Antelope) release. This driver was untested and has no + maintainers. In addition, it has a dependency on the OpenStack Winstacker + project that also has been retired. + - | + The following config options, which only apply to the ``HyperV`` virt + driver, have also been removed: + + * ``[hyperv] dynamic_memory_ratio`` + * ``[hyperv] enable_instance_metrics_collection`` + * ``[hyperv] instances_path_share`` + * ``[hyperv] limit_cpu_features`` + * ``[hyperv] mounted_disk_query_retry_count`` + * ``[hyperv] mounted_disk_query_retry_interval`` + * ``[hyperv] power_state_check_timeframe`` + * ``[hyperv] power_state_event_polling_interval`` + * ``[hyperv] qemu_img_cmd`` + * ``[hyperv] vswitch_name`` + * ``[hyperv] wait_soft_reboot_seconds`` + * ``[hyperv] config_drive_cdrom`` + * ``[hyperv] config_drive_inject_password`` + * ``[hyperv] volume_attach_retry_count`` + * ``[hyperv] volume_attach_retry_interval`` + * ``[hyperv] enable_remotefx`` + * ``[hyperv] use_multipath_io`` + * ``[hyperv] iscsi_initiator_list`` diff --git a/setup.cfg b/setup.cfg index eceb6c4ab5fe..f675170e432d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -31,8 +31,6 @@ osprofiler = osprofiler>=1.4.0 # Apache-2.0 zvm = zVMCloudConnector>=1.3.0;sys_platform!='win32' # Apache 2.0 License -hyperv = -    os-win>=5.5.0 # Apache-2.0 vmware = oslo.vmware>=3.6.0 # Apache-2.0 diff --git a/tox.ini b/tox.ini index 852f12c14102..9a6c00691d15 100644 --- a/tox.ini +++ b/tox.ini @@ -25,7 +25,6 @@ deps = -r{toxinidir}/test-requirements.txt extras = zvm -  hyperv vmware passenv = OS_DEBUG