XenAPI: Perform disk operations in dom0

Move some disk operations from DomU to Dom0 to allow Nova
to run independently from the hypervisor.

Some disk types can be created in Dom0 (e.g. swap, ext3) but
others cannot.  Add support here for creating, in Dom0, those
types that can be.  This behaviour will be documented with the
independent_compute config flag in the last patch of this series.

Implements: blueprint xenapi-independent-nova
Change-Id: I634e783a5e19f9710544920e12300b299fc06b36
Bob Ball 2016-06-15 15:49:11 +01:00
parent 76e857cade
commit 3e85b80649
12 changed files with 467 additions and 187 deletions
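
In outline, the series routes filesystem creation through Dom0 where possible. The sketch below condenses that decision as it appears in the vm_utils.py diff further down (vdi_attached, utils.make_dev_path and the partition_utils.py plugin calls are the names used in those diffs; this is an illustration rather than a verbatim excerpt):

# Dom0 can only create a limited set of filesystems (currently ext3 and
# swap), so mkfs runs through the new dom0 plugin when possible and
# falls back to the domU running nova-compute otherwise.
mkfs_in_dom0 = fs_type in ('ext3', 'swap')
with vdi_attached(session, vdi_ref, read_only=False, dom0=True) as dev:
    session.call_plugin_serialized('partition_utils.py', 'make_partition',
                                   dev, '2048', '-')
    if mkfs_in_dom0:
        session.call_plugin_serialized('partition_utils.py', 'mkfs',
                                       dev, '1', fs_type, fs_label)
if fs_type is not None and not mkfs_in_dom0:
    # e.g. ext4 is not supported by dom0: attach to the domU and mkfs there.
    with vdi_attached(session, vdi_ref, read_only=False) as dev:
        utils.mkfs(fs_type, utils.make_dev_path(dev, partition=1),
                   fs_label, run_as_root=True)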

View File

@ -58,7 +58,7 @@ class TestUploadToGlanceAsRawTgz(test.NoDBTestCase):
self.mox.StubOutWithMock(vdi_through_dev, 'utils')
store._get_vdi_ref().AndReturn('vdi_ref')
-        vdi_through_dev.vm_utils.vdi_attached_here(
+        vdi_through_dev.vm_utils.vdi_attached(
'session', 'vdi_ref', read_only=True).AndReturn(
fake_context('dev'))
vdi_through_dev.utils.make_dev_path('dev').AndReturn('devpath')

View File

@ -0,0 +1,102 @@
# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
from nova.tests.unit.virt.xenapi.plugins import plugin_test
class PartitionUtils(plugin_test.PluginTestBase):
def setUp(self):
super(PartitionUtils, self).setUp()
self.partition_utils = self.load_plugin("partition_utils.py")
def test_wait_for_dev_ok(self):
mock_sleep = self.mock_patch_object(self.partition_utils.time,
'sleep')
mock_exists = self.mock_patch_object(self.partition_utils.os.path,
'exists')
mock_exists.side_effect = [False, True]
ret = self.partition_utils.wait_for_dev('session', '/fake', 2)
self.assertEqual(1, mock_sleep.call_count)
self.assertEqual(ret, "/fake")
def test_wait_for_dev_timeout(self):
mock_sleep = self.mock_patch_object(self.partition_utils.time,
'sleep')
mock_exists = self.mock_patch_object(self.partition_utils.os.path,
'exists')
mock_exists.side_effect = [False, False, True]
ret = self.partition_utils.wait_for_dev('session', '/fake', 2)
self.assertEqual(2, mock_sleep.call_count)
self.assertEqual(ret, "")
def test_mkfs_removes_partitions_ok(self):
mock_run = self.mock_patch_object(self.partition_utils.utils,
'run_command')
mock__mkfs = self.mock_patch_object(self.partition_utils, '_mkfs')
self.partition_utils.mkfs('session', 'fakedev', '1', 'ext3', 'label')
mock__mkfs.assert_called_with('ext3', '/dev/mapper/fakedevp1',
'label')
expected_calls = [mock.call(['kpartx', '-avspp', '/dev/fakedev'])]
expected_calls.append(mock.call(['kpartx', '-dvspp', '/dev/fakedev']))
mock_run.assert_has_calls(expected_calls)
def test_mkfs_removes_partitions_exc(self):
mock_run = self.mock_patch_object(self.partition_utils.utils,
'run_command')
mock__mkfs = self.mock_patch_object(self.partition_utils, '_mkfs')
mock__mkfs.side_effect = test.TestingException()
self.assertRaises(test.TestingException, self.partition_utils.mkfs,
'session', 'fakedev', '1', 'ext3', 'label')
expected_calls = [mock.call(['kpartx', '-avspp', '/dev/fakedev'])]
expected_calls.append(mock.call(['kpartx', '-dvspp', '/dev/fakedev']))
mock_run.assert_has_calls(expected_calls)
def test_mkfs_ext3_no_label(self):
mock_run = self.mock_patch_object(self.partition_utils.utils,
'run_command')
self.partition_utils._mkfs('ext3', '/dev/sda1', None)
mock_run.assert_called_with(['mkfs', '-t', 'ext3', '-F', '/dev/sda1'])
def test_mkfs_ext3(self):
mock_run = self.mock_patch_object(self.partition_utils.utils,
'run_command')
self.partition_utils._mkfs('ext3', '/dev/sda1', 'label')
mock_run.assert_called_with(['mkfs', '-t', 'ext3', '-F', '-L',
'label', '/dev/sda1'])
def test_mkfs_swap(self):
mock_run = self.mock_patch_object(self.partition_utils.utils,
'run_command')
self.partition_utils._mkfs('swap', '/dev/sda1', 'ignored')
mock_run.assert_called_with(['mkswap', '/dev/sda1'])
def test_make_partition(self):
mock_run = self.mock_patch_object(self.partition_utils.utils,
'run_command')
self.partition_utils.make_partition('session', 'dev', 'start', '-')
mock_run.assert_called_with(['sfdisk', '-uS', '/dev/dev'],
'start,;\n')

View File

@ -257,7 +257,7 @@ def stub_out_vm_methods(stubs):
def fake_generate_ephemeral(*args):
pass
-        def fake_wait_for_device(dev):
+        def fake_wait_for_device(session, dev, dom0, max_seconds):
pass
stubs.Set(vmops.VMOps, "_acquire_bootlock", fake_acquire_bootlock)

View File

@ -41,7 +41,6 @@ from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
from nova.tests.unit.virt.xenapi import stubs
-from nova.tests.unit.virt.xenapi import test_xenapi
from nova.tests import uuidsentinel as uuids
from nova import utils
from nova.virt import hardware
@ -168,8 +167,8 @@ class GenerateConfigDriveTestCase(VMUtilsTestBase):
'configdrive',
64 * units.Mi).AndReturn('vdi_ref')
-        self.mox.StubOutWithMock(vm_utils, 'vdi_attached_here')
-        vm_utils.vdi_attached_here(
+        self.mox.StubOutWithMock(vm_utils, 'vdi_attached')
+        vm_utils.vdi_attached(
'session', 'vdi_ref', read_only=False).AndReturn(
contextified('mounted_dev'))
@ -205,7 +204,7 @@ class GenerateConfigDriveTestCase(VMUtilsTestBase):
'userdevice', "nw_info")
@mock.patch.object(vm_utils, "destroy_vdi")
-    @mock.patch.object(vm_utils, "vdi_attached_here")
+    @mock.patch.object(vm_utils, "vdi_attached")
@mock.patch.object(vm_utils, "create_vdi")
@mock.patch.object(vm_utils, "safe_find_sr")
def test_vdi_cleaned_up(self, mock_find, mock_create_vdi, mock_attached,
@ -1065,119 +1064,140 @@ class VDIOtherConfigTestCase(VMUtilsTestBase):
class GenerateDiskTestCase(VMUtilsTestBase):
-    def setUp(self):
-        super(GenerateDiskTestCase, self).setUp()
-        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
-        self.fixture.config(disable_process_locking=True,
-                            group='oslo_concurrency')
-        self.flags(instance_name_template='%d',
-                   firewall_driver='nova.virt.xenapi.firewall.'
-                                   'Dom0IptablesFirewallDriver')
-        self.flags(connection_url='test_url',
-                   connection_password='test_pass',
-                   group='xenserver')
-        stubs.stubout_session(self.stubs, fake.SessionBase)
-        driver = xenapi_conn.XenAPIDriver(False)
-        self.session = driver._session
-        self.session.is_local_connection = False
-        self.vm_ref = fake.create_vm("foo", "Running")
-    def tearDown(self):
-        super(GenerateDiskTestCase, self).tearDown()
-        fake.destroy_vm(self.vm_ref)
+    @mock.patch.object(vm_utils, 'vdi_attached')
+    @mock.patch.object(vm_utils.utils, 'mkfs',
+                       side_effect = test.TestingException())
+    @mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref')
+    @mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
+    @mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
+    @mock.patch.object(vm_utils, 'create_vbd')
+    def test_generate_disk_with_no_fs_given(self, mock_create_vbd,
+                                            mock_create_vdi, mock_findsr,
+                                            mock_dom0ref, mock_mkfs,
+                                            mock_attached_here):
+        session = _get_fake_session()
+        vdi_ref = mock.MagicMock()
+        mock_attached_here.return_value = vdi_ref
-    def _expect_parted_calls(self):
-        self.mox.StubOutWithMock(utils, "execute")
-        self.mox.StubOutWithMock(utils, "trycmd")
-        self.mox.StubOutWithMock(vm_utils, "destroy_vdi")
-        self.mox.StubOutWithMock(vm_utils.os.path, "exists")
-        if self.session.is_local_connection:
-            utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
-                          'msdos', check_exit_code=False, run_as_root=True)
-            utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
-                          'primary', '2048s', '-0',
-                          check_exit_code=False, run_as_root=True)
-            vm_utils.os.path.exists('/dev/mapper/fakedev1').AndReturn(True)
-            utils.trycmd('kpartx', '-a', '/dev/fakedev',
-                         discard_warnings=True, run_as_root=True)
-        else:
-            utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
-                          'msdos', check_exit_code=True, run_as_root=True)
-            utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
-                          'primary', '2048s', '-0',
-                          check_exit_code=True, run_as_root=True)
+        instance = {'uuid': 'fake_uuid'}
+        vm_utils._generate_disk(session, instance, 'vm_ref', '2',
+                                'name', 'user', 10, None, None)
-    def _check_vdi(self, vdi_ref, check_attached=True):
-        vdi_rec = self.session.call_xenapi("VDI.get_record", vdi_ref)
-        self.assertEqual(str(10 * units.Mi), vdi_rec["virtual_size"])
-        if check_attached:
-            vbd_ref = vdi_rec["VBDs"][0]
-            vbd_rec = self.session.call_xenapi("VBD.get_record", vbd_ref)
-            self.assertEqual(self.vm_ref, vbd_rec['VM'])
-        else:
-            self.assertEqual(0, len(vdi_rec["VBDs"]))
+        mock_attached_here.assert_called_once_with(session, 'vdi_ref',
+                                                   read_only=False,
+                                                   dom0=True)
-    @test_xenapi.stub_vm_utils_with_vdi_attached_here
-    def test_generate_disk_with_no_fs_given(self):
-        self._expect_parted_calls()
+        mock_create_vbd.assert_called_with(session, 'vm_ref', 'vdi_ref', '2',
+                                           bootable=False)
+    @mock.patch.object(vm_utils, 'vdi_attached')
+    @mock.patch.object(vm_utils.utils, 'mkfs')
+    @mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref')
+    @mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
+    @mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
+    @mock.patch.object(vm_utils.utils, 'make_dev_path',
+                       return_value='/dev/fake_devp1')
+    @mock.patch.object(vm_utils, 'create_vbd')
+    def test_generate_disk_swap(self, mock_create_vbd, mock_make_path,
+                                mock_create_vdi,
+                                mock_findsr, mock_dom0ref, mock_mkfs,
+                                mock_attached_here):
+        session = _get_fake_session()
+        vdi_dev = mock.MagicMock()
+        mock_attached_here.return_value = vdi_dev
+        vdi_dev.__enter__.return_value = 'fakedev'
+        instance = {'uuid': 'fake_uuid'}
+        vm_utils._generate_disk(session, instance, 'vm_ref', '2',
+                                'name', 'user', 10, 'swap',
+                                'swap-1')
+        mock_attached_here.assert_any_call(session, 'vdi_ref',
+                                           read_only=False,
+                                           dom0=True)
+        # As swap is supported in dom0, mkfs will run there
+        session.call_plugin_serialized.assert_any_call(
+            'partition_utils.py', 'mkfs', 'fakedev', '1', 'swap', 'swap-1')
+        mock_create_vbd.assert_called_with(session, 'vm_ref', 'vdi_ref', '2',
+                                           bootable=False)
+    @mock.patch.object(vm_utils, 'vdi_attached')
+    @mock.patch.object(vm_utils.utils, 'mkfs')
+    @mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref')
+    @mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
+    @mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
+    @mock.patch.object(vm_utils.utils, 'make_dev_path',
+                       return_value='/dev/fake_devp1')
+    @mock.patch.object(vm_utils, 'create_vbd')
+    def test_generate_disk_ephemeral(self, mock_create_vbd, mock_make_path,
+                                     mock_create_vdi, mock_findsr,
+                                     mock_dom0ref, mock_mkfs,
+                                     mock_attached_here):
+        session = _get_fake_session()
+        vdi_ref = mock.MagicMock()
+        mock_attached_here.return_value = vdi_ref
+        instance = {'uuid': 'fake_uuid'}
+        vm_utils._generate_disk(session, instance, 'vm_ref', '2',
+                                'name', 'ephemeral', 10, 'ext4',
+                                'ephemeral-1')
+        mock_attached_here.assert_any_call(session, 'vdi_ref',
+                                           read_only=False,
+                                           dom0=True)
+        # As ext4 is not supported in dom0, mkfs will run in domU
+        mock_attached_here.assert_any_call(session, 'vdi_ref',
+                                           read_only=False)
+        mock_mkfs.assert_called_with('ext4', '/dev/fake_devp1',
+                                     'ephemeral-1', run_as_root=True)
+        mock_create_vbd.assert_called_with(session, 'vm_ref', 'vdi_ref', '2',
+                                           bootable=False)
+    @mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
+    @mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
+    @mock.patch.object(vm_utils, '_get_dom0_ref',
+                       side_effect = test.TestingException())
+    @mock.patch.object(vm_utils, 'safe_destroy_vdis')
+    def test_generate_disk_ensure_cleanup_called(self, mock_destroy_vdis,
+                                                 mock_dom0ref,
+                                                 mock_create_vdi,
+                                                 mock_findsr):
+        session = _get_fake_session()
+        instance = {'uuid': 'fake_uuid'}
+        self.assertRaises(test.TestingException, vm_utils._generate_disk,
+                          session, instance, None, '2', 'name', 'user', 10,
+                          None, None)
+        mock_destroy_vdis.assert_called_once_with(session, ['vdi_ref'])
+    @mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
+    @mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
+    @mock.patch.object(vm_utils, 'vdi_attached')
+    @mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref')
+    @mock.patch.object(vm_utils, 'create_vbd')
+    def test_generate_disk_ephemeral_no_vmref(self, mock_create_vbd,
+                                              mock_dom0_ref,
+                                              mock_attached_here,
+                                              mock_create_vdi,
+                                              mock_findsr):
+        session = _get_fake_session()
+        vdi_ref = mock.MagicMock()
+        mock_attached_here.return_value = vdi_ref
+        instance = {'uuid': 'fake_uuid'}
-        self.mox.ReplayAll()
         vdi_ref = vm_utils._generate_disk(
-            self.session, {"uuid": "fake_uuid"},
-            self.vm_ref, "2", "name", "user", 10, None, None)
-        self._check_vdi(vdi_ref)
+            session, instance,
+            None, None, 'name', 'user', 10, None, None)
-    @test_xenapi.stub_vm_utils_with_vdi_attached_here
-    def test_generate_disk_swap(self):
-        self._expect_parted_calls()
-        utils.execute('mkswap', '/dev/fakedev1', run_as_root=True)
-        self.mox.ReplayAll()
-        vdi_ref = vm_utils._generate_disk(
-            self.session, {"uuid": "fake_uuid"},
-            self.vm_ref, "2", "name", "swap", 10, "swap", None)
-        self._check_vdi(vdi_ref)
-    @test_xenapi.stub_vm_utils_with_vdi_attached_here
-    def test_generate_disk_ephemeral(self):
-        self._expect_parted_calls()
-        utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'ephemeral',
-                      '/dev/fakedev1', run_as_root=True)
-        self.mox.ReplayAll()
-        vdi_ref = vm_utils._generate_disk(
-            self.session, {"uuid": "fake_uuid"}, self.vm_ref,
-            "4", "name", "ephemeral", 10, "ext4", "ephemeral")
-        self._check_vdi(vdi_ref)
-    @test_xenapi.stub_vm_utils_with_vdi_attached_here
-    def test_generate_disk_ensure_cleanup_called(self):
-        self._expect_parted_calls()
-        utils.execute(
-            'mkfs', '-t', 'ext4', '-F', '-L', 'ephemeral', '/dev/fakedev1',
-            run_as_root=True).AndRaise(test.TestingException)
-        vm_utils.destroy_vdi(
-            self.session,
-            mox.IgnoreArg()).AndRaise(exception.StorageError(reason=""))
-        self.mox.ReplayAll()
-        self.assertRaises(
-            test.TestingException, vm_utils._generate_disk,
-            self.session, {"uuid": "fake_uuid"},
-            self.vm_ref, "4", "name", "ephemeral", 10, "ext4", "ephemeral")
-    @test_xenapi.stub_vm_utils_with_vdi_attached_here
-    def test_generate_disk_ephemeral_local_not_attached(self):
-        self.session.is_local_connection = True
-        self._expect_parted_calls()
-        utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'ephemeral',
-                      '/dev/mapper/fakedev1', run_as_root=True)
-        self.mox.ReplayAll()
-        vdi_ref = vm_utils._generate_disk(
-            self.session, {"uuid": "fake_uuid"},
-            None, "4", "name", "ephemeral", 10, "ext4", "ephemeral")
-        self._check_vdi(vdi_ref, check_attached=False)
+        mock_attached_here.assert_called_once_with(session, 'vdi_ref',
+                                                   read_only=False, dom0=True)
+        self.assertFalse(mock_create_vbd.called)
class GenerateEphemeralTestCase(VMUtilsTestBase):
@ -1827,22 +1847,6 @@ class GetAllVdisTestCase(VMUtilsTestBase):
session.call_xenapi.assert_called_once_with("SR.get_VDIs", sr_ref)
class VDIAttachedHere(VMUtilsTestBase):
-    @mock.patch.object(vm_utils, 'destroy_vbd')
-    @mock.patch.object(vm_utils, '_get_this_vm_ref')
-    @mock.patch.object(vm_utils, 'create_vbd')
-    @mock.patch.object(vm_utils, '_remap_vbd_dev')
-    @mock.patch.object(vm_utils, '_wait_for_device')
-    @mock.patch.object(utils, 'execute')
-    def test_sync_called(self, mock_execute, mock_wait_for_device,
-                         mock_remap_vbd_dev, mock_create_vbd,
-                         mock_get_this_vm_ref, mock_destroy_vbd):
-        session = _get_fake_session()
-        with vm_utils.vdi_attached_here(session, 'vdi_ref'):
-            pass
-        mock_execute.assert_called_with('sync', run_as_root=True)
class SnapshotAttachedHereTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, '_snapshot_attached_here_impl')
def test_snapshot_attached_here(self, mock_impl):

View File

@ -178,30 +178,30 @@ def get_fake_device_info():
return fake
-def stub_vm_utils_with_vdi_attached_here(function):
-    """vm_utils.with_vdi_attached_here needs to be stubbed out because it
+def stub_vm_utils_with_vdi_attached(function):
+    """vm_utils.with_vdi_attached needs to be stubbed out because it
calls down to the filesystem to attach a vdi. This provides a
decorator to handle that.
"""
@functools.wraps(function)
def decorated_function(self, *args, **kwargs):
@contextlib.contextmanager
-        def fake_vdi_attached_here(*args, **kwargs):
+        def fake_vdi_attached(*args, **kwargs):
fake_dev = 'fakedev'
yield fake_dev
def fake_image_download(*args, **kwargs):
pass
-        orig_vdi_attached_here = vm_utils.vdi_attached_here
+        orig_vdi_attached = vm_utils.vdi_attached
orig_image_download = fake_image._FakeImageService.download
try:
-            vm_utils.vdi_attached_here = fake_vdi_attached_here
+            vm_utils.vdi_attached = fake_vdi_attached
fake_image._FakeImageService.download = fake_image_download
return function(self, *args, **kwargs)
finally:
fake_image._FakeImageService.download = orig_image_download
-            vm_utils.vdi_attached_here = orig_vdi_attached_here
+            vm_utils.vdi_attached = orig_vdi_attached
return decorated_function
@ -772,6 +772,9 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
'ipxe', 'inject', '/sr/path', mox.IgnoreArg(),
'http://boot.example.com', '192.168.1.100', '255.255.255.0',
'192.168.1.1', '192.168.1.3', '/root/mkisofs')
+        self.conn._session.call_plugin_serialized('partition_utils.py',
+                                                  'make_partition',
+                                                  'fakedev', '2048', '-')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
@ -781,8 +784,11 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
ipxe_boot_menu_url='http://boot.example.com',
group='xenserver')
-        # call_plugin_serialized shouldn't be called
+        # ipxe inject shouldn't be called
        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
+        self.conn._session.call_plugin_serialized('partition_utils.py',
+                                                  'make_partition',
+                                                  'fakedev', '2048', '-')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
@ -792,8 +798,11 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
ipxe_boot_menu_url=None,
group='xenserver')
-        # call_plugin_serialized shouldn't be called
+        # ipxe inject shouldn't be called
        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
+        self.conn._session.call_plugin_serialized('partition_utils.py',
+                                                  'make_partition',
+                                                  'fakedev', '2048', '-')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
@ -803,8 +812,11 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
ipxe_boot_menu_url='http://boot.example.com',
group='xenserver')
-        # call_plugin_serialized shouldn't be called
+        # ipxe inject shouldn't be called
        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
+        self.conn._session.call_plugin_serialized('partition_utils.py',
+                                                  'make_partition',
+                                                  'fakedev', '2048', '-')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
@ -1871,7 +1883,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=False)
-    @stub_vm_utils_with_vdi_attached_here
+    @stub_vm_utils_with_vdi_attached
def test_migrate_too_many_partitions_no_resize_down(self):
instance = self._create_instance()
xenapi_fake.create_vm(instance['name'], 'Running')
@ -1883,12 +1895,13 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
self.mox.ReplayAll()
self.assertRaises(exception.InstanceFaultRollback,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', flavor, None)
-    @stub_vm_utils_with_vdi_attached_here
+    @stub_vm_utils_with_vdi_attached
def test_migrate_bad_fs_type_no_resize_down(self):
instance = self._create_instance()
xenapi_fake.create_vm(instance['name'], 'Running')
@ -1900,6 +1913,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
self.mox.ReplayAll()
self.assertRaises(exception.InstanceFaultRollback,
conn.migrate_disk_and_power_off,
self.context, instance,
@ -2334,6 +2348,8 @@ class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
{'id': uuids.image_id,
'disk_format': 'vhd',
'properties': {'vm_mode': 'xen'}})
self.mox.ReplayAll()
self.conn._vmops._attach_disks(instance, image_meta, vm_ref,
instance['name'], vdis, disk_image_type, "fake_nw_inf")
@ -2346,7 +2362,7 @@ class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
self.instance_values['auto_disk_config'] = False
self.assertIsPartitionCalled(False)
-    @stub_vm_utils_with_vdi_attached_here
+    @stub_vm_utils_with_vdi_attached
def test_instance_auto_disk_config_fails_safe_two_partitions(self):
# Should not partition unless fail safes pass.
self.instance_values['auto_disk_config'] = True
@ -2358,7 +2374,7 @@ class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
self.assertIsPartitionCalled(False)
-    @stub_vm_utils_with_vdi_attached_here
+    @stub_vm_utils_with_vdi_attached
def test_instance_auto_disk_config_fails_safe_badly_numbered(self):
# Should not partition unless fail safes pass.
self.instance_values['auto_disk_config'] = True
@ -2370,7 +2386,7 @@ class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
self.assertIsPartitionCalled(False)
-    @stub_vm_utils_with_vdi_attached_here
+    @stub_vm_utils_with_vdi_attached
def test_instance_auto_disk_config_fails_safe_bad_fstype(self):
# Should not partition unless fail safes pass.
self.instance_values['auto_disk_config'] = True
@ -2382,7 +2398,7 @@ class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
self.assertIsPartitionCalled(False)
-    @stub_vm_utils_with_vdi_attached_here
+    @stub_vm_utils_with_vdi_attached
def test_instance_auto_disk_config_passes_fail_safes(self):
"""Should partition if instance is marked as auto_disk_config=True and
virt-layer specific fail-safe checks pass.

View File

@ -70,7 +70,7 @@ class XenAPISession(object):
# changed in development environments.
# MAJOR VERSION: Incompatible changes with the plugins
# MINOR VERSION: Compatible changes, new plugins, etc
-    PLUGIN_REQUIRED_VERSION = '1.6'
+    PLUGIN_REQUIRED_VERSION = '1.7'
def __init__(self, url, user, pw):
version_string = version.version_string_with_package()
@ -82,6 +82,7 @@ class XenAPISession(object):
self.XenAPI = XenAPI
self._sessions = queue.Queue()
self.is_slave = False
+        self.host_checked = False
exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
"(is the Dom0 disk full?)"))
url = self._create_first_session(url, user, pw, exception)

View File

@ -81,7 +81,8 @@ def reset():
create_vm('fake dom 0',
'Running',
is_a_template=False,
-              is_control_domain=True)
+              is_control_domain=True,
+              domid='0')
def reset_table(table):
@ -125,12 +126,12 @@ def create_vm(name_label, status, **kwargs):
domid = "-1"
resident_on = ''
-    vm_rec = kwargs.copy()
-    vm_rec.update({'name_label': name_label,
-                   'domid': domid,
-                   'power_state': status,
-                   'blocked_operations': {},
-                   'resident_on': resident_on})
+    vm_rec = {'name_label': name_label,
+              'domid': domid,
+              'power_state': status,
+              'blocked_operations': {},
+              'resident_on': resident_on}
+    vm_rec.update(kwargs.copy())
vm_ref = _create_object('VM', vm_rec)
after_VM_create(vm_ref, vm_rec)
return vm_ref
@ -494,7 +495,7 @@ class SessionBase(object):
if rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_ATTACHED', ref])
rec['currently_attached'] = True
-        rec['device'] = rec['userdevice']
+        rec['device'] = 'fakedev'
def VBD_unplug(self, _1, ref):
rec = get_record('VBD', ref)
@ -760,12 +761,16 @@ class SessionBase(object):
return base64.b64encode(zlib.compress("dom_id: %s" % dom_id))
def _plugin_nova_plugin_version_get_version(self, method, args):
-        return pickle.dumps("1.6")
+        return pickle.dumps("1.7")
def _plugin_xenhost_query_gc(self, method, args):
return pickle.dumps("False")
+    def _plugin_partition_utils_dot_py_make_partition(self, method, args):
+        return pickle.dumps(None)
def host_call_plugin(self, _1, _2, plugin, method, args):
+        plugin = plugin.replace('.', '_dot_')
func = getattr(self, '_plugin_%s_%s' % (plugin, method), None)
if not func:
raise Exception('No simulation in host_call_plugin for %s,%s' %

View File

@ -76,8 +76,8 @@ class UploadToGlanceAsRawTgz(object):
def upload_image(self):
vdi_ref = self._get_vdi_ref()
-        with vm_utils.vdi_attached_here(self.session, vdi_ref,
-                                        read_only=True) as dev:
+        with vm_utils.vdi_attached(self.session, vdi_ref,
+                                   read_only=True) as dev:
devpath = utils.make_dev_path(dev)
with utils.temporary_chown(devpath):
self._perform_upload(devpath)

View File

@ -893,12 +893,15 @@ def _auto_configure_disk(session, vdi_ref, new_gb):
2. The disk must have only one partition.
3. The file-system on the one partition must be ext3 or ext4.
+    4. We are not running in independent_compute mode (checked by
+       vdi_attached)
"""
if new_gb == 0:
LOG.debug("Skipping auto_config_disk as destination size is 0GB")
return
-    with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
+    with vdi_attached(session, vdi_ref, read_only=False) as dev:
partitions = _get_partitions(dev)
if len(partitions) != 1:
@ -963,13 +966,20 @@ def _generate_disk(session, instance, vm_ref, userdevice, name_label,
disk_type, size_mb, fs_type, fs_label=None):
"""Steps to programmatically generate a disk:
-        1. Create VDI of desired size
+        1. Create VDI of desired size
-        2. Attach VDI to compute worker
+        2. Attach VDI to Dom0
-        3. Create partition
+        3. Create partition
+           3.a. If the partition type is supported by dom0 (currently ext3,
+                swap) then create it while the VDI is attached to dom0.
+           3.b. If the partition type is not supported by dom0, attach the
+                VDI to the domU and create there.
+           This split between DomU/Dom0 ensures that we can create most
+           VM types in the "isolated compute" case.
-        4. Create VBD between instance VM and VDI
+        4. Create VBD between instance VM and VDI
"""
# 1. Create VDI
sr_ref = safe_find_sr(session)
@ -979,16 +989,26 @@ def _generate_disk(session, instance, vm_ref, userdevice, name_label,
virtual_size)
try:
-        # 2. Attach VDI to compute worker (VBD hotplug)
-        with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
+        # 2. Attach VDI to Dom0 (VBD hotplug)
+        mkfs_in_dom0 = fs_type in ('ext3', 'swap')
+        with vdi_attached(session, vdi_ref, read_only=False,
+                          dom0=True) as dev:
            # 3. Create partition
-            partition_start = "2048s"
-            partition_end = "-0"
+            partition_start = "2048"
+            partition_end = "-"
-            partition_path = _make_partition(session, dev,
-                                             partition_start, partition_end)
+            session.call_plugin_serialized('partition_utils.py',
+                                           'make_partition', dev,
+                                           partition_start, partition_end)
-        if fs_type is not None:
+            if mkfs_in_dom0:
+                session.call_plugin_serialized('partition_utils.py', 'mkfs',
+                                               dev, '1', fs_type, fs_label)
+        # 3.a. dom0 does not support nfs/ext4, so may have to mkfs in domU
+        if fs_type is not None and not mkfs_in_dom0:
+            with vdi_attached(session, vdi_ref, read_only=False) as dev:
+                partition_path = utils.make_dev_path(dev, partition=1)
                utils.mkfs(fs_type, partition_path, fs_label,
                           run_as_root=True)
@ -1084,7 +1104,7 @@ def generate_configdrive(session, instance, vm_ref, userdevice,
'configdrive', configdrive.CONFIGDRIVESIZE_BYTES)
try:
-    with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
+    with vdi_attached(session, vdi_ref, read_only=False) as dev:
extra_md = {}
if admin_password:
extra_md['admin_pass'] = admin_password
@ -1496,7 +1516,7 @@ def _fetch_disk_image(context, session, instance, name_label, image_id,
filename = None
vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
-    with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
+    with vdi_attached(session, vdi_ref, read_only=False) as dev:
_stream_disk(
session, image.stream_to, image_type, virtual_size, dev)
@ -1658,7 +1678,7 @@ def preconfigure_instance(session, instance, vdi_ref, network_info):
if not mount_required:
return
-    with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
+    with vdi_attached(session, vdi_ref, read_only=False) as dev:
_mounted_processing(dev, key, net, metadata)
@ -2080,16 +2100,24 @@ def _remap_vbd_dev(dev):
return remapped_dev
-def _wait_for_device(dev):
+def _wait_for_device(session, dev, dom0, max_seconds):
    """Wait for device node to appear."""
-    for i in range(0, CONF.xenserver.block_device_creation_timeout):
-        dev_path = utils.make_dev_path(dev)
-        if os.path.exists(dev_path):
-            return
-        time.sleep(1)
+    dev_path = utils.make_dev_path(dev)
+    found_path = None
+    if dom0:
+        found_path = session.call_plugin_serialized('partition_utils.py',
+                                                    'wait_for_dev',
+                                                    dev_path, max_seconds)
+    else:
+        for i in range(0, max_seconds):
+            if os.path.exists(dev_path):
+                found_path = dev_path
+                break
+            time.sleep(1)
-    raise exception.StorageError(
-        reason=_('Timeout waiting for device %s to be created') % dev)
+    if found_path is None:
+        raise exception.StorageError(
+            reason=_('Timeout waiting for device %s to be created') % dev)
def cleanup_attached_vdis(session):
@ -2116,8 +2144,13 @@ def cleanup_attached_vdis(session):
@contextlib.contextmanager
-def vdi_attached_here(session, vdi_ref, read_only=False):
-    this_vm_ref = _get_this_vm_ref(session)
+def vdi_attached(session, vdi_ref, read_only=False, dom0=False):
+    if dom0:
+        this_vm_ref = _get_dom0_ref(session)
+    else:
+        # Make sure we are running as a domU.
+        ensure_correct_host(session)
+        this_vm_ref = _get_this_vm_ref(session)
vbd_ref = create_vbd(session, this_vm_ref, vdi_ref, 'autodetect',
read_only=read_only, bootable=False)
@ -2134,10 +2167,13 @@ def vdi_attached_here(session, vdi_ref, read_only=False):
LOG.debug('VBD %(vbd_ref)s plugged into wrong dev, '
'remapping to %(dev)s',
{'vbd_ref': vbd_ref, 'dev': dev})
-        _wait_for_device(dev)
+        _wait_for_device(session, dev, dom0,
+                         CONF.xenserver.block_device_creation_timeout)
        yield dev
    finally:
-        utils.execute('sync', run_as_root=True)
+        # As we can not have filesystems mounted here (we cannot
+        # destroy the VBD with filesystems mounted), it is not
+        # useful to call sync.
LOG.debug('Destroying VBD for VDI %s ... ', vdi_ref)
unplug_vbd(session, vbd_ref, this_vm_ref)
finally:
@ -2154,11 +2190,19 @@ def _get_sys_hypervisor_uuid():
return f.readline().strip()
+def _get_dom0_ref(session):
+    vms = session.call_xenapi("VM.get_all_records_where",
+                              'field "domid"="0" and '
+                              'field "resident_on"="%s"' %
+                              session.host_ref)
+    return list(vms.keys())[0]
def get_this_vm_uuid(session):
if session and session.is_local_connection:
# UUID is the control domain running on this host
vms = session.call_xenapi("VM.get_all_records_where",
-                                  'field "is_control_domain"="true" and '
+                                  'field "domid"="0" and '
'field "resident_on"="%s"' %
session.host_ref)
return vms[list(vms.keys())[0]]['uuid']
@ -2353,10 +2397,10 @@ def _copy_partition(session, src_ref, dst_ref, partition, virtual_size):
# Part of disk taken up by MBR
virtual_size -= MBR_SIZE_BYTES
-    with vdi_attached_here(session, src_ref, read_only=True) as src:
+    with vdi_attached(session, src_ref, read_only=True) as src:
        src_path = utils.make_dev_path(src, partition=partition)
-        with vdi_attached_here(session, dst_ref, read_only=False) as dst:
+        with vdi_attached(session, dst_ref, read_only=False) as dst:
dst_path = utils.make_dev_path(dst, partition=partition)
_write_partition(session, virtual_size, dst)
@ -2421,12 +2465,17 @@ def _mounted_processing(device, key, net, metadata):
def ensure_correct_host(session):
"""Ensure we're connected to the host we're running on. This is the
-    required configuration for anything that uses vdi_attached_here.
+    required configuration for anything that uses vdi_attached without
+    the dom0 flag.
"""
+    if session.host_checked:
+        return
this_vm_uuid = get_this_vm_uuid(session)
try:
session.call_xenapi('VM.get_by_uuid', this_vm_uuid)
+        session.host_checked = True
except session.XenAPI.Failure as exc:
if exc.details[0] != 'UUID_INVALID':
raise
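
Pulling the vm_utils.py pieces above together, the dom0 attach path now works roughly as follows (a condensed illustration using names from the diff, not a verbatim excerpt; error handling and VBD cleanup are omitted):

# vdi_attached(dom0=True) plugs the VBD into the control domain instead
# of the domU that runs nova-compute:
this_vm_ref = _get_dom0_ref(session)   # dom0 VM resident on this host
vbd_ref = create_vbd(session, this_vm_ref, vdi_ref, 'autodetect',
                     read_only=False, bootable=False)
# ... after plugging the VBD, wait for the device node via the dom0
# plugin, because /dev/<dev> only appears inside dom0:
found_path = session.call_plugin_serialized(
    'partition_utils.py', 'wait_for_dev', utils.make_dev_path(dev),
    CONF.xenserver.block_device_creation_timeout)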

View File

@ -32,7 +32,8 @@ import utils
# 1.4 - Add support of Glance v2 api
# 1.5 - Added function for network configuration on ovs bridge
# 1.6 - Add function for network configuration on Linux bridge
-PLUGIN_VERSION = "1.6"
+# 1.7 - Add Partition utilities plugin
+PLUGIN_VERSION = "1.7"
def get_version(session):

View File

@ -0,0 +1,87 @@
#!/usr/bin/env python
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features
import logging
import os
import time
import pluginlib_nova as pluginlib
import utils
pluginlib.configure_logging("disk_utils")
_ = pluginlib._
def wait_for_dev(session, dev_path, max_seconds):
for i in range(0, max_seconds):
if os.path.exists(dev_path):
return dev_path
time.sleep(1)
return ""
def make_partition(session, dev, partition_start, partition_end):
dev_path = utils.make_dev_path(dev)
if partition_end != "-":
raise pluginlib.PluginError("Can only create unbounded partitions")
utils.run_command(['sfdisk', '-uS', dev_path],
'%s,;\n' % (partition_start))
def _mkfs(fs, path, label):
"""Format a file or block device
:param fs: Filesystem type (only 'swap', 'ext3' supported)
:param path: Path to file or block device to format
:param label: Volume label to use
"""
if fs == 'swap':
args = ['mkswap']
elif fs == 'ext3':
args = ['mkfs', '-t', fs]
# add -F to force no interactive execute on non-block device.
args.extend(['-F'])
if label:
args.extend(['-L', label])
else:
raise pluginlib.PluginError("Partition type %s not supported" % fs)
args.append(path)
utils.run_command(args)
def mkfs(session, dev, partnum, fs_type, fs_label):
dev_path = utils.make_dev_path(dev)
out = utils.run_command(['kpartx', '-avspp', dev_path])
try:
logging.info('kpartx output: %s' % out)
mapperdir = os.path.join('/dev', 'mapper')
dev_base = os.path.basename(dev)
partition_path = os.path.join(mapperdir, "%sp%s" % (dev_base, partnum))
_mkfs(fs_type, partition_path, fs_label)
finally:
# Always remove partitions otherwise we can't unplug the VBD
utils.run_command(['kpartx', '-dvspp', dev_path])
if __name__ == "__main__":
utils.register_plugin_calls(wait_for_dev,
make_partition,
mkfs)
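
For reference, the compute side drives these entry points through call_plugin_serialized, as in the vm_utils.py diff earlier. A minimal illustrative sequence ('xvdd' and the 30-second timeout are made up for the example):

session.call_plugin_serialized('partition_utils.py', 'wait_for_dev',
                               '/dev/xvdd', 30)
session.call_plugin_serialized('partition_utils.py', 'make_partition',
                               'xvdd', '2048', '-')
session.call_plugin_serialized('partition_utils.py', 'mkfs',
                               'xvdd', '1', 'ext3', 'disk-label')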

View File

@ -486,6 +486,21 @@ def extract_tarball(fileobj, path, callback=None):
finish_subprocess(tar_proc, tar_cmd)
+def make_dev_path(dev, partition=None, base='/dev'):
+    """Return a path to a particular device.
+    >>> make_dev_path('xvdc')
+    /dev/xvdc
+    >>> make_dev_path('xvdc', 1)
+    /dev/xvdc1
+    """
+    path = os.path.join(base, dev)
+    if partition:
+        path += str(partition)
+    return path
def _handle_serialization(func):
def wrapped(session, params):
params = pickle.loads(params['params'])