Merge "libvirt: remove glusterfs volume driver"

This commit is contained in:
Jenkins 2017-05-26 19:19:04 +00:00 committed by Gerrit Code Review
commit dd1a3ac5ae
9 changed files with 43 additions and 337 deletions

View File

@@ -9,10 +9,10 @@ file format is supported.
This API is only implemented by the libvirt compute driver.
An internal snapshot that lacks storage such as NFS or GlusterFS can use
An internal snapshot that lacks storage such as NFS can use
an emulator/hypervisor to add the snapshot feature.
This is used to enable snapshot of volumes on backends such as NFS or
GlusterFS by storing data as qcow2 files on these volumes.
This is used to enable snapshot of volumes on backends such as NFS
by storing data as qcow2 files on these volumes.
This API is only ever called by Cinder, where it is used to create a snapshot
for drivers that extend the remotefs Cinder driver.
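
To make the call path described above concrete, the sketch below shows roughly how a remotefs-based Cinder driver would invoke this API through python-novaclient's assisted-volume-snapshots manager. This is an illustrative sketch, not code from this commit or from Cinder: the Keystone endpoint, credentials, UUIDs, and overlay file name are assumed values, and real Cinder drivers build the Nova client through their own plumbing.

    from keystoneauth1 import loading
    from keystoneauth1 import session
    from novaclient import client

    # Authenticate against Keystone (all values are illustrative assumptions).
    loader = loading.get_plugin_loader('password')
    auth = loader.load_from_options(
        auth_url='http://keystone.example:5000/v3',
        username='cinder', password='secret',
        project_name='service',
        user_domain_name='Default', project_domain_name='Default')
    nova = client.Client('2.1', session=session.Session(auth=auth))

    # Ask Nova to create a qcow2 overlay on the NFS-backed volume; the
    # libvirt compute driver then snapshots the disk on Cinder's behalf.
    nova.assisted_volume_snapshots.create(
        volume_id='521752a6-acf6-4b2d-bc7a-119f9148cd8c',
        create_info={'snapshot_id': '421752a6-acf6-4b2d-bc7a-119f9148cd8c',
                     'type': 'qcow2',
                     'new_file': 'volume-521752a6.421752a6'})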

View File

@@ -950,7 +950,7 @@ notes=Block storage provides instances with direct attached
As an alternative to direct attached disks, an instance may
choose to use network based persistent storage. OpenStack provides
object storage via the Swift service, or a traditional filesystem
such as NFS/GlusterFS may be used. Some types of instances may
such as NFS may be used. Some types of instances may
not require persistent storage at all, being simple transaction
processing systems reading requests & sending results to and from
the network. Therefore support for this configuration is not

View File: nova/conf/libvirt.py

@@ -759,19 +759,6 @@ libvirt_vif_opts = [
]
libvirt_volume_opts = [
cfg.ListOpt('qemu_allowed_storage_drivers',
default=[],
help="""
Protocols listed here will be accessed directly from QEMU.
If gluster is present in qemu_allowed_storage_drivers, glusterfs's backend will
pass a disk configuration to QEMU. This allows QEMU to access the volume using
libgfapi rather than mounting GlusterFS via fuse.
Possible values:
* [gluster]
"""),
cfg.BoolOpt('volume_use_multipath',
default=False,
deprecated_name='iscsi_use_multipath',
@@ -801,15 +788,6 @@ attempts that can be made to discover the AoE device.
""")
]
libvirt_volume_glusterfs_opts = [
cfg.StrOpt('glusterfs_mount_point_base',
default=paths.state_path_def('mnt'),
help="""
Absolute path to the directory where the glusterfs volume is mounted on the
compute node.
""")
]
libvirt_volume_iscsi_opts = [
cfg.StrOpt('iscsi_iface',
deprecated_name='iscsi_transport',
@@ -1061,7 +1039,6 @@ ALL_OPTS = list(itertools.chain(
libvirt_vif_opts,
libvirt_volume_opts,
libvirt_volume_aoe_opts,
libvirt_volume_glusterfs_opts,
libvirt_volume_iscsi_opts,
libvirt_volume_iser_opts,
libvirt_volume_net_opts,
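
To illustrate what the removed ``qemu_allowed_storage_drivers`` option (see its help text earlier in this hunk) actually controlled: for a GlusterFS volume the driver emitted one of two libvirt disk configurations, sketched below with illustrative hostnames, volume names, and mount paths (compare the removed get_config() later in this diff). With the option unset, the volume was FUSE-mounted and handed to QEMU as a plain file; with ``gluster`` listed, QEMU accessed the volume directly over libgfapi.

    <!-- default: FUSE mount, QEMU sees a plain file -->
    <disk type='file' device='disk'>
      <driver name='qemu' type='raw'/>
      <source file='/var/lib/nova/mnt/<hash-of-export>/volume-00001'/>
    </disk>

    <!-- qemu_allowed_storage_drivers = gluster: QEMU speaks libgfapi -->
    <disk type='network' device='disk'>
      <driver name='qemu' type='raw'/>
      <source protocol='gluster' name='volume-00001/volume-00001'>
        <host name='192.168.1.1' port='24007'/>
      </source>
    </disk>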

View File: nova/tests/unit/virt/libvirt/test_config.py

@@ -1127,12 +1127,12 @@ class LibvirtConfigGuestDiskBackingStoreTest(LibvirtConfigBaseTest):
def test_config_network_parse(self):
xml = """<backingStore type='network' index='1'>
<format type='qcow2'/>
<source protocol='gluster' name='volume1/img1'>
<source protocol='netfs' name='volume1/img1'>
<host name='host1' port='24007'/>
</source>
<backingStore type='network' index='2'>
<format type='qcow2'/>
<source protocol='gluster' name='volume1/img2'>
<source protocol='netfs' name='volume1/img2'>
<host name='host1' port='24007'/>
</source>
<backingStore/>
@@ -1145,7 +1145,7 @@ class LibvirtConfigGuestDiskBackingStoreTest(LibvirtConfigBaseTest):
obj.parse_dom(xmldoc)
self.assertEqual(obj.source_type, 'network')
self.assertEqual(obj.source_protocol, 'gluster')
self.assertEqual(obj.source_protocol, 'netfs')
self.assertEqual(obj.source_name, 'volume1/img1')
self.assertEqual(obj.source_hosts[0], 'host1')
self.assertEqual(obj.source_ports[0], '24007')
@@ -2474,7 +2474,7 @@ class LibvirtConfigGuestSnapshotTest(LibvirtConfigBaseTest):
disk.source_type = 'network'
disk.source_hosts = ['host1']
disk.source_ports = ['12345']
disk.source_protocol = 'glusterfs'
disk.source_protocol = 'netfs'
disk.snapshot = 'external'
disk.driver_name = 'qcow2'
obj.add_disk(disk)
@@ -2490,7 +2490,7 @@ class LibvirtConfigGuestSnapshotTest(LibvirtConfigBaseTest):
<name>Demo</name>
<disks>
<disk name='vda' snapshot='external' type='network'>
<source protocol='glusterfs' name='source-file'>
<source protocol='netfs' name='source-file'>
<host name='host1' port='12345'/>
</source>
</disk>

View File: nova/tests/unit/virt/libvirt/test_driver.py

@@ -17834,7 +17834,6 @@ class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
self.c = context.get_admin_context()
self.flags(instance_name_template='instance-%s')
self.flags(qemu_allowed_storage_drivers=[], group='libvirt')
# creating instance
self.inst = {}
@@ -17870,17 +17869,17 @@ class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
</disk>
<disk type='network' device='disk'>
<driver name='qemu' type='qcow2'/>
<source protocol='gluster' name='vol1/root.img'>
<source protocol='netfs' name='vol1/root.img'>
<host name='server1' port='24007'/>
</source>
<backingStore type='network' index='1'>
<driver name='qemu' type='qcow2'/>
<source protocol='gluster' name='vol1/snap.img'>
<source protocol='netfs' name='vol1/snap.img'>
<host name='server1' port='24007'/>
</source>
<backingStore type='network' index='2'>
<driver name='qemu' type='qcow2'/>
<source protocol='gluster' name='vol1/snap-b.img'>
<source protocol='netfs' name='vol1/snap-b.img'>
<host name='server1' port='24007'/>
</source>
<backingStore/>
@@ -17904,12 +17903,12 @@ class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
</disk>
<disk type='network' device='disk'>
<driver name='qemu' type='qcow2'/>
<source protocol='gluster' name='vol1/snap.img'>
<source protocol='netfs' name='vol1/snap.img'>
<host name='server1' port='24007'/>
</source>
<backingStore type='network' index='1'>
<driver name='qemu' type='qcow2'/>
<source protocol='gluster' name='vol1/root.img'>
<source protocol='netfs' name='vol1/root.img'>
<host name='server1' port='24007'/>
</source>
<backingStore/>
@@ -18065,7 +18064,6 @@ class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
def test_volume_snapshot_create_libgfapi(self):
"""Test snapshot creation with libgfapi network disk."""
self.flags(instance_name_template = 'instance-%s')
self.flags(qemu_allowed_storage_drivers = ['gluster'], group='libvirt')
self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
self.mox.StubOutWithMock(self.drvr, '_volume_api')
@@ -18078,7 +18076,7 @@ class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>
<disk type='block'>
<source protocol='gluster' name='gluster1/volume-1234'>
<source protocol='netfs' name='netfs1/volume-1234'>
<host name='127.3.4.5' port='24007'/>
</source>
<target dev='vdb' bus='virtio' serial='1234'/>

View File: nova/tests/unit/virt/libvirt/volume/test_glusterfs.py

@@ -1,178 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from oslo_concurrency import processutils
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova import utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt.volume import glusterfs
class LibvirtGlusterfsVolumeDriverTestCase(
test_volume.LibvirtVolumeBaseTestCase):
@mock.patch.object(libvirt_utils, 'is_mounted', return_value=False)
def test_libvirt_glusterfs_driver(self, mock_is_mounted):
mnt_base = '/mnt'
self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = glusterfs.LibvirtGlusterfsVolumeDriver(self.fake_host)
export_string = '192.168.1.1:/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
connection_info = {'data': {'export': export_string,
'name': self.name}}
libvirt_driver.connect_volume(connection_info, self.disk_info,
mock.sentinel.instance)
libvirt_driver.disconnect_volume(connection_info, "vde",
mock.sentinel.instance)
device_path = os.path.join(export_mnt_base,
connection_info['data']['name'])
self.assertEqual(connection_info['data']['device_path'], device_path)
expected_commands = [
('mkdir', '-p', export_mnt_base),
('mount', '-t', 'glusterfs', export_string, export_mnt_base),
('umount', export_mnt_base)]
self.assertEqual(expected_commands, self.executes)
self.assertTrue(mock_is_mounted.called)
def test_libvirt_glusterfs_driver_get_config(self):
mnt_base = '/mnt'
self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = glusterfs.LibvirtGlusterfsVolumeDriver(self.fake_host)
export_string = '192.168.1.1:/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
file_path = os.path.join(export_mnt_base, self.name)
# Test default format - raw
connection_info = {'data': {'export': export_string,
'name': self.name,
'device_path': file_path}}
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertFileTypeEquals(tree, file_path)
self.assertEqual('raw', tree.find('./driver').get('type'))
# Test specified format - qcow2
connection_info = {'data': {'export': export_string,
'name': self.name,
'device_path': file_path,
'format': 'qcow2'}}
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertFileTypeEquals(tree, file_path)
self.assertEqual('qcow2', tree.find('./driver').get('type'))
@mock.patch.object(libvirt_utils, 'is_mounted', return_value=True)
def test_libvirt_glusterfs_driver_already_mounted(self, mock_is_mounted):
mnt_base = '/mnt'
self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = glusterfs.LibvirtGlusterfsVolumeDriver(self.fake_host)
export_string = '192.168.1.1:/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
connection_info = {'data': {'export': export_string,
'name': self.name}}
libvirt_driver.connect_volume(connection_info, self.disk_info,
mock.sentinel.instance)
libvirt_driver.disconnect_volume(connection_info, "vde",
mock.sentinel.instance)
expected_commands = [
('umount', export_mnt_base)]
self.assertEqual(expected_commands, self.executes)
@mock.patch.object(glusterfs.utils, 'execute')
@mock.patch.object(glusterfs.LOG, 'debug')
@mock.patch.object(glusterfs.LOG, 'exception')
def test_libvirt_glusterfs_driver_umount_error(self, mock_LOG_exception,
mock_LOG_debug, mock_utils_exe):
export_string = '192.168.1.1:/volume-00001'
connection_info = {'data': {'export': export_string,
'name': self.name}}
libvirt_driver = glusterfs.LibvirtGlusterfsVolumeDriver(self.fake_host)
mock_utils_exe.side_effect = processutils.ProcessExecutionError(
None, None, None, 'umount', 'umount: target is busy.')
libvirt_driver.disconnect_volume(connection_info, "vde",
mock.sentinel.instance)
self.assertTrue(mock_LOG_debug.called)
@mock.patch.object(libvirt_utils, 'is_mounted', return_value=False)
def test_libvirt_glusterfs_driver_with_opts(self, mock_is_mounted):
mnt_base = '/mnt'
self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = glusterfs.LibvirtGlusterfsVolumeDriver(self.fake_host)
export_string = '192.168.1.1:/volume-00001'
options = '-o backupvolfile-server=192.168.1.2'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
connection_info = {'data': {'export': export_string,
'name': self.name,
'options': options}}
libvirt_driver.connect_volume(connection_info, self.disk_info,
mock.sentinel.instance)
libvirt_driver.disconnect_volume(connection_info, "vde",
mock.sentinel.instance)
expected_commands = [
('mkdir', '-p', export_mnt_base),
('mount', '-t', 'glusterfs',
'-o', 'backupvolfile-server=192.168.1.2',
export_string, export_mnt_base),
('umount', export_mnt_base),
]
self.assertEqual(expected_commands, self.executes)
self.assertTrue(mock_is_mounted.called)
@mock.patch.object(libvirt_utils, 'is_mounted', return_value=False)
def test_libvirt_glusterfs_libgfapi(self, mock_is_mounted):
self.flags(qemu_allowed_storage_drivers=['gluster'], group='libvirt')
libvirt_driver = glusterfs.LibvirtGlusterfsVolumeDriver(self.fake_host)
export_string = '192.168.1.1:/volume-00001'
name = 'volume-00001'
connection_info = {'data': {'export': export_string, 'name': name}}
disk_info = {
"dev": "vde",
"type": "disk",
"bus": "virtio",
}
libvirt_driver.connect_volume(connection_info, disk_info,
mock.sentinel.instance)
conf = libvirt_driver.get_config(connection_info, disk_info)
tree = conf.format_dom()
self.assertEqual('network', tree.get('type'))
self.assertEqual('raw', tree.find('./driver').get('type'))
source = tree.find('./source')
self.assertEqual('gluster', source.get('protocol'))
self.assertEqual('volume-00001/volume-00001', source.get('name'))
self.assertEqual('192.168.1.1', source.find('./host').get('name'))
self.assertEqual('24007', source.find('./host').get('port'))
self.assertFalse(mock_is_mounted.called)
libvirt_driver.disconnect_volume(connection_info, "vde",
mock.sentinel.instance)

View File: nova/virt/libvirt/driver.py

@@ -156,8 +156,6 @@ libvirt_volume_drivers = [
'nfs=nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver',
'smbfs=nova.virt.libvirt.volume.smbfs.LibvirtSMBFSVolumeDriver',
'aoe=nova.virt.libvirt.volume.aoe.LibvirtAOEVolumeDriver',
'glusterfs='
'nova.virt.libvirt.volume.glusterfs.LibvirtGlusterfsVolumeDriver',
'fibre_channel='
'nova.virt.libvirt.volume.fibrechannel.'
'LibvirtFibreChannelVolumeDriver',
@@ -400,11 +398,10 @@ class LibvirtDriver(driver.ComputeDriver):
if self._disk_cachemode is None:
# We prefer 'none' for consistent performance, host crash
# safety & migration correctness by avoiding host page cache.
# Some filesystems (eg GlusterFS via FUSE) don't support
# O_DIRECT though. For those we fallback to 'writethrough'
# which gives host crash safety, and is safe for migration
# provided the filesystem is cache coherent (cluster filesystems
# typically are, but things like NFS are not).
# Some filesystems don't support O_DIRECT though. For those we
# fallback to 'writethrough' which gives host crash safety, and
# is safe for migration provided the filesystem is cache coherent
# (cluster filesystems typically are, but things like NFS are not).
self._disk_cachemode = "none"
if not self._supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
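
The _supports_direct_io() probe referenced here boils down to attempting an O_DIRECT open under the instances path. The sketch below is a simplification under assumed names and paths, not Nova's exact implementation (which also performs an aligned test write and more careful cleanup):

    import os

    def supports_direct_io(dirpath):
        # On Linux, open() fails with EINVAL on filesystems that lack
        # O_DIRECT support (historically, e.g., GlusterFS over FUSE).
        # os.O_DIRECT is only available on platforms that define it.
        testfile = os.path.join(dirpath, '.directio.test')
        try:
            fd = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
            os.close(fd)
            return True
        except OSError:
            return False
        finally:
            try:
                os.unlink(testfile)
            except OSError:
                pass

    # Mirrors the cache mode selection in the driver code above.
    cachemode = ('none' if supports_direct_io('/var/lib/nova/instances')
                 else 'writethrough')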
@@ -1893,7 +1890,7 @@ class LibvirtDriver(driver.ComputeDriver):
device_info.parse_dom(xml_doc)
disks_to_snap = [] # to be snapshotted by libvirt
network_disks_to_snap = [] # network disks (netfs, gluster, etc.)
network_disks_to_snap = [] # network disks (netfs, etc.)
disks_to_skip = [] # local disks not snapshotted
for guest_disk in device_info.devices:
@@ -1925,7 +1922,12 @@ class LibvirtDriver(driver.ComputeDriver):
new_file_path = os.path.join(os.path.dirname(current_file),
new_file)
disks_to_snap.append((current_file, new_file_path))
elif disk_info['source_protocol'] in ('gluster', 'netfs'):
# NOTE(mriedem): This used to include a check for gluster in
# addition to netfs since they were added together. Support for
# gluster was removed in the 16.0.0 Pike release. It is unclear,
# however, if other volume drivers rely on the netfs disk source
# protocol.
elif disk_info['source_protocol'] == 'netfs':
network_disks_to_snap.append((disk_info, new_file))
if not disks_to_snap and not network_disks_to_snap:

View File: nova/virt/libvirt/volume/glusterfs.py

@@ -1,110 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import processutils
from oslo_log import log as logging
import six
import nova.conf
from nova.i18n import _LE, _LW
from nova import utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt.volume import fs
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class LibvirtGlusterfsVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
"""Class implements libvirt part of volume driver for GlusterFS."""
def _get_mount_point_base(self):
return CONF.libvirt.glusterfs_mount_point_base
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtGlusterfsVolumeDriver,
self).get_config(connection_info, disk_info)
data = connection_info['data']
if 'gluster' in CONF.libvirt.qemu_allowed_storage_drivers:
vol_name = data['export'].split('/')[1]
source_host = data['export'].split('/')[0][:-1]
conf.source_ports = ['24007']
conf.source_type = 'network'
conf.source_protocol = 'gluster'
conf.source_hosts = [source_host]
conf.source_name = '%s/%s' % (vol_name, data['name'])
else:
conf.source_type = 'file'
conf.source_path = connection_info['data']['device_path']
conf.driver_format = connection_info['data'].get('format', 'raw')
return conf
def connect_volume(self, connection_info, disk_info, instance):
if 'gluster' not in CONF.libvirt.qemu_allowed_storage_drivers:
self._ensure_mounted(connection_info)
connection_info['data']['device_path'] = \
self._get_device_path(connection_info)
def disconnect_volume(self, connection_info, disk_dev, instance):
"""Disconnect the volume."""
if 'gluster' in CONF.libvirt.qemu_allowed_storage_drivers:
return
mount_path = self._get_mount_path(connection_info)
try:
utils.execute('umount', mount_path, run_as_root=True)
except processutils.ProcessExecutionError as exc:
export = connection_info['data']['export']
if 'target is busy' in six.text_type(exc):
LOG.debug("The GlusterFS share %s is still in use.", export)
else:
LOG.exception(_LE("Couldn't unmount the GlusterFS share %s"),
export)
def _ensure_mounted(self, connection_info):
"""@type connection_info: dict
"""
glusterfs_export = connection_info['data']['export']
mount_path = self._get_mount_path(connection_info)
if not libvirt_utils.is_mounted(mount_path, glusterfs_export):
options = connection_info['data'].get('options')
self._mount_glusterfs(mount_path, glusterfs_export,
options, ensure=True)
return mount_path
def _mount_glusterfs(self, mount_path, glusterfs_share,
options=None, ensure=False):
"""Mount glusterfs export to mount path."""
utils.execute('mkdir', '-p', mount_path)
gluster_cmd = ['mount', '-t', 'glusterfs']
if options is not None:
gluster_cmd.extend(options.split(' '))
gluster_cmd.extend([glusterfs_share, mount_path])
try:
utils.execute(*gluster_cmd, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ensure and 'already mounted' in six.text_type(exc):
LOG.warning(_LW("%s is already mounted"), glusterfs_share)
else:
raise

View File

@@ -0,0 +1,17 @@
---
upgrade:
- |
The ``nova.virt.libvirt.volume.glusterfs.LibvirtGlusterfsVolumeDriver``
volume driver has been removed. The GlusterFS volume driver in Cinder was
deprecated during the Newton release and was removed from Cinder in the
Ocata release, so it is effectively unmaintained and therefore no longer
supported.
The following configuration options, previously found in the ``libvirt``
group, have been removed:
- ``glusterfs_mount_point_base``
- ``qemu_allowed_storage_drivers``
These were used by the now-removed ``LibvirtGlusterfsVolumeDriver`` volume
driver and therefore no longer have any effect.
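
For operators, the practical upgrade step is to delete any such settings from nova.conf. The snippet below shows what a GlusterFS-era configuration might have looked like; the values are illustrative (the mount point shown is the old default), not output of this commit:

    [libvirt]
    # Both options were removed in this commit; if left behind they are
    # simply ignored as unrecognized configuration.
    glusterfs_mount_point_base = /var/lib/nova/mnt
    qemu_allowed_storage_drivers = gluster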