Optimize the loading of the volume driver

Previously, the volume driver was loaded on demand. This is
undesirable because the driver would be reloaded on every request,
which is unnecessary. After this patch, the volume drivers are
loaded once at initialization, and each request retrieves the
pre-loaded driver instead.

Change-Id: Idb1dec95d026744e6a34d2082945bf2e580862c0
Author: Hongbin Lu
Date: 2018-03-30 20:50:00 +00:00
Parent: 24a6602f0b
Commit: 3029a5b546
3 changed files with 58 additions and 54 deletions
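
To make the change concrete, here is a minimal, self-contained sketch of the pattern described in the commit message. The names (load_volume_driver, FakeVolumeDriver, ComputeDriver) are hypothetical stand-ins rather than Zun's real classes:

class FakeVolumeDriver(object):
    """Stand-in for a real volume driver such as the Cinder driver."""

    def attach(self, context, volume):
        return 'attached %s for %s' % (volume, context)


def load_volume_driver():
    """Stand-in for vol_driver.driver(); imagine this being expensive."""
    return FakeVolumeDriver()


class ComputeDriver(object):
    def __init__(self):
        # Loaded once, when the service starts.
        self.volume_driver = load_volume_driver()

    def attach_volume(self, context, volume):
        # Every request reuses the pre-loaded driver; only the
        # per-request context travels with the call.
        return self.volume_driver.attach(context, volume)


if __name__ == '__main__':
    compute = ComputeDriver()
    print(compute.attach_volume('req-1', 'vol-a'))
    print(compute.attach_volume('req-2', 'vol-b'))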


@@ -99,6 +99,7 @@ class DockerDriver(driver.ContainerDriver):
         super(DockerDriver, self).__init__()
         self._host = host.Host()
         self._get_host_storage_info()
+        self.volume_driver = vol_driver.driver()
 
     def _get_host_storage_info(self):
         storage_info = self._host.get_storage_info()
@@ -261,9 +262,8 @@ class DockerDriver(driver.ContainerDriver):
     def _get_binds(self, context, requested_volumes):
         binds = {}
         for volume in requested_volumes:
-            volume_driver = vol_driver.driver(provider=volume.volume_provider,
-                                              context=context)
-            source, destination = volume_driver.bind_mount(volume)
+            source, destination = self.volume_driver.bind_mount(context,
+                                                                volume)
             binds[source] = {'bind': destination}
         return binds
@@ -849,28 +849,16 @@ class DockerDriver(driver.ContainerDriver):
         return sandbox['Id']
 
     def attach_volume(self, context, volume_mapping):
-        volume_driver = vol_driver.driver(
-            provider=volume_mapping.volume_provider,
-            context=context)
-        volume_driver.attach(volume_mapping)
+        self.volume_driver.attach(context, volume_mapping)
 
     def detach_volume(self, context, volume_mapping):
-        volume_driver = vol_driver.driver(
-            provider=volume_mapping.volume_provider,
-            context=context)
-        volume_driver.detach(volume_mapping)
+        self.volume_driver.detach(context, volume_mapping)
 
     def delete_volume(self, context, volume_mapping):
-        volume_driver = vol_driver.driver(
-            provider=volume_mapping.volume_provider,
-            context=context)
-        volume_driver.delete(volume_mapping)
+        self.volume_driver.delete(context, volume_mapping)
 
     def is_volume_available(self, context, volume_mapping):
-        volume_driver = vol_driver.driver(
-            provider=volume_mapping.volume_provider,
-            context=context)
-        return volume_driver.is_volume_available(volume_mapping)
+        return self.volume_driver.is_volume_available(context, volume_mapping)
 
     def _get_or_create_docker_network(self, context, network_api,
                                       neutron_net_id):


@@ -35,6 +35,7 @@ class VolumeDriverTestCase(base.TestCase):
             'data': {'device_path': self.fake_devpath},
         }
         self.volume = mock.MagicMock()
+        self.volume.volume_provider = 'cinder'
         self.volume.volume_id = self.fake_volume_id
         self.volume.container_path = self.fake_container_path
         self.volume.connection_info = jsonutils.dumps(self.fake_conn_info)
@@ -50,8 +51,8 @@ class VolumeDriverTestCase(base.TestCase):
         mock_cinder_workflow.attach_volume.return_value = self.fake_devpath
         mock_get_mountpoint.return_value = self.fake_mountpoint
-        volume_driver = driver.Cinder(self.context, 'cinder')
-        volume_driver.attach(self.volume)
+        volume_driver = driver.Cinder()
+        volume_driver.attach(self.context, self.volume)
 
         mock_cinder_workflow.attach_volume.assert_called_once_with(self.volume)
         mock_get_mountpoint.assert_called_once_with(self.fake_volume_id)
@@ -66,8 +67,10 @@ class VolumeDriverTestCase(base.TestCase):
     def test_attach_unknown_provider(self, mock_cinder_workflow_cls,
                                      mock_get_mountpoint, mock_ensure_tree,
                                      mock_do_mount):
+        volume_driver = driver.Cinder()
+        self.volume.volume_provider = 'unknown'
         self.assertRaises(exception.ZunException,
-                          driver.Cinder, self.context, 'unknown')
+                          volume_driver.attach, self.context, self.volume)
@mock.patch('zun.common.mount.do_mount')
@mock.patch('oslo_utils.fileutils.ensure_tree')
@@ -82,9 +85,9 @@ class VolumeDriverTestCase(base.TestCase):
             exception.ZunException()
         mock_get_mountpoint.return_value = self.fake_mountpoint
-        volume_driver = driver.Cinder(self.context, 'cinder')
+        volume_driver = driver.Cinder()
         self.assertRaises(exception.ZunException,
-                          volume_driver.attach, self.volume)
+                          volume_driver.attach, self.context, self.volume)
 
         mock_cinder_workflow.attach_volume.assert_called_once_with(self.volume)
         mock_get_mountpoint.assert_not_called()
@@ -104,9 +107,9 @@ class VolumeDriverTestCase(base.TestCase):
         mock_get_mountpoint.return_value = self.fake_mountpoint
         mock_do_mount.side_effect = exception.ZunException()
-        volume_driver = driver.Cinder(self.context, 'cinder')
+        volume_driver = driver.Cinder()
         self.assertRaises(exception.ZunException,
-                          volume_driver.attach, self.volume)
+                          volume_driver.attach, self.context, self.volume)
 
         mock_cinder_workflow.attach_volume.assert_called_once_with(self.volume)
         mock_get_mountpoint.assert_called_once_with(self.fake_volume_id)
@@ -135,9 +138,9 @@ class VolumeDriverTestCase(base.TestCase):
         mock_do_mount.side_effect = TestException1()
         mock_cinder_workflow.detach_volume.side_effect = TestException2()
-        volume_driver = driver.Cinder(self.context, 'cinder')
+        volume_driver = driver.Cinder()
         self.assertRaises(TestException1,
-                          volume_driver.attach, self.volume)
+                          volume_driver.attach, self.context, self.volume)
 
         mock_cinder_workflow.attach_volume.assert_called_once_with(self.volume)
         mock_get_mountpoint.assert_called_once_with(self.fake_volume_id)
@@ -155,8 +158,8 @@ class VolumeDriverTestCase(base.TestCase):
         mock_cinder_workflow.detach_volume.return_value = self.fake_devpath
         mock_get_mountpoint.return_value = self.fake_mountpoint
-        volume_driver = driver.Cinder(self.context, 'cinder')
-        volume_driver.detach(self.volume)
+        volume_driver = driver.Cinder()
+        volume_driver.detach(self.context, self.volume)
 
         mock_cinder_workflow.detach_volume.assert_called_once_with(self.volume)
         mock_get_mountpoint.assert_called_once_with(self.fake_volume_id)
@@ -170,8 +173,9 @@ class VolumeDriverTestCase(base.TestCase):
         mock_cinder_workflow_cls.return_value = mock_cinder_workflow
         mock_get_mountpoint.return_value = self.fake_mountpoint
-        volume_driver = driver.Cinder(self.context, 'cinder')
-        source, destination = volume_driver.bind_mount(self.volume)
+        volume_driver = driver.Cinder()
+        source, destination = volume_driver.bind_mount(
+            self.context, self.volume)
 
         self.assertEqual(self.fake_mountpoint, source)
         self.assertEqual(self.fake_container_path, destination)
@@ -184,7 +188,7 @@ class VolumeDriverTestCase(base.TestCase):
         mock_cinder_workflow_cls.return_value = mock_cinder_workflow
         mock_cinder_workflow.delete_volume.return_value = self.fake_volume_id
-        volume_driver = driver.Cinder(self.context, 'cinder')
-        volume_driver.delete(self.volume)
+        volume_driver = driver.Cinder()
+        volume_driver.delete(self.context, self.volume)
 
         mock_cinder_workflow.delete_volume.assert_called_once_with(self.volume)


@@ -11,6 +11,7 @@
 #    under the License.
 
 import abc
+import functools
 import six
 
 from oslo_log import log as logging
@@ -45,21 +46,27 @@ def driver(*args, **kwargs):
     return volume_driver
 
 
+def validate_volume_provider(supported_providers):
+    """Wraps a method to validate volume provider."""
+    def decorator(function):
+        @functools.wraps(function)
+        def decorated_function(self, context, volume, **kwargs):
+            provider = volume.volume_provider
+            if provider not in supported_providers:
+                msg = _("The volume provider '%s' is not supported") % provider
+                raise exception.ZunException(msg)
+            return function(self, context, volume, **kwargs)
+        return decorated_function
+    return decorator
 
 
 @six.add_metaclass(abc.ABCMeta)
 class VolumeDriver(object):
     """The base class that all Volume classes should inherit from."""
 
     # Subclass should overwrite this list.
     supported_providers = []
 
-    def __init__(self, context, provider):
-        if provider not in self.supported_providers:
-            msg = _("Unsupported volume provider '%s'") % provider
-            raise exception.ZunException(msg)
-        self.context = context
-        self.provider = provider
-
     def attach(self, *args, **kwargs):
         raise NotImplementedError()
@@ -82,8 +89,9 @@ class Cinder(VolumeDriver):
         'cinder'
     ]
 
-    def attach(self, volume):
-        cinder = cinder_workflow.CinderWorkflow(self.context)
+    @validate_volume_provider(supported_providers)
+    def attach(self, context, volume):
+        cinder = cinder_workflow.CinderWorkflow(context)
         devpath = cinder.attach_volume(volume)
         try:
             self._mount_device(volume, devpath)
@@ -100,14 +108,16 @@ class Cinder(VolumeDriver):
         fileutils.ensure_tree(mountpoint)
         mount.do_mount(devpath, mountpoint, CONF.volume.fstype)
 
-    def detach(self, volume):
+    @validate_volume_provider(supported_providers)
+    def detach(self, context, volume):
         self._unmount_device(volume)
-        cinder = cinder_workflow.CinderWorkflow(self.context)
+        cinder = cinder_workflow.CinderWorkflow(context)
         cinder.detach_volume(volume)
 
-    def delete(self, volume):
+    @validate_volume_provider(supported_providers)
+    def delete(self, context, volume):
         self._unmount_device(volume)
-        cinder = cinder_workflow.CinderWorkflow(self.context)
+        cinder = cinder_workflow.CinderWorkflow(context)
         cinder.delete_volume(volume)
 
     def _unmount_device(self, volume):
@@ -116,12 +126,14 @@ class Cinder(VolumeDriver):
         mountpoint = mount.get_mountpoint(volume.volume_id)
         mount.do_unmount(devpath, mountpoint)
 
-    def bind_mount(self, volume):
+    @validate_volume_provider(supported_providers)
+    def bind_mount(self, context, volume):
         mountpoint = mount.get_mountpoint(volume.volume_id)
         return mountpoint, volume.container_path
 
-    def is_volume_available(self, volume):
-        ca = cinder_api.CinderAPI(self.context)
+    @validate_volume_provider(supported_providers)
+    def is_volume_available(self, context, volume):
+        ca = cinder_api.CinderAPI(context)
         if 'available' == ca.get(volume.volume_id).status:
             return True
         else:
             return False
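
Note that the provider check that used to run in VolumeDriver.__init__ now runs on every call through the validate_volume_provider decorator, so an unsupported provider is rejected when a method such as attach() is invoked rather than when the driver is constructed (this is what the updated test_attach_unknown_provider exercises). The following is a minimal, self-contained sketch of that decorator pattern; FakeVolume, FakeCinderDriver and UnsupportedProvider are hypothetical stand-ins, not Zun's real classes:

import functools


class UnsupportedProvider(Exception):
    """Stand-in for exception.ZunException."""


def validate_volume_provider(supported_providers):
    """Reject calls whose volume comes from an unsupported provider."""
    def decorator(function):
        @functools.wraps(function)
        def decorated_function(self, context, volume, **kwargs):
            if volume.volume_provider not in supported_providers:
                raise UnsupportedProvider(volume.volume_provider)
            return function(self, context, volume, **kwargs)
        return decorated_function
    return decorator


class FakeVolume(object):
    def __init__(self, provider):
        self.volume_provider = provider


class FakeCinderDriver(object):
    supported_providers = ['cinder']

    @validate_volume_provider(supported_providers)
    def attach(self, context, volume):
        return 'attached'


if __name__ == '__main__':
    drv = FakeCinderDriver()
    print(drv.attach('ctx', FakeVolume('cinder')))   # -> attached
    try:
        drv.attach('ctx', FakeVolume('unknown'))     # rejected at call time
    except UnsupportedProvider as exc:
        print('rejected provider: %s' % exc)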