Cleanup mount / umount and associated rmdir calls

Add a new filesystem mounting helper in privsep, and then start
moving things across to it. This currently implements mount and
unmount. We get to clean up some rmdir calls while we're at it
which is nice as well.

I've added an upgrade note mentioning that we no longer ignore
the value of stderr from mount calls, as requested in code review.

Change-Id: Ib5e585fa4bfb99617cd3ca983674114d323a3cce
blueprint: hurrah-for-privsep
This commit is contained in:
Michael Still 2017-09-27 06:53:15 +10:00
parent 5e4c98a58f
commit 7ad72b0922
17 changed files with 302 additions and 337 deletions

View File

@ -10,18 +10,6 @@ kpartx: CommandFilter, kpartx, root
# nova/virt/xenapi/vm_utils.py: tune2fs, -j, partition_path
tune2fs: CommandFilter, tune2fs, root
# nova/virt/disk/mount/api.py: 'mount', mapped_device
# nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target
# nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'..
# nova/virt/configdrive.py: 'mount', device, mountdir
mount: CommandFilter, mount, root
# nova/virt/disk/mount/api.py: 'umount', mapped_device
# nova/virt/disk/api.py: 'umount' target
# nova/virt/xenapi/vm_utils.py: 'umount', dev_path
# nova/virt/configdrive.py: 'umount', mountdir
umount: CommandFilter, umount, root
# nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-c', device, image
# nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-d', device
qemu-nbd: CommandFilter, qemu-nbd, root

38
nova/privsep/fs.py Normal file
View File

@ -0,0 +1,38 @@
# Copyright 2016 Red Hat, Inc
# Copyright 2017 Rackspace Australia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helpers for filesystem related routines.
"""
from oslo_concurrency import processutils
import nova.privsep
@nova.privsep.sys_admin_pctxt.entrypoint
def mount(fstype, device, mountpoint, options):
    """Mount a filesystem as root.

    :param fstype: the filesystem type to pass via ``-t``, or a falsy
        value to let mount autodetect the type
    :param device: the device (or export) to mount
    :param mountpoint: the directory to mount onto
    :param options: extra command line arguments for mount (a list), or
        None for no extra arguments
    :returns: the (stdout, stderr) tuple from processutils.execute
    """
    cmd = ['mount']
    if fstype:
        cmd += ['-t', fstype]
    if options is not None:
        cmd += options
    cmd += [device, mountpoint]
    return processutils.execute(*cmd)
@nova.privsep.sys_admin_pctxt.entrypoint
def umount(mountpoint):
    """Unmount a filesystem as root.

    Retries up to three times with a delay between attempts, since
    transiently busy mounts are common.

    :param mountpoint: the directory to unmount
    """
    umount_cmd = ['umount', mountpoint]
    processutils.execute(*umount_cmd, attempts=3, delay_on_retry=True)

View File

@ -77,6 +77,13 @@ def utime(path):
os.utime(path, None)
@nova.privsep.sys_admin_pctxt.entrypoint
def rmdir(path):
    """Remove a directory as root.

    :param path: the directory to remove
    :raises exception.FileNotFound: if the path does not exist
    """
    if os.path.exists(path):
        os.rmdir(path)
    else:
        raise exception.FileNotFound(file_path=path)
class path(object):
@staticmethod
@nova.privsep.sys_admin_pctxt.entrypoint

View File

@ -65,10 +65,11 @@ class ConfigDriveTestCase(test.NoDBTestCase):
fileutils.delete_if_exists(imagefile)
@mock.patch.object(utils, 'mkfs', return_value=None)
@mock.patch.object(utils, 'execute', return_value=None)
@mock.patch('nova.privsep.fs.mount', return_value=('', ''))
@mock.patch('nova.privsep.fs.umount', return_value=None)
@mock.patch.object(utils, 'trycmd', return_value=(None, None))
def test_create_configdrive_vfat(self, mock_trycmd,
mock_execute, mock_mkfs):
def test_create_configdrive_vfat(self, mock_trycmd, mock_umount,
mock_mount, mock_mkfs):
CONF.set_override('config_drive_format', 'vfat')
imagefile = None
try:
@ -79,13 +80,8 @@ class ConfigDriveTestCase(test.NoDBTestCase):
mock_mkfs.assert_called_once_with('vfat', mock.ANY,
label='config-2')
mock_trycmd.assert_called_once_with('mount', '-o',
mock.ANY,
mock.ANY,
mock.ANY,
run_as_root=True)
mock_execute.assert_called_once_with('umount', mock.ANY,
run_as_root=True)
mock_mount.assert_called_once()
mock_umount.assert_called_once()
# NOTE(mikal): we can't check for a VFAT output here because the
# filesystem creation stuff has been mocked out because it
# requires root permissions

View File

@ -14,6 +14,7 @@
# under the License.
import mock
import os
import tempfile
import time
@ -271,13 +272,10 @@ class NbdTestCase(test.NoDBTestCase):
# No error logged, device consumed
self.assertFalse(n.get_dev())
def test_do_mount_need_to_specify_fs_type(self):
@mock.patch('nova.privsep.fs.mount', return_value=('', 'broken'))
def test_do_mount_need_to_specify_fs_type(self, mock_mount):
# NOTE(mikal): Bug 1094373 saw a regression where we failed to
# communicate a failed mount properly.
def fake_trycmd(*args, **kwargs):
return '', 'broken'
self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd))
imgfile = tempfile.NamedTemporaryFile()
self.addCleanup(imgfile.close)
tempdir = self.useFixture(fixtures.TempDir()).path

View File

@ -174,30 +174,9 @@ class HostMountStateTestCase(test.NoDBTestCase):
def setUp(self):
super(HostMountStateTestCase, self).setUp()
self.mounted = set()
def fake_execute(cmd, *args, **kwargs):
if cmd == 'mount':
path = args[-1]
if path in self.mounted:
raise processutils.ProcessExecutionError('Already mounted')
self.mounted.add(path)
elif cmd == 'umount':
path = args[-1]
if path not in self.mounted:
raise processutils.ProcessExecutionError('Not mounted')
self.mounted.remove(path)
def fake_ismount(path):
return path in self.mounted
mock_execute = mock.MagicMock(side_effect=fake_execute)
self.useFixture(fixtures.MonkeyPatch('nova.utils.execute',
mock_execute))
self.useFixture(fixtures.MonkeyPatch('os.path.ismount', fake_ismount))
def test_init(self):
@mock.patch('os.path.ismount',
side_effect=[False, True, True, True, True])
def test_init(self, mock_ismount):
# Test that we initialise the state of MountManager correctly at
# startup
def fake_disk(disk):
@ -216,9 +195,6 @@ class HostMountStateTestCase(test.NoDBTestCase):
mountpoint_a = '/mnt/a'
mountpoint_b = '/mnt/b'
self.mounted.add(mountpoint_a)
self.mounted.add(mountpoint_b)
guests = map(mock_guest, [uuids.instance_a, uuids.instance_b], [
# Local file root disk and a volume on each of mountpoints a and b
[
@ -272,15 +248,13 @@ class HostMountStateTestCase(test.NoDBTestCase):
instance=mock.sentinel.instance):
m.umount(vol, mountpoint, instance)
@staticmethod
def _expected_sentinel_umount_calls(mountpoint=mock.sentinel.mountpoint):
return [mock.call('umount', mountpoint,
attempts=3, delay_on_retry=True,
run_as_root=True),
mock.call('rmdir', mountpoint)]
@mock.patch('oslo_utils.fileutils.ensure_tree')
def test_mount_umount(self, mock_ensure_tree):
@mock.patch('nova.privsep.fs.mount')
@mock.patch('nova.privsep.fs.umount')
@mock.patch('os.path.ismount',
side_effect=[False, True, True, True])
def test_mount_umount(self, mock_ismount, mock_umount, mock_mount,
mock_ensure_tree):
# Mount 2 different volumes from the same export. Test that we only
# mount and umount once.
m = self._get_clean_hostmountstate()
@ -289,47 +263,49 @@ class HostMountStateTestCase(test.NoDBTestCase):
self._sentinel_mount(m, mock.sentinel.vol_a)
mock_ensure_tree.assert_has_calls([
mock.call(mock.sentinel.mountpoint)])
mount.utils.execute.assert_has_calls([
mock.call('mount', '-t', mock.sentinel.fstype,
mock.sentinel.option1, mock.sentinel.option2,
mock_mount.assert_has_calls([
mock.call(mock.sentinel.fstype,
mock.sentinel.export, mock.sentinel.mountpoint,
run_as_root=True)])
[mock.sentinel.option1, mock.sentinel.option2])])
# Mount vol_b from export. We shouldn't have mounted again
self._sentinel_mount(m, mock.sentinel.vol_b)
mock_ensure_tree.assert_has_calls([
mock.call(mock.sentinel.mountpoint)])
mount.utils.execute.assert_has_calls([
mock.call('mount', '-t', mock.sentinel.fstype,
mock.sentinel.option1, mock.sentinel.option2,
mock_mount.assert_has_calls([
mock.call(mock.sentinel.fstype,
mock.sentinel.export, mock.sentinel.mountpoint,
run_as_root=True)])
[mock.sentinel.option1, mock.sentinel.option2])])
# Unmount vol_a. We shouldn't have unmounted
self._sentinel_umount(m, mock.sentinel.vol_a)
mock_ensure_tree.assert_has_calls([
mock.call(mock.sentinel.mountpoint)])
mount.utils.execute.assert_has_calls([
mock.call('mount', '-t', mock.sentinel.fstype,
mock.sentinel.option1, mock.sentinel.option2,
mock_mount.assert_has_calls([
mock.call(mock.sentinel.fstype,
mock.sentinel.export, mock.sentinel.mountpoint,
run_as_root=True)])
[mock.sentinel.option1, mock.sentinel.option2])])
# Unmount vol_b. We should have umounted.
self._sentinel_umount(m, mock.sentinel.vol_b)
expected_calls = [
mock.call('mount', '-t', mock.sentinel.fstype,
mock.sentinel.option1, mock.sentinel.option2,
mock.sentinel.export, mock.sentinel.mountpoint,
run_as_root=True)
]
expected_calls.extend(self._expected_sentinel_umount_calls())
mock_ensure_tree.assert_has_calls([
mock.call(mock.sentinel.mountpoint)])
mount.utils.execute.assert_has_calls(expected_calls)
mock_mount.assert_has_calls([
mock.call(mock.sentinel.fstype,
mock.sentinel.export, mock.sentinel.mountpoint,
[mock.sentinel.option1, mock.sentinel.option2])])
mock_umount.assert_has_calls([
mock.call(mock.sentinel.mountpoint)])
@mock.patch('oslo_utils.fileutils.ensure_tree')
def test_mount_umount_multi_attach(self, mock_ensure_tree):
@mock.patch('nova.privsep.fs.mount')
@mock.patch('nova.privsep.fs.umount')
@mock.patch('os.path.ismount',
side_effect=[False, True, True, True])
@mock.patch('nova.privsep.path.rmdir')
def test_mount_umount_multi_attach(self, mock_rmdir, mock_ismount,
mock_umount, mock_mount,
mock_ensure_tree):
# Mount a volume from a single export for 2 different instances. Test
# that we only mount and umount once.
m = self._get_clean_hostmountstate()
@ -341,48 +317,32 @@ class HostMountStateTestCase(test.NoDBTestCase):
# Mount vol_a for instance_a
self._sentinel_mount(m, mock.sentinel.vol_a, instance=instance_a)
mock_ensure_tree.assert_has_calls([
mock.call(mock.sentinel.mountpoint)])
mount.utils.execute.assert_has_calls([
mock.call('mount', '-t', mock.sentinel.fstype,
mock.sentinel.option1, mock.sentinel.option2,
mock.sentinel.export, mock.sentinel.mountpoint,
run_as_root=True)])
mock_mount.assert_has_calls([
mock.call(mock.sentinel.fstype, mock.sentinel.export,
mock.sentinel.mountpoint,
[mock.sentinel.option1, mock.sentinel.option2])])
mock_mount.reset_mock()
# Mount vol_a for instance_b. We shouldn't have mounted again
self._sentinel_mount(m, mock.sentinel.vol_a, instance=instance_b)
mock_ensure_tree.assert_has_calls([
mock.call(mock.sentinel.mountpoint)])
mount.utils.execute.assert_has_calls([
mock.call('mount', '-t', mock.sentinel.fstype,
mock.sentinel.option1, mock.sentinel.option2,
mock.sentinel.export, mock.sentinel.mountpoint,
run_as_root=True)])
mock_mount.assert_not_called()
# Unmount vol_a for instance_a. We shouldn't have unmounted
self._sentinel_umount(m, mock.sentinel.vol_a, instance=instance_a)
mock_ensure_tree.assert_has_calls([
mock.call(mock.sentinel.mountpoint)])
mount.utils.execute.assert_has_calls([
mock.call('mount', '-t', mock.sentinel.fstype,
mock.sentinel.option1, mock.sentinel.option2,
mock.sentinel.export, mock.sentinel.mountpoint,
run_as_root=True)])
mock_umount.assert_not_called()
# Unmount vol_a for instance_b. We should have umounted.
self._sentinel_umount(m, mock.sentinel.vol_a, instance=instance_b)
expected_calls = [
mock.call('mount', '-t', mock.sentinel.fstype,
mock.sentinel.option1, mock.sentinel.option2,
mock.sentinel.export, mock.sentinel.mountpoint,
run_as_root=True)]
expected_calls.extend(self._expected_sentinel_umount_calls())
mock_ensure_tree.assert_has_calls([
mock.call(mock.sentinel.mountpoint)])
mount.utils.execute.assert_has_calls(expected_calls)
mock_umount.assert_has_calls([mock.call(mock.sentinel.mountpoint)])
@mock.patch('oslo_utils.fileutils.ensure_tree')
def test_mount_concurrent(self, mock_ensure_tree):
@mock.patch('nova.privsep.fs.mount')
@mock.patch('nova.privsep.fs.umount')
@mock.patch('os.path.ismount',
side_effect=[False, False, True, False, False, True])
@mock.patch('nova.privsep.path.rmdir')
def test_mount_concurrent(self, mock_rmdir, mock_ismount, mock_umount,
mock_mount, mock_ensure_tree):
# This is 2 tests in 1, because the first test is the precondition
# for the second.
@ -419,55 +379,71 @@ class HostMountStateTestCase(test.NoDBTestCase):
ctl_c = TestThreadController(mount_c)
ctl_d = TestThreadController(mount_d)
orig_execute = mount.utils.execute.side_effect
def trap_mount_umount(cmd, *args, **kwargs):
def trap_mount(*args, **kwargs):
# Conditionally wait at a waitpoint named after the command
# we're executing
ctl = TestThreadController.current()
ctl.waitpoint(cmd)
TestThreadController.current().waitpoint('mount')
orig_execute(cmd, *args, **kwargs)
def trap_umount(*args, **kwargs):
# Conditionally wait at a waitpoint named after the command
# we're executing
TestThreadController.current().waitpoint('umount')
mount.utils.execute.side_effect = trap_mount_umount
expected_calls = []
mock_mount.side_effect = trap_mount
mock_umount.side_effect = trap_umount
# Run the first thread until it's blocked while calling mount
ctl_a.runto('mount')
expected_calls.extend([
mock.call('mount', '-t', mock.sentinel.fstype,
mock.sentinel.option1, mock.sentinel.option2,
mock.sentinel.export, mock.sentinel.mountpoint,
run_as_root=True)])
mock_ensure_tree.assert_has_calls([
mock.call(mock.sentinel.mountpoint)])
self.assertEqual(expected_calls, mount.utils.execute.call_args_list)
mock_mount.assert_has_calls([
mock.call(mock.sentinel.fstype, mock.sentinel.export,
mock.sentinel.mountpoint,
[mock.sentinel.option1, mock.sentinel.option2])])
# Start the second mount, and ensure it's got plenty of opportunity
# to race.
ctl_b.start()
time.sleep(0.01)
self.assertEqual(expected_calls, mount.utils.execute.call_args_list)
mock_ensure_tree.assert_has_calls([
mock.call(mock.sentinel.mountpoint)])
mock_mount.assert_has_calls([
mock.call(mock.sentinel.fstype, mock.sentinel.export,
mock.sentinel.mountpoint,
[mock.sentinel.option1, mock.sentinel.option2])])
mock_umount.assert_not_called()
# Allow ctl_a to complete its mount
ctl_a.runto('mounted')
self.assertEqual(expected_calls, mount.utils.execute.call_args_list)
mock_ensure_tree.assert_has_calls([
mock.call(mock.sentinel.mountpoint)])
mock_mount.assert_has_calls([
mock.call(mock.sentinel.fstype, mock.sentinel.export,
mock.sentinel.mountpoint,
[mock.sentinel.option1, mock.sentinel.option2])])
mock_umount.assert_not_called()
# Allow ctl_b to finish. We should not have done a umount
ctl_b.finish()
self.assertEqual(expected_calls, mount.utils.execute.call_args_list)
mock_ensure_tree.assert_has_calls([
mock.call(mock.sentinel.mountpoint)])
mock_mount.assert_has_calls([
mock.call(mock.sentinel.fstype, mock.sentinel.export,
mock.sentinel.mountpoint,
[mock.sentinel.option1, mock.sentinel.option2])])
mock_umount.assert_not_called()
# Allow ctl_a to start umounting
# Allow ctl_a to start umounting. We haven't executed rmdir yet,
# because we've blocked during umount
ctl_a.runto('umount')
expected_calls.extend(self._expected_sentinel_umount_calls())
# We haven't executed rmdir yet, beause we've blocked during umount
rmdir = expected_calls.pop()
self.assertEqual(expected_calls, mount.utils.execute.call_args_list)
expected_calls.append(rmdir)
mock_ensure_tree.assert_has_calls([
mock.call(mock.sentinel.mountpoint)])
mock_mount.assert_has_calls([
mock.call(mock.sentinel.fstype, mock.sentinel.export,
mock.sentinel.mountpoint,
[mock.sentinel.option1, mock.sentinel.option2])])
mock_umount.assert_has_calls([mock.call(mock.sentinel.mountpoint)])
mock_rmdir.assert_not_called()
# While ctl_a is umounting, simultaneously start both ctl_c and
# ctl_d, and ensure they have an opportunity to race
@ -481,17 +457,26 @@ class HostMountStateTestCase(test.NoDBTestCase):
# We should have completed the previous umount, then remounted
# exactly once
expected_calls.extend([
mock.call('mount', '-t', mock.sentinel.fstype,
mock.sentinel.option1, mock.sentinel.option2,
mock.sentinel.export, mock.sentinel.mountpoint,
run_as_root=True)])
mock_ensure_tree.assert_has_calls([
mock.call(mock.sentinel.mountpoint)])
self.assertEqual(expected_calls, mount.utils.execute.call_args_list)
mock_mount.assert_has_calls([
mock.call(mock.sentinel.fstype, mock.sentinel.export,
mock.sentinel.mountpoint,
[mock.sentinel.option1, mock.sentinel.option2]),
mock.call(mock.sentinel.fstype, mock.sentinel.export,
mock.sentinel.mountpoint,
[mock.sentinel.option1, mock.sentinel.option2])])
mock_umount.assert_has_calls([mock.call(mock.sentinel.mountpoint)])
@mock.patch('oslo_utils.fileutils.ensure_tree')
def test_mount_concurrent_no_interfere(self, mock_ensure_tree):
@mock.patch('nova.privsep.fs.mount')
@mock.patch('nova.privsep.fs.umount')
@mock.patch('os.path.ismount',
side_effect=[False, False, True, True, True, False])
@mock.patch('nova.privsep.path.rmdir')
def test_mount_concurrent_no_interfere(self, mock_rmdir, mock_ismount,
mock_umount, mock_mount,
mock_ensure_tree):
# Test that concurrent calls to mount volumes in different exports
# run concurrently
m = self._get_clean_hostmountstate()
@ -514,104 +499,77 @@ class HostMountStateTestCase(test.NoDBTestCase):
ctl_a = TestThreadController(mount_a)
ctl_b = TestThreadController(mount_b)
expected_calls = []
ctl_a.runto('mounted')
expected_calls.extend([
mock.call('mount', '-t', mock.sentinel.fstype,
mock.sentinel.option1, mock.sentinel.option2,
mock.sentinel.export, mock.sentinel.mountpoint_a,
run_as_root=True)])
mock_ensure_tree.assert_has_calls([
mock.call(mock.sentinel.mountpoint_a)])
self.assertEqual(expected_calls, mount.utils.execute.call_args_list)
mock_mount.assert_has_calls([
mock.call(mock.sentinel.fstype, mock.sentinel.export,
mock.sentinel.mountpoint_a,
[mock.sentinel.option1, mock.sentinel.option2])])
mock_mount.reset_mock()
ctl_b.finish()
expected_calls.extend([
mock.call('mount', '-t', mock.sentinel.fstype,
mock.sentinel.option1, mock.sentinel.option2,
mock.sentinel.export, mock.sentinel.mountpoint_b,
run_as_root=True)])
expected_calls.extend(self._expected_sentinel_umount_calls(
mock.sentinel.mountpoint_b))
mock_ensure_tree.assert_has_calls([
mock.call(mock.sentinel.mountpoint_b)])
self.assertEqual(expected_calls, mount.utils.execute.call_args_list)
mock_mount.assert_has_calls([
mock.call(mock.sentinel.fstype, mock.sentinel.export,
mock.sentinel.mountpoint_b,
[mock.sentinel.option1, mock.sentinel.option2])])
mock_umount.assert_has_calls([mock.call(mock.sentinel.mountpoint_b)])
mock_umount.reset_mock()
ctl_a.finish()
expected_calls.extend(self._expected_sentinel_umount_calls(
mock.sentinel.mountpoint_a))
self.assertEqual(expected_calls, mount.utils.execute.call_args_list)
mock_umount.assert_has_calls([mock.call(mock.sentinel.mountpoint_a)])
@mock.patch('oslo_utils.fileutils.ensure_tree')
def test_mount_after_failed_umount(self, mock_ensure_tree):
@mock.patch('nova.privsep.fs.mount')
@mock.patch('nova.privsep.fs.umount',
side_effect=processutils.ProcessExecutionError())
@mock.patch('os.path.ismount',
side_effect=[False, True, True, True, False])
@mock.patch('nova.privsep.path.rmdir')
def test_mount_after_failed_umount(self, mock_rmdir, mock_ismount,
mock_umount, mock_mount,
mock_ensure_tree):
# Test that MountManager correctly tracks state when umount fails.
# Test that when umount fails a subsequent mount doesn't try to
# remount it.
# We've already got a fake execute (see setUp) which is ensuring mount,
# umount, and ismount work as expected. We don't want to mess with
# that, except that we want umount to raise an exception. We store the
# original here so we can call it if we're not unmounting, and so we
# can restore it when we no longer want the exception.
orig_execute = mount.utils.execute.side_effect
def raise_on_umount(cmd, *args, **kwargs):
if cmd == 'umount':
raise mount.processutils.ProcessExecutionError()
orig_execute(cmd, *args, **kwargs)
mount.utils.execute.side_effect = raise_on_umount
expected_calls = []
m = self._get_clean_hostmountstate()
# Mount vol_a
self._sentinel_mount(m, mock.sentinel.vol_a)
expected_calls.extend([
mock.call('mount', '-t', mock.sentinel.fstype,
mock.sentinel.option1, mock.sentinel.option2,
mock.sentinel.export, mock.sentinel.mountpoint,
run_as_root=True)])
mock_ensure_tree.assert_has_calls([
mock.call(mock.sentinel.mountpoint)])
self.assertEqual(expected_calls, mount.utils.execute.call_args_list)
mock_mount.assert_has_calls([
mock.call(mock.sentinel.fstype, mock.sentinel.export,
mock.sentinel.mountpoint,
[mock.sentinel.option1, mock.sentinel.option2])])
mock_mount.reset_mock()
# Umount vol_a. The umount command will fail.
self._sentinel_umount(m, mock.sentinel.vol_a)
expected_calls.extend(self._expected_sentinel_umount_calls())
mock_umount.assert_has_calls([mock.call(mock.sentinel.mountpoint)])
# We should not have called rmdir, because umount failed
expected_calls.pop()
self.assertEqual(expected_calls, mount.utils.execute.call_args_list)
mock_rmdir.assert_not_called()
# Mount vol_a again. We should not have called mount, because umount
# failed.
self._sentinel_mount(m, mock.sentinel.vol_a)
self.assertEqual(expected_calls, mount.utils.execute.call_args_list)
mock_mount.assert_not_called()
# Prevent future failure of umount
mount.utils.execute.side_effect = orig_execute
mock_umount.side_effect = None
# Umount vol_a successfully
self._sentinel_umount(m, mock.sentinel.vol_a)
expected_calls.extend(self._expected_sentinel_umount_calls())
self.assertEqual(expected_calls, mount.utils.execute.call_args_list)
mock_umount.assert_has_calls([mock.call(mock.sentinel.mountpoint)])
@mock.patch.object(mount.LOG, 'error')
@mock.patch('oslo_utils.fileutils.ensure_tree')
def test_umount_log_failure(self, mock_ensure_tree, mock_LOG_error):
# Test that we log an error when umount fails
orig_execute = mount.utils.execute.side_effect
def raise_on_umount(cmd, *args, **kwargs):
if cmd == 'umount':
raise mount.processutils.ProcessExecutionError(
None, None, None, 'umount', 'umount: device is busy.')
orig_execute(cmd, *args, **kwargs)
mount.utils.execute.side_effect = raise_on_umount
@mock.patch('nova.privsep.fs.mount')
@mock.patch('os.path.ismount')
@mock.patch('nova.privsep.fs.umount')
def test_umount_log_failure(self, mock_umount, mock_ismount, mock_mount,
mock_ensure_tree, mock_LOG_error):
mock_umount.side_effect = mount.processutils.ProcessExecutionError(
None, None, None, 'umount', 'umount: device is busy.')
mock_ismount.side_effect = [False, True, True]
m = self._get_clean_hostmountstate()

View File

@ -12,7 +12,6 @@
import os
import fixtures
import mock
from nova.tests.unit.virt.libvirt.volume import test_volume
@ -35,31 +34,13 @@ class LibvirtNFSVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
m.host_up(self.fake_host)
self.flags(nfs_mount_point_base=self.mnt_base, group='libvirt')
# Caution: this is also faked by the superclass
orig_execute = utils.execute
mounted = [False]
def fake_execute(*cmd, **kwargs):
orig_execute(*cmd, **kwargs)
if cmd[0] == 'mount':
mounted[0] = True
if cmd[0] == 'umount':
mounted[0] = False
self.useFixture(fixtures.MonkeyPatch('nova.utils.execute',
fake_execute))
# Mock ismount to return the current mount state
# N.B. This is only valid for tests which mount and unmount a single
# directory.
self.useFixture(fixtures.MonkeyPatch('os.path.ismount',
lambda *args, **kwargs: mounted[0]))
@mock.patch('oslo_utils.fileutils.ensure_tree')
def test_libvirt_nfs_driver(self, mock_ensure_tree):
@mock.patch('nova.privsep.fs.mount')
@mock.patch('os.path.ismount', side_effect=[False, True, False])
@mock.patch('nova.privsep.fs.umount')
@mock.patch('nova.privsep.path.rmdir')
def test_libvirt_nfs_driver(self, mock_rmdir, mock_umount, mock_ismount,
mock_mount, mock_ensure_tree):
libvirt_driver = nfs.LibvirtNFSVolumeDriver(self.fake_host)
export_string = '192.168.1.1:/nfs/share1'
@ -78,12 +59,11 @@ class LibvirtNFSVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
device_path = os.path.join(export_mnt_base,
connection_info['data']['name'])
self.assertEqual(connection_info['data']['device_path'], device_path)
expected_commands = [
('mount', '-t', 'nfs', export_string, export_mnt_base),
('umount', export_mnt_base),
('rmdir', export_mnt_base)]
mock_ensure_tree.assert_has_calls([mock.call(export_mnt_base)])
self.assertEqual(expected_commands, self.executes)
mock_mount.assert_has_calls([mock.call('nfs', export_string,
export_mnt_base, [])])
mock_umount.assert_has_calls([mock.call(export_mnt_base)])
mock_rmdir.assert_has_calls([mock.call(export_mnt_base)])
def test_libvirt_nfs_driver_get_config(self):
libvirt_driver = nfs.LibvirtNFSVolumeDriver(self.fake_host)
@ -102,7 +82,13 @@ class LibvirtNFSVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
self.assertEqual('native', tree.find('./driver').get('io'))
@mock.patch('oslo_utils.fileutils.ensure_tree')
def test_libvirt_nfs_driver_with_opts(self, mock_ensure_tree):
@mock.patch('nova.privsep.fs.mount')
@mock.patch('os.path.ismount', side_effect=[False, True, False])
@mock.patch('nova.privsep.fs.umount')
@mock.patch('nova.privsep.path.rmdir')
def test_libvirt_nfs_driver_with_opts(self, mock_rmdir, mock_umount,
mock_ismount, mock_mount,
mock_ensure_tree):
libvirt_driver = nfs.LibvirtNFSVolumeDriver(self.fake_host)
export_string = '192.168.1.1:/nfs/share1'
options = '-o intr,nfsvers=3'
@ -119,11 +105,9 @@ class LibvirtNFSVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
libvirt_driver.disconnect_volume(connection_info, "vde",
mock.sentinel.instance)
expected_commands = [
('mount', '-t', 'nfs', '-o', 'intr,nfsvers=3',
export_string, export_mnt_base),
('umount', export_mnt_base),
('rmdir', export_mnt_base)
]
mock_ensure_tree.assert_has_calls([mock.call(export_mnt_base)])
self.assertEqual(expected_commands, self.executes)
mock_mount.assert_has_calls([mock.call('nfs', export_string,
export_mnt_base,
['-o', 'intr,nfsvers=3'])])
mock_umount.assert_has_calls([mock.call(export_mnt_base)])
mock_rmdir.assert_has_calls([mock.call(export_mnt_base)])

View File

@ -17,20 +17,19 @@ import mock
from oslo_concurrency import processutils
from nova import test
from nova import utils
from nova.virt.libvirt.volume import remotefs
class RemoteFSTestCase(test.NoDBTestCase):
"""Remote filesystem operations test case."""
@mock.patch.object(utils, 'execute')
@mock.patch('oslo_utils.fileutils.ensure_tree')
def _test_mount_share(self, mock_ensure_tree, mock_execute,
@mock.patch('nova.privsep.fs.mount')
def _test_mount_share(self, mock_mount, mock_ensure_tree,
already_mounted=False):
if already_mounted:
err_msg = 'Device or resource busy'
mock_execute.side_effect = [
mock_mount.side_effect = [
None, processutils.ProcessExecutionError(err_msg)]
remotefs.mount_share(
@ -39,11 +38,11 @@ class RemoteFSTestCase(test.NoDBTestCase):
options=[mock.sentinel.mount_options])
mock_ensure_tree.assert_any_call(mock.sentinel.mount_path)
mock_execute.assert_any_call('mount', '-t', mock.sentinel.export_type,
mock.sentinel.mount_options,
mock.sentinel.export_path,
mock.sentinel.mount_path,
run_as_root=True)
mock_mount.assert_has_calls(
[mock.call(mock.sentinel.export_type,
mock.sentinel.export_path,
mock.sentinel.mount_path,
[mock.sentinel.mount_options])])
def test_mount_new_share(self):
self._test_mount_share()
@ -51,14 +50,13 @@ class RemoteFSTestCase(test.NoDBTestCase):
def test_mount_already_mounted_share(self):
self._test_mount_share(already_mounted=True)
@mock.patch.object(utils, 'execute')
def test_unmount_share(self, mock_execute):
@mock.patch('nova.privsep.fs.umount')
def test_unmount_share(self, mock_umount):
remotefs.unmount_share(
mock.sentinel.mount_path, mock.sentinel.export_path)
mock_execute.assert_any_call('umount', mock.sentinel.mount_path,
run_as_root=True, attempts=3,
delay_on_retry=True)
mock_umount.assert_has_calls(
[mock.call(mock.sentinel.mount_path)])
@mock.patch('tempfile.mkdtemp', return_value='/tmp/Mercury')
@mock.patch('nova.utils.execute')

View File

@ -30,7 +30,10 @@ class LibvirtSMBFSVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
@mock.patch.object(libvirt_utils, 'is_mounted')
@mock.patch('oslo_utils.fileutils.ensure_tree')
def test_libvirt_smbfs_driver(self, mock_ensure_tree, mock_is_mounted):
@mock.patch('nova.privsep.fs.mount')
@mock.patch('nova.privsep.fs.umount')
def test_libvirt_smbfs_driver(self, mock_umount, mock_mount,
mock_ensure_tree, mock_is_mounted):
mock_is_mounted.return_value = False
libvirt_driver = smbfs.LibvirtSMBFSVolumeDriver(self.fake_host)
@ -45,15 +48,16 @@ class LibvirtSMBFSVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
libvirt_driver.disconnect_volume(connection_info, "vde",
mock.sentinel.instance)
expected_commands = [
('mount', '-t', 'cifs', '-o', 'username=guest',
export_string, export_mnt_base),
('umount', export_mnt_base)]
mock_ensure_tree.assert_has_calls([mock.call(export_mnt_base)])
self.assertEqual(expected_commands, self.executes)
mock_mount.assert_has_calls(
[mock.call('cifs', export_string, export_mnt_base,
['-o', 'username=guest'])])
mock_umount.assert_has_calls([mock.call(export_mnt_base)])
@mock.patch.object(libvirt_utils, 'is_mounted', return_value=True)
def test_libvirt_smbfs_driver_already_mounted(self, mock_is_mounted):
@mock.patch('nova.privsep.fs.umount')
def test_libvirt_smbfs_driver_already_mounted(self, mock_umount,
mock_is_mounted):
libvirt_driver = smbfs.LibvirtSMBFSVolumeDriver(self.fake_host)
export_string = '//192.168.1.1/volumes'
export_mnt_base = os.path.join(self.mnt_base,
@ -66,9 +70,7 @@ class LibvirtSMBFSVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
libvirt_driver.disconnect_volume(connection_info, "vde",
mock.sentinel.instance)
expected_commands = [
('umount', export_mnt_base)]
self.assertEqual(expected_commands, self.executes)
mock_umount.assert_has_calls([mock.call(export_mnt_base)])
def test_libvirt_smbfs_driver_get_config(self):
libvirt_driver = smbfs.LibvirtSMBFSVolumeDriver(self.fake_host)
@ -86,8 +88,10 @@ class LibvirtSMBFSVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
@mock.patch.object(libvirt_utils, 'is_mounted')
@mock.patch('oslo_utils.fileutils.ensure_tree')
def test_libvirt_smbfs_driver_with_opts(self, mock_ensure_tree,
mock_is_mounted):
@mock.patch('nova.privsep.fs.mount')
@mock.patch('nova.privsep.fs.umount')
def test_libvirt_smbfs_driver_with_opts(self, mock_umount, mock_mount,
mock_ensure_tree, mock_is_mounted):
mock_is_mounted.return_value = False
libvirt_driver = smbfs.LibvirtSMBFSVolumeDriver(self.fake_host)
@ -104,9 +108,8 @@ class LibvirtSMBFSVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
libvirt_driver.disconnect_volume(connection_info, "vde",
mock.sentinel.instance)
expected_commands = [
('mount', '-t', 'cifs', '-o', 'user=guest,uid=107,gid=105',
export_string, export_mnt_base),
('umount', export_mnt_base)]
mock_ensure_tree.assert_has_calls([mock.call(export_mnt_base)])
self.assertEqual(expected_commands, self.executes)
mock_mount.assert_has_calls(
[mock.call('cifs', export_string, export_mnt_base,
['-o', 'user=guest,uid=107,gid=105'])])
mock_umount.assert_has_calls([mock.call(export_mnt_base)])

View File

@ -209,7 +209,8 @@ class TestVirtDisk(test.NoDBTestCase):
self.assertEqual(disk_api.setup_container(image, container_dir),
'/dev/fake')
def test_lxc_teardown_container(self):
@mock.patch('nova.privsep.fs.umount')
def test_lxc_teardown_container(self, mock_umount):
def proc_mounts(mount_point):
mount_points = {
@ -226,37 +227,40 @@ class TestVirtDisk(test.NoDBTestCase):
expected_commands = []
disk_api.teardown_container('/mnt/loop/nopart')
        expected_commands += [('losetup', '--detach', '/dev/loop0')]
        mock_umount.assert_has_calls([mock.call('/dev/loop0')])
mock_umount.reset_mock()
disk_api.teardown_container('/mnt/loop/part')
        expected_commands += [
            ('kpartx', '-d', '/dev/loop0'),
            ('losetup', '--detach', '/dev/loop0'),
        ]
        mock_umount.assert_has_calls([mock.call('/dev/mapper/loop0p1')])
mock_umount.reset_mock()
disk_api.teardown_container('/mnt/nbd/nopart')
        expected_commands += [
            ('blockdev', '--flushbufs', '/dev/nbd15'),
            ('qemu-nbd', '-d', '/dev/nbd15'),
        ]
        mock_umount.assert_has_calls([mock.call('/dev/nbd15')])
mock_umount.reset_mock()
disk_api.teardown_container('/mnt/nbd/part')
        expected_commands += [
            ('blockdev', '--flushbufs', '/dev/nbd15'),
            ('kpartx', '-d', '/dev/nbd15'),
            ('qemu-nbd', '-d', '/dev/nbd15'),
        ]
        mock_umount.assert_has_calls([mock.call('/dev/mapper/nbd15p1')])
mock_umount.reset_mock()
# NOTE(thomasem): Not adding any commands in this case, because we're
# not expecting an additional umount for LocalBlockImages. This is to
# assert that no additional commands are run in this case.
disk_api.teardown_container('/dev/volume-group/uuid_disk')
mock_umount.assert_not_called()
self.assertEqual(self.executes, expected_commands)

View File

@ -953,8 +953,10 @@ class XenAPIVMTestCase(stubs.XenAPITestBase,
@mock.patch.object(nova.privsep.path, 'makedirs')
@mock.patch.object(nova.privsep.path, 'chown')
@mock.patch.object(nova.privsep.path, 'chmod')
    @mock.patch.object(nova.privsep.fs, 'mount', return_value=(None, None))
    @mock.patch.object(nova.privsep.fs, 'umount')
    def test_spawn_netinject_file(self, umount, mount, chmod, chown, mkdir,
                                  write_file, read_link):
self.flags(flat_injected=True)
db_fakes.stub_out_db_instance_api(self, injected=True)

View File

@ -25,6 +25,7 @@ import six
import nova.conf
from nova import exception
from nova.objects import fields
import nova.privsep.fs
from nova import utils
from nova import version
@ -107,12 +108,9 @@ class ConfigDriveBuilder(object):
with utils.tempdir() as mountdir:
mounted = False
try:
                _, err = nova.privsep.fs.mount(
                    None, path, mountdir,
                    ['-o', 'loop,uid=%d,gid=%d' % (os.getuid(), os.getgid())])
if err:
raise exception.ConfigDriveMountFailed(operation='mount',
error=err)
@ -127,7 +125,7 @@ class ConfigDriveBuilder(object):
finally:
if mounted:
                    nova.privsep.fs.umount(mountdir)
def make_drive(self, path):
"""Make the config drive.

View File

@ -22,6 +22,7 @@ from oslo_utils import importutils
from nova import exception
from nova.i18n import _
import nova.privsep.fs
from nova import utils
from nova.virt.image import model as imgmodel
@ -248,8 +249,8 @@ class Mount(object):
"""Mount the device into the file system."""
LOG.debug("Mount %(dev)s on %(dir)s",
{'dev': self.mapped_device, 'dir': self.mount_dir})
        out, err = nova.privsep.fs.mount(None, self.mapped_device,
                                         self.mount_dir)
if err:
self.error = _('Failed to mount filesystem: %s') % err
LOG.debug(self.error)
@ -264,7 +265,7 @@ class Mount(object):
return
self.flush_dev()
LOG.debug("Umount %s", self.mapped_device)
        nova.privsep.fs.umount(self.mapped_device)
self.mounted = False
def flush_dev(self):

View File

@ -25,7 +25,8 @@ import six
import nova.conf
from nova import exception
from nova.i18n import _
from nova import utils
import nova.privsep.fs
import nova.privsep.path
CONF = nova.conf.CONF
LOG = log.getLogger(__name__)
@ -292,20 +293,19 @@ class _HostMountState(object):
'mountpoint': mountpoint, 'options': options,
'gen': self.generation})
with self._get_locked(mountpoint) as mount:
            if os.path.ismount(mountpoint):
LOG.debug(('Mounting %(mountpoint)s generation %(gen)s, '
'mountpoint already mounted'),
{'mountpoint': mountpoint, 'gen': self.generation})
else:
LOG.debug('Mounting %(mountpoint)s generation %(gen)s',
{'mountpoint': mountpoint, 'gen': self.generation})
fileutils.ensure_tree(mountpoint)
                try:
                    nova.privsep.fs.mount(fstype, export, mountpoint, options)
                except processutils.ProcessExecutionError:
# Check to see if mountpoint is mounted despite the error
# eg it was already mounted
if os.path.ismount(mountpoint):
@ -379,20 +379,13 @@ class _HostMountState(object):
{'mountpoint': mountpoint, 'gen': self.generation})
try:
            nova.privsep.fs.umount(mountpoint)
except processutils.ProcessExecutionError as ex:
LOG.error("Couldn't unmount %(mountpoint)s: %(reason)s",
{'mountpoint': mountpoint, 'reason': six.text_type(ex)})
if not os.path.ismount(mountpoint):
            nova.privsep.path.rmdir(mountpoint)
return False
return True

View File

@ -26,6 +26,7 @@ import six
import nova.conf
from nova.i18n import _
import nova.privsep.fs
from nova import utils
LOG = logging.getLogger(__name__)
@ -44,13 +45,8 @@ def mount_share(mount_path, export_path,
"""
fileutils.ensure_tree(mount_path)
    try:
        nova.privsep.fs.mount(export_type, export_path, mount_path, options)
except processutils.ProcessExecutionError as exc:
if 'Device or resource busy' in six.text_type(exc):
LOG.warning("%s is already mounted", export_path)
@ -65,8 +61,7 @@ def unmount_share(mount_path, export_path):
:param export_path: path of the remote export to be unmounted
"""
try:
        nova.privsep.fs.umount(mount_path)
except processutils.ProcessExecutionError as exc:
if 'target is busy' in six.text_type(exc):
LOG.debug("The share %s is still in use.", export_path)

View File

@ -53,6 +53,7 @@ from nova.i18n import _
from nova.network import model as network_model
from nova.objects import diagnostics
from nova.objects import fields as obj_fields
import nova.privsep.fs
from nova import utils
from nova.virt import configdrive
from nova.virt.disk import api as disk
@ -2440,12 +2441,11 @@ def _copy_partition(session, src_ref, dst_ref, partition, virtual_size):
run_as_root=True)
def _mount_filesystem(dev_path, mount_point):
    """Mount the device specified by dev_path at mount_point.

    :param dev_path: path of the block device to mount
    :param mount_point: directory to mount the device on
    :returns: the error output of the mount call (a string describing the
              failure), or the command's stderr on success — callers treat
              any non-empty value as a failure
    """
    try:
        # Let mount auto-detect among the common Linux filesystems; this
        # mirrors the filesystem list the previous rootwrap command used.
        _out, err = nova.privsep.fs.mount('ext2,ext3,ext4,reiserfs',
                                          dev_path, mount_point, None)
    except processutils.ProcessExecutionError as e:
        err = six.text_type(e)
    return err
@ -2478,7 +2478,7 @@ def _mounted_processing(device, key, net, metadata):
disk.inject_data_into_fs(vfs,
key, net, metadata, None, None)
finally:
                nova.privsep.fs.umount(dev_path)
else:
LOG.info('Failed to mount filesystem (expected for '
'non-linux instances): %s', err)

View File

@ -3,7 +3,9 @@ upgrade:
- |
A sys-admin privsep daemon has been added and needs to be included in your
rootwrap configuration.
- |
Calls to mount in the virt disk api no longer ignore the value of stderr.
- |
    The following commands are no longer required to be listed in your rootwrap
    configuration: cat; chown; cryptsetup; dd; mkdir; mount; ploop;
    prl_disk_tool; readlink; tee; touch; and umount.