fix unmounting of LXC containers
There were two issues here.
1. A single global mount object was shared across all instances,
so the last mounted instance was always the one unmounted.
2. Even with only a single LXC instance in use,
the global object would be lost on a restart of Nova.
Therefore we now rebuild the mount object's internal state
by passing the mount point to destroy_container()
and querying /proc/mounts for the device backing that mount point.
Fixes bug: 971621
Change-Id: I5442442f00d93f5e8b82f492d62918419db5cd3b
Cherry-picked: a5184d5dbf
This commit is contained in:
parent
09217abddc
commit
272b98d718
|
@ -15,6 +15,8 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import test
|
||||
|
@ -90,6 +92,70 @@ class TestVirtDisk(test.TestCase):
|
|||
def setUp(self):
|
||||
super(TestVirtDisk, self).setUp()
|
||||
|
||||
self.executes = []
|
||||
|
||||
def fake_execute(*cmd, **kwargs):
|
||||
self.executes.append(cmd)
|
||||
return None, None
|
||||
|
||||
self.stubs.Set(utils, 'execute', fake_execute)
|
||||
|
||||
def test_lxc_destroy_container(self):
|
||||
|
||||
def proc_mounts(self, mount_point):
|
||||
mount_points = {
|
||||
'/mnt/loop/nopart': '/dev/loop0',
|
||||
'/mnt/loop/part': '/dev/mapper/loop0p1',
|
||||
'/mnt/nbd/nopart': '/dev/nbd15',
|
||||
'/mnt/nbd/part': '/dev/mapper/nbd15p1',
|
||||
'/mnt/guestfs': 'guestmount',
|
||||
}
|
||||
return mount_points[mount_point]
|
||||
|
||||
self.stubs.Set(os.path, 'exists', lambda _: True)
|
||||
self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
|
||||
expected_commands = []
|
||||
|
||||
disk_api.destroy_container('/mnt/loop/nopart')
|
||||
expected_commands += [
|
||||
('umount', '/dev/loop0'),
|
||||
('losetup', '--detach', '/dev/loop0'),
|
||||
]
|
||||
|
||||
disk_api.destroy_container('/mnt/loop/part')
|
||||
expected_commands += [
|
||||
('umount', '/dev/mapper/loop0p1'),
|
||||
('kpartx', '-d', '/dev/loop0'),
|
||||
('losetup', '--detach', '/dev/loop0'),
|
||||
]
|
||||
|
||||
disk_api.destroy_container('/mnt/nbd/nopart')
|
||||
expected_commands += [
|
||||
('umount', '/dev/nbd15'),
|
||||
('qemu-nbd', '-d', '/dev/nbd15'),
|
||||
]
|
||||
|
||||
disk_api.destroy_container('/mnt/nbd/part')
|
||||
expected_commands += [
|
||||
('umount', '/dev/mapper/nbd15p1'),
|
||||
('kpartx', '-d', '/dev/nbd15'),
|
||||
('qemu-nbd', '-d', '/dev/nbd15'),
|
||||
]
|
||||
|
||||
disk_api.destroy_container('/mnt/guestfs')
|
||||
expected_commands += [
|
||||
('fusermount', '-u', '/mnt/guestfs'),
|
||||
]
|
||||
# It's not worth trying to match the last timeout command
|
||||
self.executes.pop()
|
||||
|
||||
self.assertEqual(self.executes, expected_commands)
|
||||
|
||||
|
||||
class TestVirtDiskPaths(test.TestCase):
|
||||
def setUp(self):
|
||||
super(TestVirtDiskPaths, self).setUp()
|
||||
|
||||
real_execute = utils.execute
|
||||
|
||||
def nonroot_execute(*cmd_parts, **kwargs):
|
||||
|
|
|
@ -144,6 +144,8 @@ def unbind(target):
|
|||
class _DiskImage(object):
|
||||
"""Provide operations on a disk image file."""
|
||||
|
||||
tmp_prefix = 'openstack-disk-mount-tmp'
|
||||
|
||||
def __init__(self, image, partition=None, use_cow=False, mount_dir=None):
|
||||
# These passed to each mounter
|
||||
self.image = image
|
||||
|
@ -164,18 +166,51 @@ class _DiskImage(object):
|
|||
if not self.handlers:
|
||||
raise exception.Error(_('no capable image handler configured'))
|
||||
|
||||
if mount_dir:
|
||||
# Note the os.path.ismount() shortcut doesn't
|
||||
# work with libguestfs due to permissions issues.
|
||||
device = self._device_for_path(mount_dir)
|
||||
if device:
|
||||
self._reset(device)
|
||||
|
||||
@staticmethod
|
||||
def _device_for_path(path):
|
||||
device = None
|
||||
with open("/proc/mounts", 'r') as ifp:
|
||||
for line in ifp:
|
||||
fields = line.split()
|
||||
if fields[1] == path:
|
||||
device = fields[0]
|
||||
break
|
||||
return device
|
||||
|
||||
def _reset(self, device):
|
||||
"""Reset internal state for a previously mounted directory."""
|
||||
mounter_cls = self._handler_class(device=device)
|
||||
mounter = mounter_cls(image=self.image,
|
||||
partition=self.partition,
|
||||
mount_dir=self.mount_dir,
|
||||
device=device)
|
||||
self._mounter = mounter
|
||||
|
||||
mount_name = os.path.basename(self.mount_dir or '')
|
||||
self._mkdir = mount_name.startswith(self.tmp_prefix)
|
||||
|
||||
@property
|
||||
def errors(self):
|
||||
"""Return the collated errors from all operations."""
|
||||
return '\n--\n'.join([''] + self._errors)
|
||||
|
||||
@staticmethod
|
||||
def _handler_class(mode):
|
||||
"""Look up the appropriate class to use based on MODE."""
|
||||
def _handler_class(mode=None, device=None):
|
||||
"""Look up the appropriate class to use based on MODE or DEVICE."""
|
||||
for cls in (loop.Mount, nbd.Mount, guestfs.Mount):
|
||||
if cls.mode == mode:
|
||||
if mode and cls.mode == mode:
|
||||
return cls
|
||||
raise exception.Error(_("unknown disk image handler: %s") % mode)
|
||||
elif device and cls.device_id_string in device:
|
||||
return cls
|
||||
msg = _("no disk image handler for: %s") % mode or device
|
||||
raise exception.Error(msg)
|
||||
|
||||
def mount(self):
|
||||
"""Mount a disk image, using the object attributes.
|
||||
|
@ -189,7 +224,7 @@ class _DiskImage(object):
|
|||
raise exception.Error(_('image already mounted'))
|
||||
|
||||
if not self.mount_dir:
|
||||
self.mount_dir = tempfile.mkdtemp()
|
||||
self.mount_dir = tempfile.mkdtemp(prefix=self.tmp_prefix)
|
||||
self._mkdir = True
|
||||
|
||||
try:
|
||||
|
@ -258,35 +293,29 @@ def inject_files(image, files, partition=None, use_cow=False):
|
|||
raise exception.Error(img.errors)
|
||||
|
||||
|
||||
def setup_container(image, container_dir=None, use_cow=False):
|
||||
def setup_container(image, container_dir, use_cow=False):
|
||||
"""Setup the LXC container.
|
||||
|
||||
It will mount the loopback image to the container directory in order
|
||||
to create the root filesystem for the container.
|
||||
|
||||
LXC does not support qcow2 images yet.
|
||||
"""
|
||||
try:
|
||||
img = _DiskImage(image=image, use_cow=use_cow, mount_dir=container_dir)
|
||||
if img.mount():
|
||||
return img
|
||||
else:
|
||||
if not img.mount():
|
||||
raise exception.Error(img.errors)
|
||||
except Exception, exn:
|
||||
LOG.exception(_('Failed to mount filesystem: %s'), exn)
|
||||
|
||||
|
||||
def destroy_container(img):
|
||||
def destroy_container(container_dir):
|
||||
"""Destroy the container once it terminates.
|
||||
|
||||
It will umount the container that is mounted,
|
||||
and delete any linked devices.
|
||||
|
||||
LXC does not support qcow2 images yet.
|
||||
"""
|
||||
try:
|
||||
if img:
|
||||
img.umount()
|
||||
img = _DiskImage(image=None, mount_dir=container_dir)
|
||||
img.umount()
|
||||
except Exception, exn:
|
||||
LOG.exception(_('Failed to remove container: %s'), exn)
|
||||
|
||||
|
|
|
@ -25,6 +25,7 @@ from nova.virt.disk import mount
|
|||
class Mount(mount.Mount):
|
||||
"""libguestfs support for arbitrary images."""
|
||||
mode = 'guestfs'
|
||||
device_id_string = 'guest'
|
||||
|
||||
def map_dev(self):
|
||||
self.mapped = True
|
||||
|
|
|
@ -22,6 +22,7 @@ from nova.virt.disk import mount
|
|||
class Mount(mount.Mount):
|
||||
"""loop back support for raw images."""
|
||||
mode = 'loop'
|
||||
device_id_string = mode
|
||||
|
||||
def get_dev(self):
|
||||
out, err = utils.trycmd('losetup', '--find', '--show', self.image,
|
||||
|
|
|
@ -30,7 +30,7 @@ class Mount(object):
|
|||
to be called in that order.
|
||||
"""
|
||||
|
||||
def __init__(self, image, mount_dir, partition=None):
|
||||
def __init__(self, image, mount_dir, partition=None, device=None):
|
||||
|
||||
# Input
|
||||
self.image = image
|
||||
|
@ -42,7 +42,24 @@ class Mount(object):
|
|||
|
||||
# Internal
|
||||
self.linked = self.mapped = self.mounted = False
|
||||
self.device = self.mapped_device = None
|
||||
self.device = self.mapped_device = device
|
||||
|
||||
# Reset to mounted dir if possible
|
||||
self.reset_dev()
|
||||
|
||||
def reset_dev(self):
|
||||
"""Reset device paths to allow unmounting."""
|
||||
if not self.device:
|
||||
return
|
||||
|
||||
self.linked = self.mapped = self.mounted = True
|
||||
|
||||
device = self.device
|
||||
if os.path.isabs(device) and os.path.exists(device):
|
||||
if device.startswith('/dev/mapper/'):
|
||||
device = os.path.basename(device)
|
||||
device, self.partition = device.rsplit('p', 1)
|
||||
self.device = os.path.join('/dev', device)
|
||||
|
||||
def get_dev(self):
|
||||
"""Make the image available as a block device in the file system."""
|
||||
|
|
|
@ -40,6 +40,7 @@ FLAGS.register_opts(nbd_opts)
|
|||
class Mount(mount.Mount):
|
||||
"""qemu-nbd support disk images."""
|
||||
mode = 'nbd'
|
||||
device_id_string = mode
|
||||
|
||||
# NOTE(padraig): There are three issues with this nbd device handling
|
||||
# 1. max_nbd_devices should be inferred (#861504)
|
||||
|
@ -69,7 +70,11 @@ class Mount(mount.Mount):
|
|||
return device
|
||||
|
||||
def _free_nbd(self, device):
|
||||
self._DEVICES.append(device)
|
||||
# The device could already be present if unget_dev
|
||||
# is called right after a nova restart
|
||||
# (when destroying an LXC container for example).
|
||||
if not device in self._DEVICES:
|
||||
self._DEVICES.append(device)
|
||||
|
||||
def get_dev(self):
|
||||
device = self._allocate_nbd()
|
||||
|
|
|
@ -223,7 +223,6 @@ class LibvirtConnection(driver.ComputeDriver):
|
|||
self._host_state = None
|
||||
self._initiator = None
|
||||
self._wrapped_conn = None
|
||||
self.container = None
|
||||
self.read_only = read_only
|
||||
if FLAGS.firewall_driver not in firewall.drivers:
|
||||
FLAGS.set_default('firewall_driver', firewall.drivers[0])
|
||||
|
@ -491,7 +490,10 @@ class LibvirtConnection(driver.ComputeDriver):
|
|||
LOG.info(_('Deleting instance files %(target)s') % locals(),
|
||||
instance=instance)
|
||||
if FLAGS.libvirt_type == 'lxc':
|
||||
disk.destroy_container(self.container)
|
||||
container_dir = os.path.join(FLAGS.instances_path,
|
||||
instance['name'],
|
||||
'rootfs')
|
||||
disk.destroy_container(container_dir=container_dir)
|
||||
if os.path.exists(target):
|
||||
shutil.rmtree(target)
|
||||
|
||||
|
@ -1209,7 +1211,9 @@ class LibvirtConnection(driver.ComputeDriver):
|
|||
libvirt_utils.write_to_file(basepath('libvirt.xml'), libvirt_xml)
|
||||
|
||||
if FLAGS.libvirt_type == 'lxc':
|
||||
container_dir = '%s/rootfs' % basepath(suffix='')
|
||||
container_dir = os.path.join(FLAGS.instances_path,
|
||||
instance['name'],
|
||||
'rootfs')
|
||||
libvirt_utils.ensure_tree(container_dir)
|
||||
|
||||
# NOTE(dprince): for rescue console.log may already exist... chown it.
|
||||
|
@ -1405,9 +1409,9 @@ class LibvirtConnection(driver.ComputeDriver):
|
|||
instance=instance)
|
||||
|
||||
if FLAGS.libvirt_type == 'lxc':
|
||||
self.container = disk.setup_container(basepath('disk'),
|
||||
container_dir=container_dir,
|
||||
use_cow=FLAGS.use_cow_images)
|
||||
disk.setup_container(basepath('disk'),
|
||||
container_dir=container_dir,
|
||||
use_cow=FLAGS.use_cow_images)
|
||||
|
||||
if FLAGS.libvirt_type == 'uml':
|
||||
libvirt_utils.chown(basepath('disk'), 'root')
|
||||
|
|
Loading…
Reference in New Issue