PowerVM Driver: Localdisk

Add PowerVM Driver support for local ephemeral disk.

Change-Id: I9251287451bc2f800ef4a230c3c01598f37b5ad3
Blueprint: powervm-localdisk
esberglu 2018-03-02 11:41:13 -06:00 committed by Eric Fried
parent 549e5a2226
commit 026c2a61d0
8 changed files with 594 additions and 13 deletions


@@ -674,6 +674,10 @@ driver-impl-ironic=missing
 driver-impl-libvirt-vz-vm=complete
 driver-impl-libvirt-vz-ct=complete
 driver-impl-powervm=complete
+driver-notes-powervm=When using the localdisk disk driver, snapshot is only
+  supported if I/O is being hosted by the management partition. If hosting I/O
+  on traditional VIOS, we are limited by the fact that a VSCSI device can't be
+  mapped to two partitions (the VIOS and the management) at once.
 [operation.suspend]
 title=Suspend instance


@@ -1,5 +1,4 @@
 # Copyright 2018 IBM Corporation
-# All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -34,8 +33,26 @@ powervm_opts = [
 compute power given to each vCPU. E.g. A value of 1.0 means a
 whole physical processor, whereas 0.05 means 1/20th of a physical
 processor.
-"""
-),
+"""),
+    cfg.StrOpt('disk_driver',
+               choices=['localdisk', 'ssp'], ignore_case=True,
+               default='localdisk',
+               help="""The disk driver to use for PowerVM disks. PowerVM
+provides support for localdisk and PowerVM Shared Storage
+Pool disk drivers.
+Related options:
+* volume_group_name - required when using localdisk
+"""),
+    cfg.StrOpt('volume_group_name',
+               default='',
+               help='Volume Group to use for block device operations. If '
+                    'disk_driver is localdisk, then this attribute must be '
+                    'specified. It is strongly recommended NOT to use '
+                    'rootvg since that is used by the management partition '
+                    'and filling it will cause failures.'),
 ]
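In practice the two new options land in the [powervm] section of nova.conf on the compute node. A minimal sketch of a localdisk configuration (the volume group name data_vg is a placeholder, not a value from this change):

[powervm]
disk_driver = localdisk
# Required for localdisk; do not use rootvg.
volume_group_name = data_vg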


@@ -2269,3 +2269,8 @@ class DeviceDeletionException(NovaException):
     msg_fmt = _("Device %(devpath)s is still present on the management "
                 "partition after attempting to delete it. Polled %(polls)d "
                 "times over %(timeout)d seconds.")
+
+
+class OptRequiredIfOtherOptValue(NovaException):
+    msg_fmt = _("The %(then_opt)s option is required if %(if_opt)s is "
+                "specified as '%(if_value)s'.")
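As a worked example, this is how the new exception renders when the localdisk driver is selected without a volume group; the keyword values mirror the call site added in localdisk.py later in this commit:

raise exception.OptRequiredIfOtherOptValue(
    if_opt='disk_driver', if_value='localdisk',
    then_opt='volume_group_name')
# Renders as: "The volume_group_name option is required if disk_driver is
# specified as 'localdisk'."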


@@ -0,0 +1,312 @@
# Copyright 2015, 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures
import mock
from pypowervm import const as pvm_const
from pypowervm.tasks import storage as tsk_stg
from pypowervm.utils import transaction as pvm_tx
from pypowervm.wrappers import storage as pvm_stg
from pypowervm.wrappers import virtual_io_server as pvm_vios

from nova import exception
from nova import test
from nova.tests import uuidsentinel as uuids
from nova.virt.powervm.disk import driver as disk_dvr
from nova.virt.powervm.disk import localdisk


class TestLocalDisk(test.NoDBTestCase):
    """Unit Tests for the LocalDisk storage driver."""

    def setUp(self):
        super(TestLocalDisk, self).setUp()
        self.adpt = mock.Mock()

        # The mock VIOS needs to have scsi_mappings as a list. Internals are
        # set by individual test cases as needed.
        smaps = [mock.Mock()]
        self.vio_wrap = mock.create_autospec(
            pvm_vios.VIOS, instance=True, scsi_mappings=smaps,
            uuid='vios-uuid')

        # Return the mgmt uuid.
        self.mgmt_uuid = self.useFixture(fixtures.MockPatch(
            'nova.virt.powervm.mgmt.mgmt_uuid', autospec=True)).mock
        self.mgmt_uuid.return_value = 'mgmt_uuid'

        self.pvm_uuid = self.useFixture(fixtures.MockPatch(
            'nova.virt.powervm.vm.get_pvm_uuid')).mock
        self.pvm_uuid.return_value = 'pvm_uuid'

        # Set up for the mocks for the disk adapter.
        self.mock_find_vg = self.useFixture(fixtures.MockPatch(
            'pypowervm.tasks.storage.find_vg', autospec=True)).mock
        self.vg_uuid = uuids.vg_uuid
        self.vg = mock.Mock(spec=pvm_stg.VG, uuid=self.vg_uuid)
        self.mock_find_vg.return_value = (self.vio_wrap, self.vg)

        self.flags(volume_group_name='fakevg', group='powervm')

        # Mock the feed tasks.
        self.mock_afs = self.useFixture(fixtures.MockPatch(
            'pypowervm.utils.transaction.FeedTask.add_functor_subtask',
            autospec=True)).mock
        self.mock_wtsk = mock.create_autospec(
            pvm_tx.WrapperTask, instance=True)
        self.mock_wtsk.configure_mock(wrapper=self.vio_wrap)
        self.mock_ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
        self.mock_ftsk.configure_mock(
            wrapper_tasks={'vios-uuid': self.mock_wtsk})

        # Create the adapter.
        self.ld_adpt = localdisk.LocalStorage(self.adpt, 'host_uuid')

    def test_init(self):
        # Localdisk adapter already initialized in setUp()
        # From super __init__()
        self.assertEqual(self.adpt, self.ld_adpt._adapter)
        self.assertEqual('host_uuid', self.ld_adpt._host_uuid)
        self.assertEqual('mgmt_uuid', self.ld_adpt.mp_uuid)

        # From LocalStorage __init__()
        self.assertEqual('fakevg', self.ld_adpt.vg_name)
        self.mock_find_vg.assert_called_once_with(self.adpt, 'fakevg')
        self.assertEqual('vios-uuid', self.ld_adpt._vios_uuid)
        self.assertEqual(self.vg_uuid, self.ld_adpt.vg_uuid)
        self.assertFalse(self.ld_adpt.capabilities['shared_storage'])
        self.assertFalse(self.ld_adpt.capabilities['has_imagecache'])
        self.assertFalse(self.ld_adpt.capabilities['snapshot'])

        # Assert snapshot capability is true if hosting I/O on mgmt partition.
        self.mgmt_uuid.return_value = 'vios-uuid'
        self.ld_adpt = localdisk.LocalStorage(self.adpt, 'host_uuid')
        self.assertTrue(self.ld_adpt.capabilities['snapshot'])

        # Assert volume_group_name is required.
        self.flags(volume_group_name=None, group='powervm')
        self.assertRaises(exception.OptRequiredIfOtherOptValue,
                          localdisk.LocalStorage, self.adpt, 'host_uuid')

    def test_vios_uuids(self):
        self.assertEqual(['vios-uuid'], self.ld_adpt._vios_uuids)

    @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
    @mock.patch('nova.virt.powervm.disk.driver.DiskAdapter._get_disk_name')
    def test_disk_match_func(self, mock_disk_name, mock_gen_match):
        mock_disk_name.return_value = 'disk_name'
        func = self.ld_adpt._disk_match_func('disk_type', 'instance')
        mock_disk_name.assert_called_once_with(
            'disk_type', 'instance', short=True)
        mock_gen_match.assert_called_once_with(
            pvm_stg.VDisk, names=['disk_name'])
        self.assertEqual(mock_gen_match.return_value, func)

    @mock.patch('nova.virt.powervm.disk.localdisk.LocalStorage._get_vg_wrap')
    def test_capacity(self, mock_vg):
        """Tests the capacity methods."""
        mock_vg.return_value = mock.Mock(
            capacity='5120', available_size='2048')
        self.assertEqual(5120.0, self.ld_adpt.capacity)
        self.assertEqual(3072.0, self.ld_adpt.capacity_used)

    @mock.patch('pypowervm.tasks.storage.rm_vg_storage', autospec=True)
    @mock.patch('nova.virt.powervm.disk.localdisk.LocalStorage._get_vg_wrap')
    def test_delete_disks(self, mock_vg, mock_rm_vg):
        self.ld_adpt.delete_disks('storage_elems')
        mock_vg.assert_called_once_with()
        mock_rm_vg.assert_called_once_with(
            mock_vg.return_value, vdisks='storage_elems')

    @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
    @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True)
    @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
    def test_detach_disk(self, mock_match_fn, mock_rm_maps, mock_vios):
        mock_match_fn.return_value = 'match_func'
        mock_vios.return_value = self.vio_wrap
        mock_map1 = mock.Mock(backing_storage='back_stor1')
        mock_map2 = mock.Mock(backing_storage='back_stor2')
        mock_rm_maps.return_value = [mock_map1, mock_map2]

        back_stores = self.ld_adpt.detach_disk('instance')

        self.assertEqual(['back_stor1', 'back_stor2'], back_stores)
        mock_match_fn.assert_called_once_with(pvm_stg.VDisk)
        mock_vios.assert_called_once_with(
            self.ld_adpt._adapter, uuid='vios-uuid',
            xag=[pvm_const.XAG.VIO_SMAP])
        mock_rm_maps.assert_called_with(self.vio_wrap, 'pvm_uuid',
                                        match_func=mock_match_fn.return_value)
        mock_vios.return_value.update.assert_called_once()

    @mock.patch('pypowervm.tasks.scsi_mapper.remove_vdisk_mapping',
                autospec=True)
    def test_disconnect_disk_from_mgmt(self, mock_rm_vdisk_map):
        self.ld_adpt.disconnect_disk_from_mgmt('vios-uuid', 'disk_name')
        mock_rm_vdisk_map.assert_called_with(
            self.ld_adpt._adapter, 'vios-uuid', 'mgmt_uuid',
            disk_names=['disk_name'])

    @mock.patch('nova.virt.powervm.disk.localdisk.LocalStorage._upload_image')
    def test_create_disk_from_image(self, mock_upload_image):
        mock_image_meta = mock.Mock()
        mock_image_meta.size = 30
        mock_upload_image.return_value = 'mock_img'

        self.ld_adpt.create_disk_from_image(
            'context', 'instance', mock_image_meta)

        mock_upload_image.assert_called_once_with(
            'context', 'instance', mock_image_meta)

    @mock.patch('nova.image.api.API.download')
    @mock.patch('nova.virt.powervm.disk.driver.IterableToFileAdapter')
    @mock.patch('pypowervm.tasks.storage.upload_new_vdisk')
    @mock.patch('nova.virt.powervm.disk.driver.DiskAdapter._get_disk_name')
    def test_upload_image(self, mock_name, mock_upload, mock_iter, mock_dl):
        mock_meta = mock.Mock(id='1', size=1073741824, disk_format='raw')
        mock_upload.return_value = ['mock_img']

        mock_img = self.ld_adpt._upload_image('context', 'inst', mock_meta)

        self.assertEqual('mock_img', mock_img)
        mock_name.assert_called_once_with(
            disk_dvr.DiskType.BOOT, 'inst', short=True)
        mock_dl.assert_called_once_with('context', '1')
        mock_iter.assert_called_once_with(mock_dl.return_value)
        mock_upload.assert_called_once_with(
            self.adpt, 'vios-uuid', self.vg_uuid, mock_iter.return_value,
            mock_name.return_value, 1073741824, d_size=1073741824,
            upload_type=tsk_stg.UploadType.IO_STREAM, file_format='raw')

    @mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True)
    @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping',
                autospec=True)
    def test_attach_disk(self, mock_bldmap, mock_addmap):
        def test_afs(add_func):
            # Verify the internal add_func
            self.assertEqual(mock_addmap.return_value,
                             add_func(self.vio_wrap))
            mock_bldmap.assert_called_once_with(
                self.ld_adpt._host_uuid, self.vio_wrap, 'pvm_uuid',
                'disk_info')
            mock_addmap.assert_called_once_with(
                self.vio_wrap, mock_bldmap.return_value)

        self.mock_wtsk.add_functor_subtask.side_effect = test_afs

        self.ld_adpt.attach_disk('instance', 'disk_info', self.mock_ftsk)

        self.pvm_uuid.assert_called_once_with('instance')
        self.assertEqual(1, self.mock_wtsk.add_functor_subtask.call_count)

    @mock.patch('pypowervm.wrappers.storage.VG.get')
    def test_get_vg_wrap(self, mock_vg):
        vg_wrap = self.ld_adpt._get_vg_wrap()
        self.assertEqual(mock_vg.return_value, vg_wrap)
        mock_vg.assert_called_once_with(
            self.adpt, uuid=self.vg_uuid, parent_type=pvm_vios.VIOS,
            parent_uuid='vios-uuid')

    @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
    @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
    @mock.patch('nova.virt.powervm.disk.localdisk.LocalStorage.'
                '_disk_match_func')
    def test_get_bootdisk_path(self, mock_match_fn, mock_findmaps,
                               mock_vios):
        mock_vios.return_value = self.vio_wrap

        # No maps found
        mock_findmaps.return_value = None
        devname = self.ld_adpt.get_bootdisk_path('inst', 'vios_uuid')
        self.pvm_uuid.assert_called_once_with('inst')
        mock_match_fn.assert_called_once_with(disk_dvr.DiskType.BOOT, 'inst')
        mock_vios.assert_called_once_with(
            self.adpt, uuid='vios_uuid', xag=[pvm_const.XAG.VIO_SMAP])
        mock_findmaps.assert_called_once_with(
            self.vio_wrap.scsi_mappings,
            client_lpar_id='pvm_uuid',
            match_func=mock_match_fn.return_value)
        self.assertIsNone(devname)

        # Good map
        mock_lu = mock.Mock()
        mock_lu.server_adapter.backing_dev_name = 'devname'
        mock_findmaps.return_value = [mock_lu]
        devname = self.ld_adpt.get_bootdisk_path('inst', 'vios_uuid')
        self.assertEqual('devname', devname)

    @mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
    @mock.patch('pypowervm.tasks.scsi_mapper.find_maps')
    @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
    @mock.patch('pypowervm.wrappers.storage.VG.get', new=mock.Mock())
    def test_get_bootdisk_iter(self, mock_vios, mock_find_maps, mock_lw):
        inst, lpar_wrap, vios1 = self._bld_mocks_for_instance_disk()
        mock_lw.return_value = lpar_wrap

        # Good path
        mock_vios.return_value = vios1
        for vdisk, vios in self.ld_adpt._get_bootdisk_iter(inst):
            self.assertEqual(vios1.scsi_mappings[0].backing_storage, vdisk)
            self.assertEqual(vios1.uuid, vios.uuid)
        mock_vios.assert_called_once_with(
            self.adpt, uuid='vios-uuid', xag=[pvm_const.XAG.VIO_SMAP])

        # Not found, no storage of that name.
        mock_vios.reset_mock()
        mock_find_maps.return_value = []
        for vdisk, vios in self.ld_adpt._get_bootdisk_iter(inst):
            self.fail('Should not have found any storage elements.')
        mock_vios.assert_called_once_with(
            self.adpt, uuid='vios-uuid', xag=[pvm_const.XAG.VIO_SMAP])

    @mock.patch('nova.virt.powervm.disk.driver.DiskAdapter.'
                '_get_bootdisk_iter', autospec=True)
    @mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
    @mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping',
                autospec=True)
    def test_connect_instance_disk_to_mgmt(self, mock_add, mock_lw,
                                           mock_iter):
        inst, lpar_wrap, vios1 = self._bld_mocks_for_instance_disk()
        mock_lw.return_value = lpar_wrap

        # Good path
        mock_iter.return_value = [(vios1.scsi_mappings[0].backing_storage,
                                   vios1)]
        vdisk, vios = self.ld_adpt.connect_instance_disk_to_mgmt(inst)
        self.assertEqual(vios1.scsi_mappings[0].backing_storage, vdisk)
        self.assertIs(vios1, vios)
        self.assertEqual(1, mock_add.call_count)
        mock_add.assert_called_with('host_uuid', vios, 'mgmt_uuid', vdisk)

        # add_vscsi_mapping raises. Show-stopper since only one VIOS.
        mock_add.reset_mock()
        mock_add.side_effect = Exception
        self.assertRaises(exception.InstanceDiskMappingFailed,
                          self.ld_adpt.connect_instance_disk_to_mgmt, inst)
        self.assertEqual(1, mock_add.call_count)

        # Not found
        mock_add.reset_mock()
        mock_iter.return_value = []
        self.assertRaises(exception.InstanceDiskMappingFailed,
                          self.ld_adpt.connect_instance_disk_to_mgmt, inst)
        self.assertFalse(mock_add.called)

    def _bld_mocks_for_instance_disk(self):
        inst = mock.Mock()
        inst.name = 'Name Of Instance'
        inst.uuid = uuids.inst_uuid
        lpar_wrap = mock.Mock()
        lpar_wrap.id = 2
        vios1 = self.vio_wrap
        back_stor_name = 'b_Name_Of__' + inst.uuid[:4]
        vios1.scsi_mappings[0].backing_storage.name = back_stor_name
        return inst, lpar_wrap, vios1


@@ -73,10 +73,10 @@ class TestPowerVMDriver(test.NoDBTestCase):
     @mock.patch('nova.image.API')
     @mock.patch('pypowervm.tasks.storage.ComprehensiveScrub', autospec=True)
-    @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter')
+    @mock.patch('oslo_utils.importutils.import_object_ns', autospec=True)
     @mock.patch('pypowervm.wrappers.managed_system.System', autospec=True)
     @mock.patch('pypowervm.tasks.partition.validate_vios_ready', autospec=True)
-    def test_init_host(self, mock_vvr, mock_sys, mock_ssp, mock_scrub,
+    def test_init_host(self, mock_vvr, mock_sys, mock_import, mock_scrub,
                        mock_img):
         mock_hostw = mock.Mock(uuid='uuid')
         mock_sys.get.return_value = [mock_hostw]
@@ -90,8 +90,10 @@ class TestPowerVMDriver(test.NoDBTestCase):
         self.assertEqual(mock_hostw, self.drv.host_wrapper)
         mock_scrub.assert_called_once_with(self.drv.adapter)
         mock_scrub.return_value.execute.assert_called_once_with()
-        mock_ssp.assert_called_once_with(self.drv.adapter, 'uuid')
-        self.assertEqual(mock_ssp.return_value, self.drv.disk_dvr)
+        mock_import.assert_called_once_with(
+            'nova.virt.powervm.disk', 'localdisk.LocalStorage',
+            self.drv.adapter, 'uuid')
+        self.assertEqual(mock_import.return_value, self.drv.disk_dvr)
         mock_img.assert_called_once_with()
         self.assertEqual(mock_img.return_value, self.drv.image_api)
@@ -309,6 +311,10 @@ class TestPowerVMDriver(test.NoDBTestCase):
         mock_rm.assert_called_once_with(
             stg_elem='stg_elem', vios_wrap='vios_wrap', disk_path='disk_path')
 
+        self.drv.disk_dvr.capabilities = {'snapshot': False}
+        self.assertRaises(exception.NotSupportedWithOption, self.drv.snapshot,
+                          'context', self.inst, 'image_id',
+                          'update_task_state')
+
     def test_power_on(self):
         self.drv.power_on('context', self.inst, 'network_info')
         self.pwron.assert_called_once_with(self.adp, self.inst)


@@ -0,0 +1,211 @@
# Copyright 2013 OpenStack Foundation
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import oslo_log.log as logging
from pypowervm import const as pvm_const
from pypowervm.tasks import scsi_mapper as tsk_map
from pypowervm.tasks import storage as tsk_stg
from pypowervm.wrappers import storage as pvm_stg
from pypowervm.wrappers import virtual_io_server as pvm_vios

from nova import conf
from nova import exception
from nova import image
from nova.virt.powervm.disk import driver as disk_dvr
from nova.virt.powervm import vm

LOG = logging.getLogger(__name__)

CONF = conf.CONF

IMAGE_API = image.API()


class LocalStorage(disk_dvr.DiskAdapter):

    def __init__(self, adapter, host_uuid):
        super(LocalStorage, self).__init__(adapter, host_uuid)

        self.capabilities = {
            'shared_storage': False,
            'has_imagecache': False,
            # NOTE(efried): 'snapshot' capability set dynamically below.
        }

        # Query to get the Volume Group UUID
        if not CONF.powervm.volume_group_name:
            raise exception.OptRequiredIfOtherOptValue(
                if_opt='disk_driver', if_value='localdisk',
                then_opt='volume_group_name')
        self.vg_name = CONF.powervm.volume_group_name
        vios_w, vg_w = tsk_stg.find_vg(adapter, self.vg_name)
        self._vios_uuid = vios_w.uuid
        self.vg_uuid = vg_w.uuid

        # Set the 'snapshot' capability dynamically. If we're hosting I/O on
        # the management partition, we can snapshot. If we're hosting I/O on
        # traditional VIOS, we are limited by the fact that a VSCSI device
        # can't be mapped to two partitions (the VIOS and the management) at
        # once.
        self.capabilities['snapshot'] = self.mp_uuid == self._vios_uuid

        LOG.info("Local Storage driver initialized: volume group: '%s'",
                 self.vg_name)

    @property
    def _vios_uuids(self):
        """List the UUIDs of the Virtual I/O Servers hosting the storage.

        For localdisk, there's only one.
        """
        return [self._vios_uuid]

    @staticmethod
    def _disk_match_func(disk_type, instance):
        """Return a matching function to locate the disk for an instance.

        :param disk_type: One of the DiskType enum values.
        :param instance: The instance whose disk is to be found.
        :return: Callable suitable for the match_func parameter of the
                 pypowervm.tasks.scsi_mapper.find_maps method.
        """
        disk_name = LocalStorage._get_disk_name(
            disk_type, instance, short=True)
        return tsk_map.gen_match_func(pvm_stg.VDisk, names=[disk_name])

    @property
    def capacity(self):
        """Capacity of the storage in gigabytes."""
        vg_wrap = self._get_vg_wrap()
        return float(vg_wrap.capacity)

    @property
    def capacity_used(self):
        """Capacity of the storage in gigabytes that is used."""
        vg_wrap = self._get_vg_wrap()
        # Subtract available from capacity
        return float(vg_wrap.capacity) - float(vg_wrap.available_size)

    def delete_disks(self, storage_elems):
        """Removes the specified disks.

        :param storage_elems: A list of the storage elements that are to be
                              deleted. Derived from the return value from
                              detach_disk.
        """
        # All of localdisk is done against the volume group. So reload
        # that (to get new etag) and then update against it.
        tsk_stg.rm_vg_storage(self._get_vg_wrap(), vdisks=storage_elems)

    def detach_disk(self, instance):
        """Detaches the storage adapters from the image disk.

        :param instance: Instance to disconnect the image for.
        :return: A list of all the backing storage elements that were
                 disconnected from the I/O Server and VM.
        """
        lpar_uuid = vm.get_pvm_uuid(instance)

        # Build the match function
        match_func = tsk_map.gen_match_func(pvm_stg.VDisk)

        vios_w = pvm_vios.VIOS.get(
            self._adapter, uuid=self._vios_uuid, xag=[pvm_const.XAG.VIO_SMAP])

        # Remove the mappings.
        mappings = tsk_map.remove_maps(
            vios_w, lpar_uuid, match_func=match_func)

        # Update the VIOS with the removed mappings.
        vios_w.update()

        return [x.backing_storage for x in mappings]

    def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):
        """Disconnect a disk from the management partition.

        :param vios_uuid: The UUID of the Virtual I/O Server serving the
                          mapping.
        :param disk_name: The name of the disk to unmap.
        """
        tsk_map.remove_vdisk_mapping(self._adapter, vios_uuid, self.mp_uuid,
                                     disk_names=[disk_name])
        LOG.info("Unmapped boot disk %(disk_name)s from the management "
                 "partition from Virtual I/O Server %(vios_name)s.",
                 {'disk_name': disk_name, 'mp_uuid': self.mp_uuid,
                  'vios_name': vios_uuid})

    def create_disk_from_image(self, context, instance, image_meta):
        """Creates a disk and copies the specified image to it.

        Cleans up the created disk if an error occurs.

        :param context: nova context used to retrieve image from glance
        :param instance: instance to create the disk for.
        :param image_meta: The metadata of the image of the instance.
        :return: The backing pypowervm storage object that was created.
        """
        LOG.info('Create disk.', instance=instance)

        return self._upload_image(context, instance, image_meta)

    # TODO(esberglu): Copy vdisk when implementing image cache.

    def _upload_image(self, context, instance, image_meta):
        """Upload a new image.

        :param context: Nova context used to retrieve image from glance.
        :param instance: The instance the image is being uploaded for.
        :param image_meta: The metadata of the image of the instance.
        :return: The virtual disk containing the image.
        """
        img_name = self._get_disk_name(disk_dvr.DiskType.BOOT, instance,
                                       short=True)

        # TODO(esberglu) Add check for cached image when adding imagecache.

        return tsk_stg.upload_new_vdisk(
            self._adapter, self._vios_uuid, self.vg_uuid,
            disk_dvr.IterableToFileAdapter(
                IMAGE_API.download(context, image_meta.id)), img_name,
            image_meta.size, d_size=image_meta.size,
            upload_type=tsk_stg.UploadType.IO_STREAM,
            file_format=image_meta.disk_format)[0]

    def attach_disk(self, instance, disk_info, stg_ftsk):
        """Attaches the disk image to the Virtual Machine.

        :param instance: nova instance to connect the disk to.
        :param disk_info: The pypowervm storage element returned from
                          create_disk_from_image. Ex. VOptMedia, VDisk, LU,
                          or PV.
        :param stg_ftsk: The pypowervm transaction FeedTask for the
                         I/O Operations. The Virtual I/O Server mapping
                         updates will be added to the FeedTask. This defers
                         the updates to some later point in time.
        """
        lpar_uuid = vm.get_pvm_uuid(instance)

        def add_func(vios_w):
            LOG.info("Adding logical volume disk connection to VIOS "
                     "%(vios)s.", {'vios': vios_w.name}, instance=instance)
            mapping = tsk_map.build_vscsi_mapping(
                self._host_uuid, vios_w, lpar_uuid, disk_info)
            return tsk_map.add_map(vios_w, mapping)

        stg_ftsk.wrapper_tasks[self._vios_uuid].add_functor_subtask(add_func)

    def _get_vg_wrap(self):
        return pvm_stg.VG.get(self._adapter, uuid=self.vg_uuid,
                              parent_type=pvm_vios.VIOS,
                              parent_uuid=self._vios_uuid)
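To illustrate how the deferred mapping update plays out end to end, here is a rough, hypothetical caller-side sketch (not code from this change; adapter, ctx, inst, and image_meta are assumed to exist, and build_active_vio_feed_task is the pypowervm helper for constructing a VIOS FeedTask):

from pypowervm import const as pvm_const
from pypowervm.tasks import partition as pvm_tpar

# Create the boot disk, queue the SCSI mapping on the feed task, and flush
# the queued VIOS update once at the end.
disk = disk_dvr.create_disk_from_image(ctx, inst, image_meta)
stg_ftsk = pvm_tpar.build_active_vio_feed_task(
    adapter, xag=[pvm_const.XAG.VIO_SMAP])
disk_dvr.attach_disk(inst, disk, stg_ftsk)
stg_ftsk.execute()  # performs the deferred VIOS mapping update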


@@ -15,6 +15,7 @@
 from oslo_log import log as logging
 from oslo_utils import excutils
+from oslo_utils import importutils
 from pypowervm import adapter as pvm_apt
 from pypowervm import const as pvm_const
 from pypowervm import exceptions as pvm_exc
@@ -35,7 +36,6 @@ from nova.i18n import _
 from nova import image
 from nova.virt import configdrive
 from nova.virt import driver
-from nova.virt.powervm.disk import ssp
 from nova.virt.powervm import host as pvm_host
 from nova.virt.powervm.tasks import base as tf_base
 from nova.virt.powervm.tasks import image as tf_img
@@ -47,6 +47,12 @@ from nova.virt.powervm import vm
 LOG = logging.getLogger(__name__)
 CONF = cfg.CONF
 
+DISK_ADPT_NS = 'nova.virt.powervm.disk'
+DISK_ADPT_MAPPINGS = {
+    'localdisk': 'localdisk.LocalStorage',
+    'ssp': 'ssp.SSPDiskAdapter'
+}
+
 
 class PowerVMDriver(driver.ComputeDriver):
     """PowerVM NovaLink Implementation of Compute Driver.
@@ -91,9 +97,9 @@ class PowerVMDriver(driver.ComputeDriver):
         pvm_stor.ComprehensiveScrub(self.adapter).execute()
 
         # Initialize the disk adapter
-        # TODO(efried): Other disk adapters (localdisk), by conf selection.
-        self.disk_dvr = ssp.SSPDiskAdapter(self.adapter,
-                                           self.host_wrapper.uuid)
+        self.disk_dvr = importutils.import_object_ns(
+            DISK_ADPT_NS, DISK_ADPT_MAPPINGS[CONF.powervm.disk_driver.lower()],
+            self.adapter, self.host_wrapper.uuid)
         self.image_api = image.API()
 
         LOG.info("The PowerVM compute driver has been initialized.")
@@ -312,8 +318,13 @@ class PowerVMDriver(driver.ComputeDriver):
                        nova.compute.task_states.IMAGE_SNAPSHOT. See
                        nova.objects.instance.Instance.save for expected_task_state usage.
         """
-        # TODO(esberglu) Add check for disk driver snapshot capability when
-        # additional disk drivers are implemented.
+        if not self.disk_dvr.capabilities.get('snapshot'):
+            raise exc.NotSupportedWithOption(
+                message=_("The snapshot operation is not supported in "
+                          "conjunction with a [powervm]/disk_driver setting "
+                          "of %s.") % CONF.powervm.disk_driver)
         self._log_operation('snapshot', instance)
 
         # Define the flow.


@@ -0,0 +1,15 @@
---
features:
  - |
    The PowerVM virt driver now supports booting from local ephemeral disk.
    Two new configuration options have been introduced to the ``powervm``
    configuration group, ``disk_driver`` and ``volume_group_name``. The former
    allows the selection of either ``ssp`` or ``localdisk`` for the PowerVM
    disk driver. The latter specifies the name of the volume group when using
    the localdisk disk driver.
upgrade:
  - |
    The PowerVM virt driver previously used the PowerVM Shared Storage Pool
    disk driver by default. The default disk driver for PowerVM is now
    localdisk. See configuration option ``[powervm]/disk_driver`` for usage
    details.
details.