Migration for attached volumes

Enable migration of attached volumes by calling Nova to have the
hypervisor copy the data (Nova's swap_volume feature). A new API
function is added for Nova to call once it finishes copying, to
complete the Cinder side of the migration.

The overall 'generic' migration flow (i.e., migration not performed by
a driver) is as follows; a toy simulation follows the list:
1. Create a new volume on the destination host.
2a. If the source volume's state is 'available', attach both volumes
    and run 'dd' to copy, then detach both. When finished, call the
    migrate_volume_completion function.
2b. If the source volume's state is 'in-use', call Nova to perform the
    copy. Nova will attach the new volume, copy the data from the
    original volume to the new one, and detach the original volume.
    When the copy completes, Nova will call Cinder's new
    migrate_volume_completion function.
3. The migrate_volume_completion function deletes the original volume
   and calls the database API's finish_volume_migration function. This
   copies all of the new volume's information to the original row and
   deletes the new volume's row, so we can keep using the original
   volume_id (the user sees no change).
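
Below is a toy, self-contained simulation of the flow above. Everything
here (volume_create, finish_volume_migration, the dict standing in for
the volumes table) is an illustrative stand-in for Cinder's real
helpers, and the data copy itself is elided:

import uuid

volumes = {}  # toy stand-in for the volumes table: id -> row

def volume_create(host, status, migration_status=None):
    vol = {'id': str(uuid.uuid4()), 'host': host, 'status': status,
           'migration_status': migration_status, '_name_id': None}
    volumes[vol['id']] = vol
    return vol

def finish_volume_migration(src_id, dest_id):
    # Copy everything except 'id' from the new row into the original
    # row, then drop the new row; callers keep using the original ID.
    src, dest = volumes[src_id], volumes.pop(dest_id)
    for key, value in dest.items():
        if key != 'id':
            src[key] = value
    src['_name_id'] = dest['id']  # backend name follows the new ID
    src['migration_status'] = None

def migrate_volume_generic(volume, dest_host):
    # Step 1: create the target volume on the destination host.
    volume['migration_status'] = 'migrating'
    new_volume = volume_create(dest_host, 'creating',
                               migration_status='target:%s' % volume['id'])
    # Steps 2a/2b (elided): copy the data via 'dd' or Nova's swap_volume.
    new_volume['status'] = 'available'
    # Step 3: fold the new row back into the original one.
    finish_volume_migration(volume['id'], new_volume['id'])

vol = volume_create('src-host', 'available')
orig_id = vol['id']
migrate_volume_generic(vol, 'dest-host')
assert vol['id'] == orig_id and vol['host'] == 'dest-host'
assert vol['migration_status'] is None and vol['_name_id'] is not None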

We also leave the original volume's status unchanged, and instead add a
migration_status column that only selected users (e.g., admin) can see.
The migration status is None whenever no migration is in progress,
whether the last one succeeded or failed; the admin should check the
volume's current host to determine success or failure. This is meant to
simplify operations. A user will only become aware of a migration if
they try to change the volume's state while one is in progress.
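
For illustration only, the check an admin would make can be sketched as
follows; the helper is hypothetical and merely combines the fields this
commit adds:

def migration_outcome(volume, requested_host):
    """Interpret a volume's fields after a migration attempt (sketch)."""
    if volume['migration_status'] is not None:
        return 'in progress (%s)' % volume['migration_status']
    # migration_status is None on both success and failure, so compare
    # the volume's current host against the requested destination.
    return 'succeeded' if volume['host'] == requested_host else 'failed'

print(migration_outcome({'migration_status': None, 'host': 'dest'}, 'dest'))
print(migration_outcome({'migration_status': 'starting', 'host': 'src'}, 'dest'))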

As mentioned, we swap in a new volume while keeping the original volume
ID. Because a volume's name is derived from its ID, the new volume will
have a different name than the original. This is the purpose of the
name_id field in the database - the name is now based on name_id. So
although we keep the original volume's ID, we use the new volume's ID
as the name_id. This also lets us remove the rename_volume function -
it is no longer necessary because the name_id field already allows a
volume's name on the backend to be independent of its ID. A user that
can see the migration_status can also see the name_id, in case they
need to find the volume on the backend.
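
As a sketch of the naming rule, assuming the default
volume_name_template of 'volume-%s' (the helper is illustrative, not
the actual model code):

VOLUME_NAME_TEMPLATE = 'volume-%s'

def backend_name(volume):
    # name_id falls back to the real ID when the row was never swapped.
    return VOLUME_NAME_TEMPLATE % (volume['_name_id'] or volume['id'])

print(backend_name({'id': 'orig-uuid', '_name_id': None}))        # volume-orig-uuid
print(backend_name({'id': 'orig-uuid', '_name_id': 'new-uuid'}))  # volume-new-uuid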

A few other places throughout the code relied on constructing a
volume's name from its ID; those were fixed as well.

DocImpact

Implements: bp online-volume-migration

Change-Id: I8daaee174e426fbd450fa75f04f9c8e6fa09f01a
Author: Avishay Traeger, 2013-08-19 13:45:38 +03:00
Parent: ef4d1084f4
Commit: 57141d6817
37 changed files with 682 additions and 137 deletions


@ -155,6 +155,33 @@ class VolumeAdminController(AdminController):
self.volume_api.migrate_volume(context, volume, host, force_host_copy)
return webob.Response(status_int=202)
@wsgi.action('os-migrate_volume_completion')
def _migrate_volume_completion(self, req, id, body):
"""Migrate a volume to the specified host."""
context = req.environ['cinder.context']
self.authorize(context, 'migrate_volume_completion')
try:
volume = self._get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
try:
params = body['os-migrate_volume_completion']
except KeyError:
raise exc.HTTPBadRequest("Body does not contain "
"'os-migrate_volume_completion'")
try:
new_volume_id = params['new_volume']
except KeyError:
raise exc.HTTPBadRequest("Must specify 'new_volume'")
try:
new_volume = self._get(context, new_volume_id)
except exception.NotFound:
raise exc.HTTPNotFound()
error = params.get('error', False)
ret = self.volume_api.migrate_volume_completion(context, volume,
new_volume, error)
return {'save_volume_id': ret}
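
A hedged example of how Nova (or an admin tool) might invoke the new
action over HTTP; the endpoint, IDs, and token below are placeholders,
not values from this commit:

import json
import urllib.request

url = ('http://cinder.example.com:8776/v2/PROJECT_ID'
       '/volumes/VOLUME_ID/action')
body = {'os-migrate_volume_completion': {'new_volume': 'NEW_VOLUME_ID',
                                         'error': False}}
req = urllib.request.Request(
    url,
    data=json.dumps(body).encode('utf-8'),
    headers={'Content-Type': 'application/json',
             'X-Auth-Token': 'ADMIN_TOKEN'},  # admin-only per policy
    method='POST')
with urllib.request.urlopen(req) as resp:
    # Nova keeps using the returned ID after the swap completes.
    print(json.loads(resp.read().decode('utf-8'))['save_volume_id'])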
class SnapshotAdminController(AdminController):
"""AdminController for Snapshots."""


@ -0,0 +1,97 @@
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import volume
authorize = extensions.soft_extension_authorizer('volume',
'volume_mig_status_attribute')
class VolumeMigStatusAttributeController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(VolumeMigStatusAttributeController, self).__init__(*args,
**kwargs)
self.volume_api = volume.API()
def _add_volume_mig_status_attribute(self, context, resp_volume):
try:
db_volume = self.volume_api.get(context, resp_volume['id'])
except Exception:
return
else:
key = "%s:migstat" % Volume_mig_status_attribute.alias
resp_volume[key] = db_volume['migration_status']
key = "%s:name_id" % Volume_mig_status_attribute.alias
resp_volume[key] = db_volume['_name_id']
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['cinder.context']
if authorize(context):
resp_obj.attach(xml=VolumeMigStatusAttributeTemplate())
self._add_volume_mig_status_attribute(context,
resp_obj.obj['volume'])
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['cinder.context']
if authorize(context):
resp_obj.attach(xml=VolumeListMigStatusAttributeTemplate())
for volume in list(resp_obj.obj['volumes']):
self._add_volume_mig_status_attribute(context, volume)
class Volume_mig_status_attribute(extensions.ExtensionDescriptor):
"""Expose migration_status as an attribute of a volume."""
name = "VolumeMigStatusAttribute"
alias = "os-vol-mig-status-attr"
namespace = ("http://docs.openstack.org/volume/ext/"
"volume_mig_status_attribute/api/v1")
updated = "2013-08-08T00:00:00+00:00"
def get_controller_extensions(self):
controller = VolumeMigStatusAttributeController()
extension = extensions.ControllerExtension(self, 'volumes', controller)
return [extension]
def make_volume(elem):
elem.set('{%s}migstat' % Volume_mig_status_attribute.namespace,
'%s:migstat' % Volume_mig_status_attribute.alias)
elem.set('{%s}name_id' % Volume_mig_status_attribute.namespace,
'%s:name_id' % Volume_mig_status_attribute.alias)
class VolumeMigStatusAttributeTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volume', selector='volume')
make_volume(root)
alias = Volume_mig_status_attribute.alias
namespace = Volume_mig_status_attribute.namespace
return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
class VolumeListMigStatusAttributeTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumes')
elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
make_volume(elem)
alias = Volume_mig_status_attribute.alias
namespace = Volume_mig_status_attribute.namespace
return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})


@ -65,7 +65,6 @@ iscsi_helper_opt = [cfg.StrOpt('iscsi_helper',
CONF = cfg.CONF
CONF.register_opts(iscsi_helper_opt)
CONF.import_opt('volume_name_template', 'cinder.db')
class TargetAdmin(executor.Executor):
@ -86,8 +85,8 @@ class TargetAdmin(executor.Executor):
"""Create a iSCSI target and logical unit."""
raise NotImplementedError()
def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
"""Remove a iSCSI target and logical unit."""
def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
"""Remove a iSCSI target and logical unit"""
raise NotImplementedError()
def _new_target(self, name, tid, **kwargs):
@ -193,9 +192,9 @@ class TgtAdm(TargetAdmin):
return tid
def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
LOG.info(_('Removing iscsi_target for: %s') % vol_id)
vol_uuid_file = CONF.volume_name_template % vol_id
vol_uuid_file = vol_name
volume_path = os.path.join(CONF.volumes_dir, vol_uuid_file)
if os.path.isfile(volume_path):
iqn = '%s%s' % (CONF.iscsi_target_prefix,
@ -297,11 +296,11 @@ class IetAdm(TargetAdmin):
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
return tid
def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
LOG.info(_('Removing iscsi_target for volume: %s') % vol_id)
self._delete_logicalunit(tid, lun, **kwargs)
self._delete_target(tid, **kwargs)
vol_uuid_file = CONF.volume_name_template % vol_id
vol_uuid_file = vol_name
conf_file = CONF.iet_conf
if os.path.exists(conf_file):
with self.temporary_chown(conf_file):
@ -443,9 +442,9 @@ class LioAdm(TargetAdmin):
return tid
def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
LOG.info(_('Removing iscsi_target: %s') % vol_id)
vol_uuid_name = 'volume-%s' % vol_id
vol_uuid_name = vol_name
iqn = '%s%s' % (CONF.iscsi_target_prefix, vol_uuid_name)
try:
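
To sketch why vol_name is now threaded through these signatures (the
directory, template, and helpers below are assumptions, not the
driver's real code):

VOLUMES_DIR = '/var/lib/cinder/volumes'
VOLUME_NAME_TEMPLATE = 'volume-%s'

def old_volume_path(vol_id):
    # Pre-change: the target helpers rebuilt the name from the volume
    # ID, which breaks once a migrated volume's name is based on
    # name_id rather than its ID.
    return '%s/%s' % (VOLUMES_DIR, VOLUME_NAME_TEMPLATE % vol_id)

def new_volume_path(vol_name):
    # Post-change: callers pass volume['name'], which already accounts
    # for name_id.
    return '%s/%s' % (VOLUMES_DIR, vol_name)

print(old_volume_path('orig-uuid'))        # may point at a stale name
print(new_volume_path('volume-new-uuid'))  # the actual backing file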


@ -44,7 +44,6 @@ iser_helper_opt = [cfg.StrOpt('iser_helper',
CONF = cfg.CONF
CONF.register_opts(iser_helper_opt)
CONF.import_opt('volume_name_template', 'cinder.db')
class TargetAdmin(executor.Executor):
@ -65,7 +64,7 @@ class TargetAdmin(executor.Executor):
"""Create a iSER target and logical unit."""
raise NotImplementedError()
def remove_iser_target(self, tid, lun, vol_id, **kwargs):
def remove_iser_target(self, tid, lun, vol_id, vol_name, **kwargs):
"""Remove a iSER target and logical unit."""
raise NotImplementedError()
@ -172,9 +171,9 @@ class TgtAdm(TargetAdmin):
return tid
def remove_iser_target(self, tid, lun, vol_id, **kwargs):
def remove_iser_target(self, tid, lun, vol_id, vol_name, **kwargs):
LOG.info(_('Removing iser_target for: %s') % vol_id)
vol_uuid_file = CONF.volume_name_template % vol_id
vol_uuid_file = vol_name
volume_path = os.path.join(CONF.volumes_dir, vol_uuid_file)
if os.path.isfile(volume_path):
iqn = '%s%s' % (CONF.iser_target_prefix,


@ -97,3 +97,9 @@ def novaclient(context):
class API(base.Base):
"""API for interacting with novaclient."""
def update_server_volume(self, context, server_id, attachment_id,
new_volume_id):
novaclient(context).volumes.update_server_volume(server_id,
attachment_id,
new_volume_id)


@ -1099,8 +1099,15 @@ def finish_volume_migration(context, src_vol_id, dest_vol_id):
with session.begin():
dest_volume_ref = _volume_get(context, dest_vol_id, session=session)
updates = {}
if dest_volume_ref['_name_id']:
updates['_name_id'] = dest_volume_ref['_name_id']
else:
updates['_name_id'] = dest_volume_ref['id']
for key, value in dest_volume_ref.iteritems():
if key in ['id', 'status']:
if key in ['id', '_name_id']:
continue
if key == 'migration_status':
updates[key] = None
continue
updates[key] = value
session.query(models.Volume).\
@ -1136,7 +1143,9 @@ def volume_detached(context, volume_id):
session = get_session()
with session.begin():
volume_ref = _volume_get(context, volume_id, session=session)
volume_ref['status'] = 'available'
# Hide status update from user if we're performing a volume migration
if not volume_ref['migration_status']:
volume_ref['status'] = 'available'
volume_ref['mountpoint'] = None
volume_ref['attach_status'] = 'detached'
volume_ref['instance_uuid'] = None


@ -0,0 +1,36 @@
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import String, Column, MetaData, Table
def upgrade(migrate_engine):
"""Add migration_status column to volumes."""
meta = MetaData()
meta.bind = migrate_engine
volumes = Table('volumes', meta, autoload=True)
migration_status = Column('migration_status', String(255))
volumes.create_column(migration_status)
def downgrade(migrate_engine):
"""Remove migration_status column from volumes."""
meta = MetaData()
meta.bind = migrate_engine
volumes = Table('volumes', meta, autoload=True)
migration_status = volumes.columns.migration_status
volumes.drop_column(migration_status)


@ -111,6 +111,7 @@ class Volume(BASE, CinderBase):
attach_time = Column(String(255)) # TODO(vish): datetime
status = Column(String(255)) # TODO(vish): enum?
attach_status = Column(String(255)) # TODO(vish): enum
migration_status = Column(String(255))
scheduled_at = Column(DateTime)
launched_at = Column(DateTime)
@ -340,7 +341,7 @@ class Snapshot(BASE, CinderBase):
@property
def volume_name(self):
return CONF.volume_name_template % self.volume_id
return self.volume.name # pylint: disable=E1101
user_id = Column(String(255))
project_id = Column(String(255))


@ -100,7 +100,7 @@ class SchedulerManager(manager.Manager):
volume_rpcapi.VolumeAPI().publish_service_capabilities(context)
def _migrate_volume_set_error(self, context, ex, request_spec):
volume_state = {'volume_state': {'status': 'error_migrating'}}
volume_state = {'volume_state': {'migration_status': None}}
self._set_volume_state_and_notify('migrate_volume_to_host',
volume_state,
context, ex, request_spec)


@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import ast
import shutil
import tempfile
import webob
@ -495,7 +496,7 @@ class AdminActionsTest(test.TestCase):
ctx = context.RequestContext('admin', 'fake', True)
volume = self._migrate_volume_prep()
volume = self._migrate_volume_exec(ctx, volume, host, expected_status)
self.assertEquals(volume['status'], 'migrating')
self.assertEquals(volume['migration_status'], 'starting')
def test_migrate_volume_as_non_admin(self):
expected_status = 403
@ -518,12 +519,12 @@ class AdminActionsTest(test.TestCase):
volume = self._migrate_volume_prep()
self._migrate_volume_exec(ctx, volume, host, expected_status)
def test_migrate_volume_in_use(self):
def test_migrate_volume_migrating(self):
expected_status = 400
host = 'test2'
ctx = context.RequestContext('admin', 'fake', True)
volume = self._migrate_volume_prep()
model_update = {'status': 'in-use'}
model_update = {'migration_status': 'migrating'}
volume = db.volume_update(ctx, volume['id'], model_update)
self._migrate_volume_exec(ctx, volume, host, expected_status)
@ -534,3 +535,79 @@ class AdminActionsTest(test.TestCase):
volume = self._migrate_volume_prep()
db.snapshot_create(ctx, {'volume_id': volume['id']})
self._migrate_volume_exec(ctx, volume, host, expected_status)
def _migrate_volume_comp_exec(self, ctx, volume, new_volume, error,
expected_status, expected_id):
admin_ctx = context.get_admin_context()
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
body_dict = {'new_volume': new_volume['id'], 'error': error}
req.body = jsonutils.dumps({'os-migrate_volume_completion': body_dict})
req.environ['cinder.context'] = ctx
resp = req.get_response(app())
resp_dict = ast.literal_eval(resp.body)
# verify status
self.assertEquals(resp.status_int, expected_status)
if expected_id:
self.assertEquals(resp_dict['save_volume_id'], expected_id)
else:
self.assertNotIn('save_volume_id', resp_dict)
def test_migrate_volume_comp_as_non_admin(self):
admin_ctx = context.get_admin_context()
volume = db.volume_create(admin_ctx, {'id': 'fake1'})
new_volume = db.volume_create(admin_ctx, {'id': 'fake2'})
expected_status = 403
expected_id = None
ctx = context.RequestContext('fake', 'fake')
volume = self._migrate_volume_comp_exec(ctx, volume, new_volume, False,
expected_status, expected_id)
def test_migrate_volume_comp_no_mig_status(self):
admin_ctx = context.get_admin_context()
volume1 = db.volume_create(admin_ctx, {'id': 'fake1',
'migration_status': 'foo'})
volume2 = db.volume_create(admin_ctx, {'id': 'fake2',
'migration_status': None})
expected_status = 400
expected_id = None
ctx = context.RequestContext('admin', 'fake', True)
volume = self._migrate_volume_comp_exec(ctx, volume1, volume2, False,
expected_status, expected_id)
volume = self._migrate_volume_comp_exec(ctx, volume2, volume1, False,
expected_status, expected_id)
def test_migrate_volume_comp_bad_mig_status(self):
admin_ctx = context.get_admin_context()
volume1 = db.volume_create(admin_ctx,
{'id': 'fake1',
'migration_status': 'migrating'})
volume2 = db.volume_create(admin_ctx,
{'id': 'fake2',
'migration_status': 'target:foo'})
expected_status = 400
expected_id = None
ctx = context.RequestContext('admin', 'fake', True)
volume = self._migrate_volume_comp_exec(ctx, volume1, volume2, False,
expected_status, expected_id)
def test_migrate_volume_comp_from_nova(self):
admin_ctx = context.get_admin_context()
volume = db.volume_create(admin_ctx,
{'id': 'fake1',
'status': 'in-use',
'host': 'test',
'migration_status': None,
'attach_status': 'attached'})
new_volume = db.volume_create(admin_ctx,
{'id': 'fake2',
'status': 'available',
'host': 'test',
'migration_status': None,
'attach_status': 'detached'})
expected_status = 200
expected_id = 'fake2'
ctx = context.RequestContext('admin', 'fake', True)
volume = self._migrate_volume_comp_exec(ctx, volume, new_volume, False,
expected_status, expected_id)


@ -41,6 +41,8 @@ def fake_volume_get(*args, **kwargs):
'volume_type_id': None,
'snapshot_id': None,
'project_id': 'fake',
'migration_status': None,
'_name_id': 'fake2',
}


@ -0,0 +1,145 @@
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import json
import uuid
from lxml import etree
import webob
from cinder import context
from cinder import test
from cinder.tests.api import fakes
from cinder import volume
def fake_volume_get(*args, **kwargs):
return {
'id': 'fake',
'host': 'host001',
'status': 'available',
'size': 5,
'availability_zone': 'somewhere',
'created_at': datetime.datetime.now(),
'attach_status': None,
'display_name': 'anothervolume',
'display_description': 'Just another volume!',
'volume_type_id': None,
'snapshot_id': None,
'project_id': 'fake',
'migration_status': 'migrating',
'_name_id': 'fake2',
}
def fake_volume_get_all(*args, **kwargs):
return [fake_volume_get()]
def app():
# no auth, just let environ['cinder.context'] pass through
api = fakes.router.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v2'] = api
return mapper
class VolumeMigStatusAttributeTest(test.TestCase):
def setUp(self):
super(VolumeMigStatusAttributeTest, self).setUp()
self.stubs.Set(volume.API, 'get', fake_volume_get)
self.stubs.Set(volume.API, 'get_all', fake_volume_get_all)
self.UUID = uuid.uuid4()
def test_get_volume_allowed(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = json.loads(res.body)['volume']
self.assertEqual(vol['os-vol-mig-status-attr:migstat'], 'migrating')
self.assertEqual(vol['os-vol-mig-status-attr:name_id'], 'fake2')
def test_get_volume_unallowed(self):
ctx = context.RequestContext('non-admin', 'fake', False)
req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = json.loads(res.body)['volume']
self.assertFalse('os-vol-mig-status-attr:migstat' in vol)
self.assertFalse('os-vol-mig-status-attr:name_id' in vol)
def test_list_detail_volumes_allowed(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes/detail')
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = json.loads(res.body)['volumes']
self.assertEqual(vol[0]['os-vol-mig-status-attr:migstat'], 'migrating')
self.assertEqual(vol[0]['os-vol-mig-status-attr:name_id'], 'fake2')
def test_list_detail_volumes_unallowed(self):
ctx = context.RequestContext('non-admin', 'fake', False)
req = webob.Request.blank('/v2/fake/volumes/detail')
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = json.loads(res.body)['volumes']
self.assertFalse('os-vol-mig-status-attr:migstat' in vol[0])
self.assertFalse('os-vol-mig-status-attr:name_id' in vol[0])
def test_list_simple_volumes_no_migration_status(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes')
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = json.loads(res.body)['volumes']
self.assertFalse('os-vol-mig-status-attr:migstat' in vol[0])
self.assertFalse('os-vol-mig-status-attr:name_id' in vol[0])
def test_get_volume_xml(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
req.method = 'GET'
req.accept = 'application/xml'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = etree.XML(res.body)
mig_key = ('{http://docs.openstack.org/volume/ext/'
'volume_mig_status_attribute/api/v1}migstat')
self.assertEqual(vol.get(mig_key), 'migrating')
mig_key = ('{http://docs.openstack.org/volume/ext/'
'volume_mig_status_attribute/api/v1}name_id')
self.assertEqual(vol.get(mig_key), 'fake2')
def test_list_volumes_detail_xml(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes/detail')
req.method = 'GET'
req.accept = 'application/xml'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = list(etree.XML(res.body))[0]
mig_key = ('{http://docs.openstack.org/volume/ext/'
'volume_mig_status_attribute/api/v1}migstat')
self.assertEqual(vol.get(mig_key), 'migrating')
mig_key = ('{http://docs.openstack.org/volume/ext/'
'volume_mig_status_attribute/api/v1}name_id')
self.assertEqual(vol.get(mig_key), 'fake2')


@ -44,6 +44,8 @@ def fake_volume_get(*args, **kwargs):
'volume_type_id': None,
'snapshot_id': None,
'project_id': PROJECT_ID,
'migration_status': None,
'_name_id': 'fake2',
}


@ -34,6 +34,7 @@ def stub_volume(id, **kwargs):
'instance_uuid': 'fakeuuid',
'mountpoint': '/',
'status': 'fakestatus',
'migration_status': None,
'attach_status': 'attached',
'bootable': 'false',
'name': 'vol name',

View File

@ -90,6 +90,7 @@ def return_volume(context, volume_id):
'status': 'available',
'encryption_key_id': None,
'volume_type_id': None,
'migration_status': None,
'metadata': {}}


@ -35,6 +35,7 @@ def stub_volume(id, **kwargs):
'attached_host': None,
'mountpoint': '/',
'status': 'fakestatus',
'migration_status': None,
'attach_status': 'attached',
'bootable': 'false',
'name': 'vol name',


@ -90,6 +90,7 @@ def return_volume(context, volume_id):
'status': 'available',
'encryption_key_id': None,
'volume_type_id': None,
'migration_status': None,
'metadata': {}}


@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient import exceptions as cinder_exception
from cinder.compute import nova
from cinder import context
from cinder import exception
@ -21,8 +19,12 @@ from cinder import test
class FakeNovaClient(object):
class Volumes(object):
def __getattr__(self, item):
return None
def __init__(self):
pass
self.volumes = self.Volumes()
class NovaApiTestCase(test.TestCase):
@ -33,3 +35,14 @@ class NovaApiTestCase(test.TestCase):
self.novaclient = FakeNovaClient()
self.ctx = context.get_admin_context()
self.mox.StubOutWithMock(nova, 'novaclient')
def test_update_server_volume(self):
volume_id = 'volume_id1'
nova.novaclient(self.ctx).AndReturn(self.novaclient)
self.mox.StubOutWithMock(self.novaclient.volumes,
'update_server_volume')
self.novaclient.volumes.update_server_volume('server_id', 'attach_id',
'new_volume_id')
self.mox.ReplayAll()
self.api.update_server_volume(self.ctx, 'server_id', 'attach_id',
'new_volume_id')


@ -36,14 +36,20 @@ class FinishVolumeMigrationTestCase(test.TestCase):
project_id='project_id',
is_admin=True)
src_volume = testutils.create_volume(ctxt, host='src',
status='migrating')
migration_status='migrating',
status='available')
dest_volume = testutils.create_volume(ctxt, host='dest',
status='migration_target')
migration_status='target:fake',
status='available')
db.finish_volume_migration(ctxt, src_volume['id'],
dest_volume['id'])
self.assertRaises(exception.VolumeNotFound, db.volume_get, ctxt,
dest_volume['id'])
src_volume = db.volume_get(ctxt, src_volume['id'])
expected_name = 'volume-%s' % dest_volume['id']
self.assertEqual(src_volume['_name_id'], dest_volume['id'])
self.assertEqual(src_volume['name'], expected_name)
self.assertEqual(src_volume['host'], 'dest')
self.assertEqual(src_volume['status'], 'migrating')
self.assertEqual(src_volume['status'], 'available')
self.assertEqual(src_volume['migration_status'], None)


@ -50,3 +50,11 @@ class NameIDsTestCase(test.TestCase):
vol_ref = db.volume_get(self.ctxt, vol_ref['id'])
expected_name = CONF.volume_name_template % 'fake'
self.assertEqual(vol_ref['name'], expected_name)
def test_name_id_snapshot_volume_name(self):
"""Make sure snapshot['volume_name'] is updated."""
vol_ref = testutils.create_volume(self.ctxt, size=1)
db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'})
snap_ref = testutils.create_snapshot(self.ctxt, vol_ref['id'])
expected_name = CONF.volume_name_template % 'fake'
self.assertEquals(snap_ref['volume_name'], expected_name)


@ -27,6 +27,8 @@
"volume:get_all_snapshots": [],
"volume:update_snapshot": [],
"volume:extend": [],
"volume:migrate_volume": [["rule:admin_api"]],
"volume:migrate_volume_completion": [["rule:admin_api"]],
"volume_extension:volume_admin_actions:reset_status": [["rule:admin_api"]],
"volume_extension:snapshot_admin_actions:reset_status": [["rule:admin_api"]],
@ -34,6 +36,7 @@
"volume_extension:snapshot_admin_actions:force_delete": [["rule:admin_api"]],
"volume_extension:volume_admin_actions:force_detach": [["rule:admin_api"]],
"volume_extension:volume_admin_actions:migrate_volume": [["rule:admin_api"]],
"volume_extension:volume_admin_actions:migrate_volume_completion": [["rule:admin_api"]],
"volume_extension:volume_actions:upload_image": [],
"volume_extension:types_manage": [],
"volume_extension:types_extra_specs": [],
@ -44,6 +47,7 @@
"volume_extension:volume_image_metadata": [],
"volume_extension:volume_host_attribute": [["rule:admin_api"]],
"volume_extension:volume_tenant_attribute": [["rule:admin_api"]],
"volume_extension:volume_mig_status_attribute": [["rule:admin_api"]],
"volume_extension:hosts": [["rule:admin_api"]],
"volume_extension:quotas:show": [],
"volume_extension:quotas:update": [],


@ -105,7 +105,7 @@ class SchedulerManagerTestCase(test.TestCase):
request_spec=request_spec,
filter_properties={})
def test_migrate_volume_exception_puts_volume_in_error_state(self):
def test_migrate_volume_exception_returns_volume_state(self):
"""Test NoValidHost exception behavior for migrate_volume_to_host.
Puts the volume's migration_status back to None and eats the exception.
@ -122,7 +122,7 @@ class SchedulerManagerTestCase(test.TestCase):
self.context, 'host',
request_spec, {}).AndRaise(exception.NoValidHost(reason=""))
db.volume_update(self.context, fake_volume_id,
{'status': 'error_migrating'})
{'migration_status': None})
self.mox.ReplayAll()
self.manager.migrate_volume_to_host(self.context, topic, volume_id,


@ -92,7 +92,8 @@ fake_snapshot = {"id": fake_snapshot_id,
"name": fake_snapshot_name,
"volume_id": fake_volume_id,
"volume_name": fake_volume_name,
"volume_size": int(fake_volume_size) - 1}
"volume_size": int(fake_volume_size) - 1,
"volume": fake_volume}
fake_configure_data = [{"addr": "cms", "data": "FAKE"}]


@ -34,6 +34,7 @@ class TargetAdminTestCase(object):
self.lun = 10
self.path = '/foo'
self.vol_id = 'blaa'
self.vol_name = 'volume-blaa'
self.script_template = None
self.stubs.Set(os.path, 'isfile', lambda _: True)
@ -84,7 +85,8 @@ class TargetAdminTestCase(object):
tgtadm.create_iscsi_target(self.target_name, self.tid,
self.lun, self.path)
tgtadm.show_target(self.tid, iqn=self.target_name)
tgtadm.remove_iscsi_target(self.tid, self.lun, self.vol_id)
tgtadm.remove_iscsi_target(self.tid, self.lun, self.vol_id,
self.vol_name)
def test_target_admin(self):
self.clear_cmds()


@ -34,6 +34,7 @@ class TargetAdminTestCase(object):
self.lun = 10
self.path = '/foo'
self.vol_id = 'blaa'
self.vol_name = 'volume-blaa'
self.script_template = None
self.stubs.Set(os.path, 'isfile', lambda _: True)
@ -82,7 +83,8 @@ class TargetAdminTestCase(object):
tgtadm.create_iser_target(self.target_name, self.tid,
self.lun, self.path)
tgtadm.show_target(self.tid, iqn=self.target_name)
tgtadm.remove_iser_target(self.tid, self.lun, self.vol_id)
tgtadm.remove_iser_target(self.tid, self.lun, self.vol_id,
self.vol_name)
def test_target_admin(self):
self.clear_cmds()


@ -934,3 +934,29 @@ class TestMigrations(test.TestCase):
self.assertFalse(engine.dialect.has_table(
engine.connect(), "quality_of_service_specs"))
def test_migration_019(self):
"""Test that adding migration_status column works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.INIT_VERSION)
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 18)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 19)
volumes = sqlalchemy.Table('volumes',
metadata,
autoload=True)
self.assertTrue(isinstance(volumes.c.migration_status.type,
sqlalchemy.types.VARCHAR))
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 18)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
volumes = sqlalchemy.Table('volumes',
metadata,
autoload=True)
self.assertTrue('migration_status' not in volumes.c)


@ -120,7 +120,7 @@ class BaseVolumeTestCase(test.TestCase):
@staticmethod
def _create_volume(size=0, snapshot_id=None, image_id=None,
source_volid=None, metadata=None, status="creating",
availability_zone=None):
migration_status=None, availability_zone=None):
"""Create a volume object."""
vol = {}
vol['size'] = size
@ -1381,7 +1381,8 @@ class VolumeTestCase(BaseVolumeTestCase):
self.stubs.Set(self.volume.driver, 'migrate_volume',
lambda x, y, z: (True, {'user_id': 'foo'}))
volume = self._create_volume(status='migrating')
volume = self._create_volume(status='available',
migration_status='migrating')
host_obj = {'host': 'newhost', 'capabilities': {}}
self.volume.migrate_volume(self.context, volume['id'],
host_obj, False)
@ -1389,10 +1390,9 @@ class VolumeTestCase(BaseVolumeTestCase):
# check volume properties
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEquals(volume['host'], 'newhost')
self.assertEquals(volume['status'], 'available')
self.assertEquals(volume['migration_status'], None)
def test_migrate_volume_generic(self):
"""Test the generic offline volume migration."""
def fake_migr(vol, host):
raise Exception('should not be called')
@ -1401,10 +1401,7 @@ class VolumeTestCase(BaseVolumeTestCase):
def fake_create_volume(self, ctxt, volume, host, req_spec, filters):
db.volume_update(ctxt, volume['id'],
{'status': 'migration_target'})
def fake_rename_volume(self, ctxt, volume, new_name_id):
db.volume_update(ctxt, volume['id'], {'name_id': new_name_id})
{'status': 'available'})
self.stubs.Set(self.volume.driver, 'migrate_volume', fake_migr)
self.stubs.Set(volume_rpcapi.VolumeAPI, 'create_volume',
@ -1413,24 +1410,14 @@ class VolumeTestCase(BaseVolumeTestCase):
lambda x, y, z, remote='dest': True)
self.stubs.Set(volume_rpcapi.VolumeAPI, 'delete_volume',
fake_delete_volume_rpc)
self.stubs.Set(volume_rpcapi.VolumeAPI, 'rename_volume',
fake_rename_volume)
volume = self._create_volume(status='migrating')
volume = self._create_volume(status='available')
host_obj = {'host': 'newhost', 'capabilities': {}}
self.volume.migrate_volume(self.context, volume['id'],
host_obj, True)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEquals(volume['host'], 'newhost')
self.assertEquals(volume['status'], 'available')
def test_rename_volume(self):
self.stubs.Set(self.volume.driver, 'rename_volume',
lambda x, y: None)
volume = self._create_volume()
self.volume.rename_volume(self.context, volume['id'], 'new_id')
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEquals(volume['name_id'], 'new_id')
self.assertEquals(volume['migration_status'], None)
class CopyVolumeToImageTestCase(BaseVolumeTestCase):
@ -1745,7 +1732,7 @@ class LVMISCSIVolumeDriverTestCase(DriverTestCase):
def test_lvm_migrate_volume_no_loc_info(self):
host = {'capabilities': {}}
vol = {'name': 'test', 'id': 1, 'size': 1}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertEqual(moved, False)
@ -1754,7 +1741,7 @@ class LVMISCSIVolumeDriverTestCase(DriverTestCase):
def test_lvm_migrate_volume_bad_loc_info(self):
capabilities = {'location_info': 'foo'}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertEqual(moved, False)
@ -1763,7 +1750,7 @@ class LVMISCSIVolumeDriverTestCase(DriverTestCase):
def test_lvm_migrate_volume_diff_driver(self):
capabilities = {'location_info': 'FooDriver:foo:bar'}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertEqual(moved, False)
@ -1772,7 +1759,17 @@ class LVMISCSIVolumeDriverTestCase(DriverTestCase):
def test_lvm_migrate_volume_diff_host(self):
capabilities = {'location_info': 'LVMVolumeDriver:foo:bar'}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertEqual(moved, False)
self.assertEqual(model_update, None)
def test_lvm_migrate_volume_in_use(self):
hostname = socket.gethostname()
capabilities = {'location_info': 'LVMVolumeDriver:%s:bar' % hostname}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'in-use'}
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertEqual(moved, False)
@ -1783,7 +1780,7 @@ class LVMISCSIVolumeDriverTestCase(DriverTestCase):
capabilities = {'location_info': 'LVMVolumeDriver:%s:'
'cinder-volumes:default:0' % hostname}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
self.stubs.Set(self.volume.driver, 'remove_export',
lambda x, y: None)
self.stubs.Set(self.volume.driver, '_create_volume',


@ -87,6 +87,10 @@ class VolumeRpcAPITestCase(test.TestCase):
'capabilities': dest_host.capabilities}
del expected_msg['args']['dest_host']
expected_msg['args']['host'] = dest_host_dict
if 'new_volume' in expected_msg['args']:
volume = expected_msg['args']['new_volume']
del expected_msg['args']['new_volume']
expected_msg['args']['new_volume_id'] = volume['id']
expected_msg['version'] = expected_version
@ -219,9 +223,10 @@ class VolumeRpcAPITestCase(test.TestCase):
force_host_copy=True,
version='1.8')
def test_rename_volume(self):
self._test_volume_api('rename_volume',
def test_migrate_volume_completion(self):
self._test_volume_api('migrate_volume_completion',
rpc_method='call',
volume=self.fake_volume,
new_name_id='new_id',
version='1.8')
new_volume=self.fake_volume,
error=False,
version='1.10')


@ -31,6 +31,7 @@ def create_volume(ctxt,
display_name='test_volume',
display_description='this is a test volume',
status='available',
migration_status=None,
size=1):
"""Create a volume object in the DB."""
vol = {}
@ -39,7 +40,25 @@ def create_volume(ctxt,
vol['user_id'] = ctxt.user_id
vol['project_id'] = ctxt.project_id
vol['status'] = status
vol['migration_status'] = migration_status
vol['display_name'] = display_name
vol['display_description'] = display_description
vol['attach_status'] = 'detached'
return db.volume_create(ctxt, vol)
def create_snapshot(ctxt,
volume_id,
display_name='test_snapshot',
display_description='this is a test snapshot',
status='creating'):
vol = db.volume_get(ctxt, volume_id)
snap = {}
snap['volume_id'] = volume_id
snap['user_id'] = ctxt.user_id
snap['project_id'] = ctxt.project_id
snap['status'] = status
snap['volume_size'] = vol['size']
snap['display_name'] = display_name
snap['display_description'] = display_description
return db.snapshot_create(ctxt, snap)


@ -221,7 +221,7 @@ class API(base.Base):
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if volume['attach_status'] == "migrating":
if volume['migration_status'] is not None:
# Volume is migrating, wait until done
msg = _("Volume cannot be deleted while migrating")
raise exception.InvalidVolume(reason=msg)
@ -298,9 +298,10 @@ class API(base.Base):
return True
def _check_migration_target(volume, searchdict):
if not volume['status'].startswith('migration_target'):
return True
return False
status = volume['migration_status']
if status and status.startswith('target:'):
return False
return True
# search_option to filter_name mapping.
filter_mapping = {'metadata': _check_metadata_match,
@ -397,7 +398,11 @@ class API(base.Base):
@wrap_check_policy
def begin_detaching(self, context, volume):
self.update(context, volume, {"status": "detaching"})
# If we are in the middle of a volume migration, we don't want the user
# to see that the volume is 'detaching'. Having 'migration_status' set
# will have the same effect internally.
if not volume['migration_status']:
self.update(context, volume, {"status": "detaching"})
@wrap_check_policy
def roll_detaching(self, context, volume):
@ -442,6 +447,11 @@ class API(base.Base):
force=False, metadata=None):
check_policy(context, 'create_snapshot', volume)
if volume['migration_status'] is not None:
# Volume is migrating, wait until done
msg = _("Snapshot cannot be created while volume is migrating")
raise exception.InvalidVolume(reason=msg)
if ((not force) and (volume['status'] != "available")):
msg = _("must be available")
raise exception.InvalidVolume(reason=msg)
@ -692,15 +702,21 @@ class API(base.Base):
self.update(context, volume, {'status': 'extending'})
self.volume_rpcapi.extend_volume(context, volume, new_size)
@wrap_check_policy
def migrate_volume(self, context, volume, host, force_host_copy):
"""Migrate the volume to the specified host."""
# We only handle "available" volumes for now
if volume['status'] != "available":
msg = _("status must be available")
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume status must be available/in-use.')
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Make sure volume is not part of a migration
if volume['migration_status'] is not None:
msg = _("Volume is already part of an active migration")
raise exception.InvalidVolume(reason=msg)
# We only handle volumes without snapshots for now
snaps = self.db.snapshot_get_all_for_volume(context, volume['id'])
if snaps:
@ -727,13 +743,14 @@ class API(base.Base):
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
self.update(context, volume, {'status': 'migrating'})
self.update(context, volume, {'migration_status': 'starting'})
# Call the scheduler to ensure that the host exists and that it can
# accept the volume
volume_type = {}
if volume['volume_type_id']:
volume_types.get_volume_type(context, volume['volume_type_id'])
volume_type_id = volume['volume_type_id']
if volume_type_id:
volume_type = volume_types.get_volume_type(context, volume_type_id)
request_spec = {'volume_properties': volume,
'volume_type': volume_type,
'volume_id': volume['id']}
@ -744,6 +761,31 @@ class API(base.Base):
force_host_copy,
request_spec)
@wrap_check_policy
def migrate_volume_completion(self, context, volume, new_volume, error):
# This is a volume swap initiated by Nova, not Cinder. Nova expects
# us to return the new_volume_id.
if not (volume['migration_status'] or new_volume['migration_status']):
return new_volume['id']
if not volume['migration_status']:
msg = _('Source volume not mid-migration.')
raise exception.InvalidVolume(reason=msg)
if not new_volume['migration_status']:
msg = _('Destination volume not mid-migration.')
raise exception.InvalidVolume(reason=msg)
expected_status = 'target:%s' % volume['id']
if new_volume['migration_status'] != expected_status:
msg = (_('Destination has migration_status %(stat)s, expected '
'%(exp)s.') % {'stat': new_volume['migration_status'],
'exp': expected_status})
raise exception.InvalidVolume(reason=msg)
return self.volume_rpcapi.migrate_volume_completion(context, volume,
new_volume, error)
class HostAPI(base.Base):
def __init__(self):


@ -425,14 +425,6 @@ class VolumeDriver(object):
"""
return (False, None)
def rename_volume(self, volume, orig_name):
"""Rename the volume according to the volume object.
The original name is passed for reference, and the function can return
model_update.
"""
return None
class ISCSIDriver(VolumeDriver):
"""Executes commands relating to ISCSI volumes.


@ -132,7 +132,8 @@ class BlockDeviceDriver(driver.ISCSIDriver):
LOG.info(_("Skipping remove_export. No iscsi_target "
"provisioned for volume: %s"), volume['id'])
return
self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id'])
self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id'],
volume['name'])
return
elif not isinstance(self.tgtadm, iscsi.TgtAdm):
try:
@ -157,7 +158,8 @@ class BlockDeviceDriver(driver.ISCSIDriver):
LOG.info(_("Skipping remove_export. No iscsi_target "
"is presently exported for volume: %s"), volume['id'])
return
self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id'])
self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id'],
volume['name'])
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a logical volume.


@ -572,7 +572,8 @@ class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
"provisioned for volume: %s"), volume['id'])
return
self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id'])
self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id'],
volume['name'])
return
@ -604,7 +605,8 @@ class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
"is presently exported for volume: %s"), volume['id'])
return
self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id'])
self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['name_id'],
volume['name'])
def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
"""Optimize the migration if the destination is on the same server.
@ -615,6 +617,8 @@ class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
"""
false_ret = (False, None)
if volume['status'] != 'available':
return false_ret
if 'location_info' not in host['capabilities']:
return false_ret
info = host['capabilities']['location_info']
@ -654,11 +658,6 @@ class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
return (True, model_update)
def rename_volume(self, volume, orig_name):
self._execute('lvrename', self.configuration.volume_group,
orig_name, volume['name'],
run_as_root=True)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
@ -852,7 +851,8 @@ class LVMISERDriver(LVMISCSIDriver, driver.ISERDriver):
"is presently exported for volume: %s"), volume['id'])
return
self.tgtadm.remove_iser_target(iser_target, 0, volume['id'])
self.tgtadm.remove_iser_target(iser_target, 0, volume['id'],
volume['name'])
def _update_volume_status(self):
"""Retrieve status info from volume group."""


@ -42,6 +42,8 @@ import time
from oslo.config import cfg
from cinder.brick.initiator import connector as initiator
from cinder import compute
from cinder import context
from cinder import exception
from cinder.image import glance
@ -114,7 +116,7 @@ MAPPING = {
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '1.9'
RPC_API_VERSION = '1.10'
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
@ -242,7 +244,7 @@ class VolumeManager(manager.SchedulerDependentManager):
# If deleting the source volume in a migration, we want to skip quotas
# and other database updates.
if volume_ref['status'] == 'migrating':
if volume_ref['migration_status']:
return True
# Get reservations
@ -399,10 +401,11 @@ class VolumeManager(manager.SchedulerDependentManager):
# we should update this to a date-time object
# also consider adding detach_time?
now = timeutils.strtime()
new_status = 'attaching'
self.db.volume_update(context, volume_id,
{"instance_uuid": instance_uuid,
"attached_host": host_name,
"status": "attaching",
"status": new_status,
"attach_time": now})
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
@ -563,8 +566,14 @@ class VolumeManager(manager.SchedulerDependentManager):
for k, v in volume.iteritems():
new_vol_values[k] = v
del new_vol_values['id']
del new_vol_values['_name_id']
# We don't copy volume_type because the db sets that according to
# volume_type_id, which we do copy
del new_vol_values['volume_type']
new_vol_values['host'] = host['host']
new_vol_values['status'] = 'migration_target_creating'
new_vol_values['status'] = 'creating'
new_vol_values['migration_status'] = 'target:%s' % volume['id']
new_vol_values['attach_status'] = 'detached'
new_volume = self.db.volume_create(ctxt, new_vol_values)
rpcapi.create_volume(ctxt, new_volume, host['host'],
None, None)
@ -574,7 +583,7 @@ class VolumeManager(manager.SchedulerDependentManager):
deadline = starttime + CONF.migration_create_volume_timeout_secs
new_volume = self.db.volume_get(ctxt, new_volume['id'])
tries = 0
while new_volume['status'] != 'migration_target':
while new_volume['status'] != 'available':
tries = tries + 1
now = time.time()
if new_volume['status'] == 'error':
@ -589,42 +598,64 @@ class VolumeManager(manager.SchedulerDependentManager):
# Copy the source volume to the destination volume
try:
self.driver.copy_volume_data(ctxt, volume, new_volume,
remote='dest')
if volume['status'] == 'available':
self.driver.copy_volume_data(ctxt, volume, new_volume,
remote='dest')
# The above call is synchronous so we complete the migration
self.migrate_volume_completion(ctxt, volume['id'],
new_volume['id'], error=False)
else:
nova_api = compute.API()
# This is an async call to Nova, which will call the completion
# when it's done
nova_api.update_server_volume(ctxt, volume['instance_uuid'],
volume['id'], new_volume['id'])
except Exception:
with excutils.save_and_reraise_exception():
msg = _("Failed to copy volume %(vol1)s to %(vol2)s")
LOG.error(msg % {'vol1': volume['id'],
'vol2': new_volume['id']})
rpcapi.delete_volume(ctxt, volume)
volume = self.db.volume_get(ctxt, volume['id'])
# If we're in the completing phase don't delete the target
# because we may have already deleted the source!
if volume['migration_status'] == 'migrating':
rpcapi.delete_volume(ctxt, new_volume)
new_volume['migration_status'] = None
def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
error=False):
volume = self.db.volume_get(ctxt, volume_id)
new_volume = self.db.volume_get(ctxt, new_volume_id)
rpcapi = volume_rpcapi.VolumeAPI()
if error:
new_volume['migration_status'] = None
rpcapi.delete_volume(ctxt, new_volume)
self.db.volume_update(ctxt, volume_id, {'migration_status': None})
return volume_id
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'completing'})
# Delete the source volume (if it fails, don't fail the migration)
try:
self.delete_volume(ctxt, volume['id'])
self.delete_volume(ctxt, volume_id)
except Exception as ex:
msg = _("Failed to delete migration source vol %(vol)s: %(err)s")
LOG.error(msg % {'vol': volume['id'], 'err': ex})
LOG.error(msg % {'vol': volume_id, 'err': ex})
# Rename the destination volume to the name of the source volume.
# We rename rather than create the destination with the same as the
# source because: (a) some backends require unique names between pools
# in addition to within pools, and (b) we want to enable migration
# within one pool (for example, changing a volume's type by creating a
# new volume and copying the data over)
try:
rpcapi.rename_volume(ctxt, new_volume, volume['id'])
except Exception:
msg = _("Failed to rename migration destination volume "
"%(vol)s to %(name)s")
LOG.error(msg % {'vol': new_volume['id'], 'name': volume['name']})
self.db.finish_volume_migration(ctxt, volume['id'], new_volume['id'])
self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
self.db.volume_update(ctxt, volume_id, {'migration_status': None})
return volume['id']
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False):
"""Migrate the volume to the specified host (called on source host)."""
volume_ref = self.db.volume_get(ctxt, volume_id)
model_update = None
moved = False
self.db.volume_update(ctxt, volume_ref['id'],
{'migration_status': 'migrating'})
if not force_host_copy:
try:
LOG.debug(_("volume %s: calling driver migrate_volume"),
@ -633,7 +664,8 @@ class VolumeManager(manager.SchedulerDependentManager):
volume_ref,
host)
if moved:
updates = {'host': host['host']}
updates = {'host': host['host'],
'migration_status': None}
if model_update:
updates.update(model_update)
volume_ref = self.db.volume_update(ctxt,
@ -641,7 +673,7 @@ class VolumeManager(manager.SchedulerDependentManager):
updates)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'status': 'error_migrating'}
updates = {'migration_status': None}
model_update = self.driver.create_export(ctxt, volume_ref)
if model_update:
updates.update(model_update)
@ -651,26 +683,11 @@ class VolumeManager(manager.SchedulerDependentManager):
self._migrate_volume_generic(ctxt, volume_ref, host)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'status': 'error_migrating'}
updates = {'migration_status': None}
model_update = self.driver.create_export(ctxt, volume_ref)
if model_update:
updates.update(model_update)
self.db.volume_update(ctxt, volume_ref['id'], updates)
self.db.volume_update(ctxt, volume_ref['id'],
{'status': 'available'})
def rename_volume(self, ctxt, volume_id, new_name_id):
volume_ref = self.db.volume_get(ctxt, volume_id)
orig_name = volume_ref['name']
self.driver.remove_export(ctxt, volume_ref)
self.db.volume_update(ctxt, volume_id, {'name_id': new_name_id})
volume_ref = self.db.volume_get(ctxt, volume_id)
model_update = self.driver.rename_volume(volume_ref, orig_name)
if model_update:
self.db.volume_update(ctxt, volume_ref['id'], model_update)
model_update = self.driver.create_export(ctxt, volume_ref)
if model_update:
self.db.volume_update(ctxt, volume_ref['id'], model_update)
@periodic_task.periodic_task
def _report_driver_status(self, context):


@ -44,6 +44,7 @@ class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
to allow attaching to host rather than instance.
1.8 - Add migrate_volume, rename_volume.
1.9 - Add new_user and new_project to accept_transfer.
1.10 - Add migrate_volume_completion, remove rename_volume.
'''
BASE_RPC_API_VERSION = '1.0'
@ -166,10 +167,12 @@ class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
topic=rpc.queue_get_for(ctxt, self.topic, volume['host']),
version='1.8')
def rename_volume(self, ctxt, volume, new_name_id):
self.call(ctxt,
self.make_msg('rename_volume',
volume_id=volume['id'],
new_name_id=new_name_id),
topic=rpc.queue_get_for(ctxt, self.topic, volume['host']),
version='1.8')
def migrate_volume_completion(self, ctxt, volume, new_volume, error):
return self.call(ctxt,
self.make_msg('migrate_volume_completion',
volume_id=volume['id'],
new_volume_id=new_volume['id'],
error=error),
topic=rpc.queue_get_for(ctxt, self.topic,
volume['host']),
version='1.10')


@ -28,9 +28,11 @@
"volume_extension:volume_admin_actions:force_delete": [["rule:admin_api"]],
"volume_extension:snapshot_admin_actions:force_delete": [["rule:admin_api"]],
"volume_extension:volume_admin_actions:migrate_volume": [["rule:admin_api"]],
"volume_extension:volume_admin_actions:migrate_volume_completion": [["rule:admin_api"]],
"volume_extension:volume_host_attribute": [["rule:admin_api"]],
"volume_extension:volume_tenant_attribute": [["rule:admin_api"]],
"volume_extension:volume_mig_status_attribute": [["rule:admin_api"]],
"volume_extension:hosts": [["rule:admin_api"]],
"volume_extension:services": [["rule:admin_api"]],
"volume:services": [["rule:admin_api"]],