Add handle_update to VolumeAttachment

Updates to volume attachments were previously handled via UpdateReplace,
which fails when a new volume needs to be attached to the same mount
point (or the same volume to a different mount point on the same server,
or the same volume to a different server).

This patch makes all properties of VolumeAttachment update_allowed,
adds the necessary handle_update logic, and fixes VolumeDetachTask to
reliably detach the old volume so that the new one can be attached
immediately.
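
For example, a stack update that changes only the Device of an existing
attachment is now applied in place rather than via UpdateReplace. A
minimal template sketch (the Ref-based property wiring here is assumed
for illustration; the full MountPoint resource is truncated in the diff
below):

  "MountPoint" : {
    "Type" : "AWS::EC2::VolumeAttachment",
    "Properties" : {
      "InstanceId" : { "Ref" : "WikiDatabase" },
      "VolumeId"   : { "Ref" : "DataVolume" },
      "Device"     : "/dev/vdd"
    }
  }

Changing VolumeId or InstanceId instead of Device is handled the same
way, as exercised by the new tests.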

Change-Id: Id81a53a34b5bb2dfbc9425b91be0d02539fe2456
Closes-Bug: #1269686
Closes-Bug: #1251439
Authored by Pavlo Shchelokovskyy on 2014-02-03 16:04:06 +02:00; committed by Steve Baker
parent 4521e0347d
commit db9545ea9e
2 changed files with 319 additions and 45 deletions


@@ -232,43 +232,50 @@ class VolumeAttachTask(object):
class VolumeDetachTask(object):
"""A task for detaching a volume from a Nova server."""
def __init__(self, stack, server_id, volume_id):
def __init__(self, stack, server_id, attachment_id):
"""
Initialise with the stack (for obtaining the clients), and the IDs of
the server and the volume attachment.
"""
self.clients = stack.clients
self.server_id = server_id
self.volume_id = volume_id
self.attachment_id = attachment_id
def __str__(self):
"""Return a human-readable string description of the task."""
return 'Detaching Volume %s from Instance %s' % (self.volume_id,
self.server_id)
return _('Removing attachment %(att)s from Instance %(srv)s') % {
'att': self.attachment_id, 'srv': self.server_id}
def __repr__(self):
"""Return a brief string description of the task."""
return '%s(%s -/> %s)' % (type(self).__name__,
self.volume_id,
self.attachment_id,
self.server_id)
def __call__(self):
"""Return a co-routine which runs the task."""
logger.debug(str(self))
server_api = self.clients.nova().volumes
# get reference to the volume while it is attached
try:
vol = self.clients.cinder().volumes.get(self.volume_id)
except clients.cinderclient.exceptions.NotFound:
nova_vol = server_api.get_server_volume(self.server_id,
self.attachment_id)
vol = self.clients.cinder().volumes.get(nova_vol.id)
except (clients.cinderclient.exceptions.NotFound,
clients.novaclient.exceptions.BadRequest,
clients.novaclient.exceptions.NotFound):
logger.warning(_('%s - volume not found') % str(self))
return
server_api = self.clients.nova().volumes
# detach the volume using volume_attachment
try:
server_api.delete_server_volume(self.server_id, self.volume_id)
server_api.delete_server_volume(self.server_id, self.attachment_id)
except (clients.novaclient.exceptions.BadRequest,
clients.novaclient.exceptions.NotFound) as e:
logger.warning('%s - %s' % (str(self), str(e)))
logger.warning('%(res)s - %(err)s' % {'res': str(self),
'err': str(e)})
yield
@@ -280,7 +287,7 @@ class VolumeDetachTask(object):
try:
server_api.delete_server_volume(self.server_id,
self.volume_id)
self.attachment_id)
except (clients.novaclient.exceptions.BadRequest,
clients.novaclient.exceptions.NotFound):
pass
@@ -294,6 +301,27 @@ class VolumeDetachTask(object):
except clients.cinderclient.exceptions.NotFound:
logger.warning(_('%s - volume not found') % str(self))
# The next check is needed for immediate reattachment when updating:
# as the volume info is taken from cinder, but the detach
# request is sent to nova, there might be some time
# between cinder marking the volume as 'available' and
# nova removing the attachment from its own objects, so we
# check that nova already knows that the volume is detached
def server_has_attachment(server_id, attachment_id):
try:
server_api.get_server_volume(server_id, attachment_id)
except clients.novaclient.exceptions.NotFound:
return False
return True
while server_has_attachment(self.server_id, self.attachment_id):
logger.info(_("Server %(srv)s still has attachment %(att)s.") %
{'att': self.attachment_id, 'srv': self.server_id})
yield
logger.info(_("Volume %(vol)s is detached from server %(srv)s") %
{'vol': vol.id, 'srv': self.server_id})
class VolumeAttachment(resource.Resource):
PROPERTIES = (
@@ -306,12 +334,14 @@ class VolumeAttachment(resource.Resource):
INSTANCE_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the instance to which the volume attaches.'),
required=True
required=True,
update_allowed=True
),
VOLUME_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the volume to be attached.'),
required=True
required=True,
update_allowed=True
),
DEVICE: properties.Schema(
properties.Schema.STRING,
@@ -319,12 +349,15 @@ class VolumeAttachment(resource.Resource):
'assignment may not be honored and it is advised that the path '
'/dev/disk/by-id/virtio-<VolumeId> be used instead.'),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedPattern('/dev/vd[b-z]'),
]
),
}
update_allowed_keys = ('Properties',)
def handle_create(self):
server_id = self.properties[self.INSTANCE_ID]
volume_id = self.properties[self.VOLUME_ID]
@@ -344,10 +377,49 @@ class VolumeAttachment(resource.Resource):
def handle_delete(self):
server_id = self.properties[self.INSTANCE_ID]
volume_id = self.properties[self.VOLUME_ID]
detach_task = VolumeDetachTask(self.stack, server_id, volume_id)
detach_task = VolumeDetachTask(self.stack, server_id, self.resource_id)
scheduler.TaskRunner(detach_task)()
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
checkers = []
if prop_diff:
# Even though some combinations of changed properties
# could be handled via UpdateReplace,
# we still detach the old resource first so that
# self.resource_id is not replaced prematurely
volume_id = self.properties.get(self.VOLUME_ID)
if self.VOLUME_ID in prop_diff:
volume_id = prop_diff.get(self.VOLUME_ID)
device = self.properties.get(self.DEVICE)
if self.DEVICE in prop_diff:
device = prop_diff.get(self.DEVICE)
server_id = self.properties.get(self.INSTANCE_ID)
detach_task = VolumeDetachTask(self.stack, server_id,
self.resource_id)
checkers.append(scheduler.TaskRunner(detach_task))
if self.INSTANCE_ID in prop_diff:
server_id = prop_diff.get(self.INSTANCE_ID)
attach_task = VolumeAttachTask(self.stack, server_id,
volume_id, device)
checkers.append(scheduler.TaskRunner(attach_task))
if checkers:
checkers[0].start()
return checkers
def check_update_complete(self, checkers):
for checker in checkers:
if not checker.started():
checker.start()
if not checker.step():
return False
self.resource_id_set(checkers[-1]._task.attachment_id)
return True
class CinderVolume(Volume):
@@ -483,18 +555,21 @@ class CinderVolumeAttachment(VolumeAttachment):
INSTANCE_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the server to which the volume attaches.'),
required=True
required=True,
update_allowed=True
),
VOLUME_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the volume to be attached.'),
required=True
required=True,
update_allowed=True
),
DEVICE: properties.Schema(
properties.Schema.STRING,
_('The location where the volume is exposed on the instance. This '
'assignment may not be honored and it is advised that the path '
'/dev/disk/by-id/virtio-<VolumeId> be used instead.')
'/dev/disk/by-id/virtio-<VolumeId> be used instead.'),
update_allowed=True
),
}

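The update path above follows Heat's two-phase resource update contract:
handle_update() starts the first task and returns the list of task
runners, and the engine then polls check_update_complete() until all of
them have finished. A minimal sketch of that driving loop, with
hypothetical driver code that is not part of this change (the real
engine interleaves these polls with work on other resources):

  # Assumed given: rsrc is a VolumeAttachment resource, and json_snippet,
  # tmpl_diff and prop_diff describe the update, e.g.
  # prop_diff = {'Device': '/dev/vdd'}.
  checkers = rsrc.handle_update(json_snippet, tmpl_diff, prop_diff)
  if checkers:
      while not rsrc.check_update_complete(checkers):
          pass  # the engine yields/sleeps between polls
  # On completion rsrc.resource_id holds the new attachment ID.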

@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
from cinderclient.v1 import client as cinderclient
@@ -59,6 +59,15 @@ volume_template = '''
"Tags" : [{ "Key" : "Usage", "Value" : "Wiki Data Volume" }]
}
},
"DataVolume2" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "2",
"AvailabilityZone" : {"Fn::GetAtt": ["WikiDatabase",
"AvailabilityZone"]},
"Tags" : [{ "Key" : "Usage", "Value" : "Wiki Data Volume2" }]
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
@@ -84,6 +93,7 @@ class VolumeTest(HeatTestCase):
self.m.StubOutWithMock(self.cinder_fc.volumes, 'delete')
self.m.StubOutWithMock(self.fc.volumes, 'create_server_volume')
self.m.StubOutWithMock(self.fc.volumes, 'delete_server_volume')
self.m.StubOutWithMock(self.fc.volumes, 'get_server_volume')
self.m.StubOutWithMock(nova_utils, 'get_image_id')
utils.setup_dummy_db()
@@ -124,12 +134,16 @@ class VolumeTest(HeatTestCase):
clients.cinderclient.exceptions.NotFound('Not found'))
self.m.ReplayAll()
def _mock_create_server_volume_script(self, fva):
clients.OpenStackClients.nova().MultipleTimes().AndReturn(self.fc)
def _mock_create_server_volume_script(self, fva,
server=u'WikiDatabase',
volume='vol-123',
device=u'/dev/vdc',
update=False):
if not update:
clients.OpenStackClients.nova().MultipleTimes().AndReturn(self.fc)
self.fc.volumes.create_server_volume(
device=u'/dev/vdc', server_id=u'WikiDatabase',
volume_id=u'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get('vol-123').AndReturn(fva)
device=device, server_id=server, volume_id=volume).AndReturn(fva)
self.cinder_fc.volumes.get(volume).AndReturn(fva)
def test_volume(self):
fv = FakeVolume('creating', 'available')
@@ -203,6 +217,7 @@ class VolumeTest(HeatTestCase):
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources'].pop('DataVolume2')
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = stack['DataVolume']
@@ -282,9 +297,16 @@ class VolumeTest(HeatTestCase):
# delete script
fva = FakeVolume('in-use', 'available')
self.fc.volumes.delete_server_volume('WikiDatabase',
'vol-123').AndReturn(None)
self.cinder_fc.volumes.get('vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(
clients.novaclient.exceptions.NotFound('NotFound'))
self.m.ReplayAll()
@@ -296,9 +318,6 @@ class VolumeTest(HeatTestCase):
self.assertEqual('available', fv.status)
rsrc = self.create_attachment(t, stack, 'MountPoint')
self.assertRaises(resource.UpdateReplace,
rsrc.handle_update, {}, {}, {})
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
@@ -318,7 +337,9 @@ class VolumeTest(HeatTestCase):
fva.get().MultipleTimes()
fva.status = "in-use"
self.cinder_fc.volumes.get('vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').AndRaise(
@@ -336,6 +357,11 @@ class VolumeTest(HeatTestCase):
'WikiDatabase', 'vol-123').AndRaise(
clients.cinderclient.exceptions.NotFound('Not found'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(
clients.novaclient.exceptions.NotFound('NotFound'))
self.m.ReplayAll()
t = template_format.parse(volume_template)
@@ -346,9 +372,6 @@ class VolumeTest(HeatTestCase):
self.assertEqual('available', fv.status)
rsrc = self.create_attachment(t, stack, 'MountPoint')
self.assertRaises(resource.UpdateReplace,
rsrc.handle_update, {}, {}, {})
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
@@ -363,7 +386,9 @@ class VolumeTest(HeatTestCase):
self._mock_create_server_volume_script(fva)
# delete script
self.cinder_fc.volumes.get('vol-123').AndRaise(
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndRaise(
clients.cinderclient.exceptions.NotFound('Not found'))
self.m.ReplayAll()
@@ -391,9 +416,16 @@ class VolumeTest(HeatTestCase):
# delete script
volume_detach_cycle = 'in-use', 'detaching', 'available'
fva = FakeLatencyVolume(life_cycle=volume_detach_cycle)
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get('vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(
clients.novaclient.exceptions.NotFound('NotFound'))
self.m.ReplayAll()
@@ -420,10 +452,11 @@ class VolumeTest(HeatTestCase):
# delete script
fva = FakeVolume('in-use', 'error')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume('WikiDatabase',
'vol-123').AndReturn(None)
self.cinder_fc.volumes.get('vol-123').AndReturn(fva)
self.m.ReplayAll()
t = template_format.parse(volume_template)
@@ -461,6 +494,168 @@ class VolumeTest(HeatTestCase):
self.m.VerifyAll()
def test_volume_attachment_update_device(self):
fv = FakeVolume('creating', 'available')
fva = FakeVolume('attaching', 'in-use')
fva2 = FakeVolume('attaching', 'in-use')
stack_name = 'test_volume_attach_stack'
self._mock_create_volume(fv, stack_name)
self._mock_create_server_volume_script(fva)
# delete script
fva = FakeVolume('in-use', 'available')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(
clients.novaclient.exceptions.NotFound('NotFound'))
# attach script
self._mock_create_server_volume_script(fva2, device=u'/dev/vdd',
update=True)
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
stack = utils.parse_stack(t, stack_name=stack_name)
scheduler.TaskRunner(stack['DataVolume'].create)()
self.assertEqual('available', fv.status)
rsrc = self.create_attachment(t, stack, 'MountPoint')
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
after = copy.deepcopy(t)['Resources']['MountPoint']
after['Properties']['VolumeId'] = 'vol-123'
after['Properties']['InstanceId'] = 'WikiDatabase'
after['Properties']['Device'] = '/dev/vdd'
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_volume_attachment_update_volume(self):
fv = FakeVolume('creating', 'available')
fva = FakeVolume('attaching', 'in-use')
fv2 = FakeVolume('creating', 'available')
fv2.id = 'vol-456'
fv2a = FakeVolume('attaching', 'in-use')
fv2a.id = 'vol-456'
stack_name = 'test_volume_attach_stack'
self._mock_create_volume(fv, stack_name)
vol2_name = utils.PhysName(stack_name, 'DataVolume2')
self.cinder_fc.volumes.create(
size=2, availability_zone='nova',
display_description=vol2_name,
display_name=vol2_name,
metadata={u'Usage': u'Wiki Data Volume2'}).AndReturn(fv2)
self._mock_create_server_volume_script(fva)
# delete script
fva = FakeVolume('in-use', 'available')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(
clients.novaclient.exceptions.NotFound('NotFound'))
# attach script
self._mock_create_server_volume_script(fv2a, volume='vol-456',
update=True)
self.m.ReplayAll()
t = template_format.parse(volume_template)
zone = 'nova'
t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = zone
t['Resources']['DataVolume2']['Properties']['AvailabilityZone'] = zone
stack = utils.parse_stack(t, stack_name=stack_name)
scheduler.TaskRunner(stack['DataVolume'].create)()
self.assertEqual('available', fv.status)
scheduler.TaskRunner(stack['DataVolume2'].create)()
self.assertEqual('available', fv2.status)
rsrc = self.create_attachment(t, stack, 'MountPoint')
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
after = copy.deepcopy(t)['Resources']['MountPoint']
after['Properties']['VolumeId'] = 'vol-456'
after['Properties']['InstanceId'] = 'WikiDatabase'
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual(fv2a.id, rsrc.resource_id)
self.m.VerifyAll()
def test_volume_attachment_update_server(self):
fv = FakeVolume('creating', 'available')
fva = FakeVolume('attaching', 'in-use')
fva2 = FakeVolume('attaching', 'in-use')
stack_name = 'test_volume_attach_stack'
self._mock_create_volume(fv, stack_name)
self._mock_create_server_volume_script(fva)
# delete script
fva = FakeVolume('in-use', 'available')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(
clients.novaclient.exceptions.NotFound('NotFound'))
# attach script
self._mock_create_server_volume_script(fva2, server=u'WikiDatabase2',
update=True)
self.m.ReplayAll()
t = template_format.parse(volume_template)
t['Resources']['DataVolume']['Properties']['AvailabilityZone'] = 'nova'
stack = utils.parse_stack(t, stack_name=stack_name)
scheduler.TaskRunner(stack['DataVolume'].create)()
self.assertEqual('available', fv.status)
rsrc = self.create_attachment(t, stack, 'MountPoint')
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
after = copy.deepcopy(t)['Resources']['MountPoint']
after['Properties']['VolumeId'] = 'vol-123'
after['Properties']['InstanceId'] = 'WikiDatabase2'
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
@skipIf(volume_backups is None, 'unable to import volume_backups')
def test_snapshot(self):
stack_name = 'test_volume_stack'
@@ -793,9 +988,16 @@ class VolumeTest(HeatTestCase):
# delete script
fva = FakeVolume('in-use', 'available')
self.fc.volumes.delete_server_volume('WikiDatabase',
'vol-123').AndReturn(None)
self.cinder_fc.volumes.get('vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(
clients.novaclient.exceptions.NotFound('NotFound'))
self.m.ReplayAll()
@@ -817,9 +1019,6 @@ class VolumeTest(HeatTestCase):
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertRaises(resource.UpdateReplace, rsrc.handle_update,
{}, {}, {})
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()