Add VolumeAttachStatus Enum

This change adds a new enum and field, VolumeAttachStatus and
VolumeAttachStatusField, that hold the constants for the
'attach_status' field of the Volume and VolumeAttachment objects.
The enum and field are based on the base oslo.versionedobjects enum
and field. The Volume and VolumeAttachment objects are switched over
to the new field, and all uses of bare strings for comparison and
assignment to this field are replaced with the constants defined in
the enum.

Partial-Implements: bp cinder-object-fields
Change-Id: Ie727348daf425bd988425767f9dfb82da4c3baa8
Kendall Nelson 2016-03-04 17:07:09 -06:00 committed by Kendall Nelson
parent 41b516234f
commit 0a32edb94c
46 changed files with 283 additions and 144 deletions
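For reference, below is a minimal sketch of the enum/field pattern this change follows, written directly against oslo.versionedobjects rather than Cinder's BaseCinderEnum helper (which wraps fields.Enum the same way by passing ALL as valid_values). The standalone __init__ here is only illustrative and is not the code added by this commit:

from oslo_versionedobjects import fields


class VolumeAttachStatus(fields.Enum):
    ATTACHED = 'attached'
    ATTACHING = 'attaching'
    DETACHED = 'detached'
    ERROR_ATTACHING = 'error_attaching'
    ERROR_DETACHING = 'error_detaching'

    ALL = (ATTACHED, ATTACHING, DETACHED, ERROR_ATTACHING, ERROR_DETACHING)

    def __init__(self):
        # Restrict the type to the constants listed in ALL.
        super(VolumeAttachStatus, self).__init__(
            valid_values=VolumeAttachStatus.ALL)


class VolumeAttachStatusField(fields.BaseEnumField):
    # An auto-typed field whose type is the enum above; assigning any
    # other string to a field of this type fails during coercion.
    AUTO_TYPE = VolumeAttachStatus()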

View File

@ -148,8 +148,8 @@ class VolumeAdminController(AdminController):
# parent class?
valid_status = AdminController.valid_status.union(
('attaching', 'in-use', 'detaching', 'maintenance'))
valid_attach_status = ('detached', 'attached',)
valid_attach_status = (fields.VolumeAttachStatus.ATTACHED,
fields.VolumeAttachStatus.DETACHED,)
valid_migration_status = ('migrating', 'error',
'success', 'completing',
'none', 'starting',)

View File

@ -25,6 +25,7 @@ from webob import exc
from cinder.api import common
from cinder.api.openstack import wsgi
from cinder.i18n import _, _LI
from cinder.objects import fields
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import utils as volume_utils
@ -49,7 +50,8 @@ def _translate_attachment_summary_view(_context, vol):
d = []
attachments = vol.volume_attachment
for attachment in attachments:
if attachment.get('attach_status') == 'attached':
if (attachment.get('attach_status') ==
fields.VolumeAttachStatus.ATTACHED):
a = {'id': attachment.get('volume_id'),
'attachment_id': attachment.get('id'),
'volume_id': attachment.get('volume_id'),
@ -95,7 +97,7 @@ def _translate_volume_summary_view(context, vol, image_id=None):
d['multiattach'] = 'false'
d['attachments'] = []
if vol['attach_status'] == 'attached':
if vol['attach_status'] == fields.VolumeAttachStatus.ATTACHED:
d['attachments'] = _translate_attachment_detail_view(context, vol)
d['display_name'] = vol['display_name']

View File

@ -16,6 +16,7 @@
import six
from cinder.api import common
from cinder.objects import fields
class ViewBuilder(common.ViewBuilder):
@ -101,10 +102,11 @@ class ViewBuilder(common.ViewBuilder):
"""Retrieve the attachments of the volume object."""
attachments = []
if volume['attach_status'] == 'attached':
if volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED:
attaches = volume.volume_attachment
for attachment in attaches:
if attachment.get('attach_status') == 'attached':
if (attachment.get('attach_status') ==
fields.VolumeAttachStatus.ATTACHED):
a = {'id': attachment.get('volume_id'),
'attachment_id': attachment.get('id'),
'volume_id': attachment.get('volume_id'),

View File

@ -370,7 +370,7 @@ class API(base.Base):
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'attach_status': 'detached',
'attach_status': c_fields.VolumeAttachStatus.DETACHED,
'encryption_key_id': request_spec.get('encryption_key_id',
None),
'display_description': request_spec.get('description',

View File

@ -1402,7 +1402,8 @@ def volume_attached(context, attachment_id, instance_uuid, host_name,
session=session)
volume_attachment_ref['mountpoint'] = mountpoint
volume_attachment_ref['attach_status'] = 'attached'
volume_attachment_ref['attach_status'] = (fields.VolumeAttachStatus.
ATTACHED)
volume_attachment_ref['instance_uuid'] = instance_uuid
volume_attachment_ref['attached_host'] = host_name
volume_attachment_ref['attach_time'] = timeutils.utcnow()
@ -1413,7 +1414,7 @@ def volume_attached(context, attachment_id, instance_uuid, host_name,
volume_attachment_ref.save(session=session)
volume_ref['status'] = 'in-use'
volume_ref['attach_status'] = 'attached'
volume_ref['attach_status'] = fields.VolumeAttachStatus.ATTACHED
volume_ref.save(session=session)
return volume_ref
@ -1616,7 +1617,7 @@ def volume_detached(context, volume_id, attachment_id):
# If this is already detached, attachment will be None
if attachment:
now = timeutils.utcnow()
attachment['attach_status'] = 'detached'
attachment['attach_status'] = fields.VolumeAttachStatus.DETACHED
attachment['detach_time'] = now
attachment['deleted'] = True
attachment['deleted_at'] = now
@ -1637,12 +1638,12 @@ def volume_detached(context, volume_id, attachment_id):
volume_ref['migration_status'] in ('success', 'error')):
volume_ref['status'] = 'available'
volume_ref['attach_status'] = 'detached'
volume_ref['attach_status'] = fields.VolumeAttachStatus.DETACHED
volume_ref.save(session=session)
else:
# Volume is still attached
volume_ref['status'] = 'in-use'
volume_ref['attach_status'] = 'attached'
volume_ref['attach_status'] = fields.VolumeAttachStatus.ATTACHED
volume_ref.save(session=session)
@ -1715,7 +1716,8 @@ def volume_attachment_get_all_by_volume_id(context, volume_id, session=None):
result = model_query(context, models.VolumeAttachment,
session=session).\
filter_by(volume_id=volume_id).\
filter(models.VolumeAttachment.attach_status != 'detached').\
filter(models.VolumeAttachment.attach_status !=
fields.VolumeAttachStatus.DETACHED).\
all()
return result
@ -1727,7 +1729,8 @@ def volume_attachment_get_all_by_host(context, host):
result = model_query(context, models.VolumeAttachment,
session=session).\
filter_by(attached_host=host).\
filter(models.VolumeAttachment.attach_status != 'detached').\
filter(models.VolumeAttachment.attach_status !=
fields.VolumeAttachStatus.DETACHED).\
all()
return result
@ -1740,7 +1743,8 @@ def volume_attachment_get_all_by_instance_uuid(context,
result = model_query(context, models.VolumeAttachment,
session=session).\
filter_by(instance_uuid=instance_uuid).\
filter(models.VolumeAttachment.attach_status != 'detached').\
filter(models.VolumeAttachment.attach_status !=
fields.VolumeAttachStatus.DETACHED).\
all()
return result
@ -2267,7 +2271,8 @@ def volume_has_undeletable_snapshots_filter():
def volume_has_attachments_filter():
return sql.exists().where(
and_(models.Volume.id == models.VolumeAttachment.volume_id,
models.VolumeAttachment.attach_status != 'detached',
models.VolumeAttachment.attach_status !=
fields.VolumeAttachStatus.DETACHED,
~models.VolumeAttachment.deleted))

View File

@ -124,3 +124,18 @@ class QoSConsumerValues(BaseCinderEnum):
class QoSConsumerField(BaseEnumField):
AUTO_TYPE = QoSConsumerValues()
class VolumeAttachStatus(BaseCinderEnum):
ATTACHED = 'attached'
ATTACHING = 'attaching'
DETACHED = 'detached'
ERROR_ATTACHING = 'error_attaching'
ERROR_DETACHING = 'error_detaching'
ALL = (ATTACHED, ATTACHING, DETACHED, ERROR_ATTACHING,
ERROR_DETACHING)
class VolumeAttachStatusField(BaseEnumField):
AUTO_TYPE = VolumeAttachStatus()
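A brief usage note, not part of the diff: once 'attach_status' is declared with the new field type, only the constants above survive coercion, which is what the TestVolumeAttachStatus test added further down in this change verifies. A rough sketch, assuming the field's standard Field.coerce() interface:

from cinder.objects import fields

field = fields.VolumeAttachStatusField()

# A valid constant passes through coercion unchanged.
field.coerce(None, 'attach_status', fields.VolumeAttachStatus.ATTACHED)

# An arbitrary string is rejected with ValueError, something the old
# bare StringField could never catch.
try:
    field.coerce(None, 'attach_status', 'acme')
except ValueError:
    pass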

View File

@ -22,7 +22,7 @@ from cinder.i18n import _
from cinder import objects
from cinder.objects import base
from cinder.objects import cleanable
from cinder.objects import fields as c_fields
CONF = cfg.CONF
@ -82,7 +82,7 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,
'size': fields.IntegerField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'status': fields.StringField(nullable=True),
'attach_status': fields.StringField(nullable=True),
'attach_status': c_fields.VolumeAttachStatusField(nullable=True),
'migration_status': fields.StringField(nullable=True),
'scheduled_at': fields.DateTimeField(nullable=True),

View File

@ -17,6 +17,7 @@ from oslo_versionedobjects import fields
from cinder import db
from cinder import objects
from cinder.objects import base
from cinder.objects import fields as c_fields
@base.CinderObjectRegistry.register
@ -36,7 +37,7 @@ class VolumeAttachment(base.CinderPersistentObject, base.CinderObject,
'attach_time': fields.DateTimeField(nullable=True),
'detach_time': fields.DateTimeField(nullable=True),
'attach_status': fields.StringField(nullable=True),
'attach_status': c_fields.VolumeAttachStatusField(nullable=True),
'attach_mode': fields.StringField(nullable=True),
}

View File

@ -61,7 +61,7 @@ class BaseAdminTest(test.TestCase):
db_volume = {'status': 'available',
'host': 'test',
'availability_zone': 'fake_zone',
'attach_status': 'detached'}
'attach_status': fields.VolumeAttachStatus.DETACHED}
if updates:
db_volume.update(updates)
@ -146,8 +146,10 @@ class AdminActionsTest(BaseAdminTest):
vac.validate_update({'status': 'error'})
vac.validate_update({'status': 'error_deleting'})
vac.validate_update({'attach_status': 'detached'})
vac.validate_update({'attach_status': 'attached'})
vac.validate_update({'attach_status':
fields.VolumeAttachStatus.DETACHED})
vac.validate_update({'attach_status':
fields.VolumeAttachStatus.ATTACHED})
vac.validate_update({'migration_status': 'migrating'})
vac.validate_update({'migration_status': 'error'})
@ -156,18 +158,24 @@ class AdminActionsTest(BaseAdminTest):
vac.validate_update({'migration_status': 'starting'})
def test_reset_attach_status(self):
volume = db.volume_create(self.ctx, {'attach_status': 'detached'})
volume = db.volume_create(self.ctx,
{'attach_status':
fields.VolumeAttachStatus.DETACHED})
resp = self._issue_volume_reset(self.ctx,
volume,
{'attach_status': 'attached'})
{'attach_status':
fields.VolumeAttachStatus.ATTACHED})
self.assertEqual(202, resp.status_int)
volume = db.volume_get(self.ctx, volume['id'])
self.assertEqual('attached', volume['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
volume['attach_status'])
def test_reset_attach_invalid_status(self):
volume = db.volume_create(self.ctx, {'attach_status': 'detached'})
volume = db.volume_create(self.ctx,
{'attach_status':
fields.VolumeAttachStatus.DETACHED})
resp = self._issue_volume_reset(self.ctx,
volume,
@ -175,7 +183,8 @@ class AdminActionsTest(BaseAdminTest):
self.assertEqual(400, resp.status_int)
volume = db.volume_get(self.ctx, volume['id'])
self.assertEqual('detached', volume['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.DETACHED,
volume['attach_status'])
def test_reset_migration_invalid_status(self):
volume = db.volume_create(self.ctx, {'migration_status': None})
@ -348,7 +357,8 @@ class AdminActionsTest(BaseAdminTest):
attachment = db.volume_attachment_get(self.ctx, attachment['id'])
self.assertEqual('in-use', volume['status'])
self.assertEqual('attached', volume['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
volume['attach_status'])
self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual('attached', attachment['attach_status'])
@ -360,10 +370,11 @@ class AdminActionsTest(BaseAdminTest):
self.assertEqual('rw', admin_metadata[1]['value'])
# Reset attach_status
resp = self._issue_volume_reset(self.ctx,
volume,
{'status': 'available',
'attach_status': 'detached'})
resp = self._issue_volume_reset(
self.ctx,
volume,
{'status': 'available',
'attach_status': fields.VolumeAttachStatus.DETACHED})
# request is accepted
self.assertEqual(202, resp.status_int)
@ -383,22 +394,26 @@ class AdminActionsTest(BaseAdminTest):
volume = db.volume_create(self.ctx,
{'status': 'available', 'host': 'test',
'provider_location': '', 'size': 1,
'attach_status': 'detached'})
resp = self._issue_volume_reset(self.ctx,
volume,
{'status': 'available',
'attach_status': 'invalid'})
'attach_status':
fields.VolumeAttachStatus.DETACHED})
resp = self._issue_volume_reset(
self.ctx,
volume,
{'status': 'available',
'attach_status': fields.VolumeAttachStatus.ERROR_DETACHING})
self.assertEqual(400, resp.status_int)
volume = db.volume_get(self.ctx, volume['id'])
self.assertEqual('available', volume['status'])
self.assertEqual('detached', volume['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.DETACHED,
volume['attach_status'])
def test_snapshot_reset_status(self):
volume = db.volume_create(self.ctx,
{'status': 'available', 'host': 'test',
'provider_location': '', 'size': 1,
'availability_zone': 'test',
'attach_status': 'detached'})
'attach_status':
fields.VolumeAttachStatus.DETACHED})
kwargs = {
'volume_id': volume['id'],
'cgsnapshot_id': None,
@ -524,7 +539,7 @@ class AdminActionsTest(BaseAdminTest):
# current status is available
volume = self._create_volume(self.ctx,
{'provider_location': '',
'attach_status': '',
'attach_status': None,
'replication_status': 'active'})
volume = self._migrate_volume_exec(self.ctx, volume, host,
expected_status)
@ -653,10 +668,14 @@ class AdminActionsTest(BaseAdminTest):
def test_migrate_volume_comp_from_nova(self):
volume = self._create_volume(self.ctx, {'status': 'in-use',
'migration_status': None,
'attach_status': 'attached'})
'attach_status':
fields.VolumeAttachStatus.
ATTACHED})
new_volume = self._create_volume(self.ctx,
{'migration_status': None,
'attach_status': 'detached'})
'attach_status':
fields.VolumeAttachStatus.
DETACHED})
expected_status = 200
expected_id = new_volume.id
self._migrate_volume_comp_exec(self.ctx, volume, new_volume, False,
@ -758,7 +777,8 @@ class AdminActionsAttachDetachTest(BaseAdminTest):
self.assertEqual('in-use', volume.status)
self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
admin_metadata = volume.admin_metadata
self.assertEqual(2, len(admin_metadata))
self.assertEqual('False', admin_metadata['readonly'])
@ -810,7 +830,8 @@ class AdminActionsAttachDetachTest(BaseAdminTest):
self.assertIsNone(attachment['instance_uuid'])
self.assertEqual(host_name, attachment['attached_host'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
admin_metadata = volume.admin_metadata
self.assertEqual(2, len(admin_metadata))
self.assertEqual('False', admin_metadata['readonly'])
@ -858,7 +879,8 @@ class AdminActionsAttachDetachTest(BaseAdminTest):
self.assertEqual('in-use', volume.status)
self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
admin_metadata = volume.admin_metadata
self.assertEqual(2, len(admin_metadata))
self.assertEqual('False', admin_metadata['readonly'])
@ -938,8 +960,10 @@ class AdminActionsAttachDetachTest(BaseAdminTest):
self.assertEqual('in-use', volume.status)
self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
admin_metadata = volume.admin_metadata
self.assertEqual(2, len(admin_metadata))
self.assertEqual('False', admin_metadata['readonly'])
self.assertEqual('rw', admin_metadata['attached_mode'])
@ -1028,7 +1052,7 @@ class AdminActionsAttachDetachTest(BaseAdminTest):
self.volume_api.reserve_volume(self.ctx, volume)
values = {'volume_id': volume['id'],
'attach_status': 'attaching',
'attach_status': fields.VolumeAttachStatus.ATTACHING,
'attach_time': timeutils.utcnow(),
'instance_uuid': 'abc123',
}
@ -1042,7 +1066,8 @@ class AdminActionsAttachDetachTest(BaseAdminTest):
self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid'])
self.assertEqual(volume['id'], attachment['volume_id'], volume['id'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
def test_attach_attaching_volume_with_different_mode(self):
"""Test that attaching volume reserved for another mode fails."""

View File

@ -19,6 +19,7 @@ import webob
from cinder.api.contrib import volume_encryption_metadata
from cinder import context
from cinder import db
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
@ -57,7 +58,7 @@ class VolumeEncryptionMetadataTest(test.TestCase):
'status': status,
'display_name': display_name,
'display_description': display_description,
'attach_status': 'detached',
'attach_status': fields.VolumeAttachStatus.DETACHED,
'availability_zone': availability_zone,
'host': host,
'encryption_key_id': encryption_key_id,

View File

@ -21,6 +21,7 @@ import webob
from cinder import context
from cinder import db
from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
@ -43,7 +44,7 @@ def fake_db_volume_get(*args, **kwargs):
'project_id': fake.PROJECT_ID,
'migration_status': None,
'_name_id': fake.VOLUME2_ID,
'attach_status': 'detached',
'attach_status': fields.VolumeAttachStatus.DETACHED,
}

View File

@ -23,6 +23,7 @@ from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
@ -45,7 +46,7 @@ def fake_db_volume_get(*args, **kwargs):
'project_id': fake.PROJECT_ID,
'migration_status': None,
'_name_id': fake.VOLUME2_ID,
'attach_status': 'detached',
'attach_status': fields.VolumeAttachStatus.DETACHED,
}

View File

@ -26,6 +26,7 @@ import webob
from cinder.api.openstack import api_version_request as api_version
from cinder import context
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
@ -115,7 +116,7 @@ def api_manage(*args, **kwargs):
'snapshot_id': None,
'user_id': fake.USER_ID,
'size': 0,
'attach_status': 'detached',
'attach_status': fields.VolumeAttachStatus.DETACHED,
'volume_type_id': None}
return fake_volume.fake_volume_obj(ctx, **vol)

View File

@ -20,6 +20,7 @@ import webob
from cinder import context
from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
@ -35,7 +36,7 @@ def fake_db_volume_get(*args, **kwargs):
'size': 5,
'availability_zone': 'somewhere',
'created_at': timeutils.utcnow(),
'attach_status': 'detached',
'attach_status': fields.VolumeAttachStatus.DETACHED,
'display_name': 'anothervolume',
'display_description': 'Just another volume!',
'volume_type_id': None,

View File

@ -26,6 +26,7 @@ from cinder.api.contrib import volume_transfer
from cinder import context
from cinder import db
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
@ -54,7 +55,8 @@ class VolumeTransferAPITestCase(test.TestCase):
display_description='this is a test volume',
status='available',
size=1,
project_id=fake.PROJECT_ID):
project_id=fake.PROJECT_ID,
attach_status=fields.VolumeAttachStatus.DETACHED):
"""Create a volume object."""
vol = {}
vol['host'] = 'fake_host'
@ -64,7 +66,7 @@ class VolumeTransferAPITestCase(test.TestCase):
vol['status'] = status
vol['display_name'] = display_name
vol['display_description'] = display_description
vol['attach_status'] = status
vol['attach_status'] = attach_status
vol['availability_zone'] = 'fake_zone'
return db.volume_create(context.get_admin_context(), vol)['id']

View File

@ -19,6 +19,7 @@ import webob
from cinder import context
from cinder import db
from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
@ -78,8 +79,9 @@ class VolumeUnmanageTest(test.TestCase):
def test_unmanage_volume_attached(self):
"""Return 400 if the volume exists but is attached."""
vol = utils.create_volume(self.ctxt, status='in-use',
attach_status='attached')
vol = utils.create_volume(
self.ctxt, status='in-use',
attach_status=fields.VolumeAttachStatus.ATTACHED)
res = self._get_resp(vol.id)
self.assertEqual(400, res.status_int, res)
db.volume_destroy(self.ctxt, vol.id)

View File

@ -37,7 +37,7 @@ def stub_volume(id, **kwargs):
'attached_mode': 'rw',
'status': 'fakestatus',
'migration_status': None,
'attach_status': 'attached',
'attach_status': fields.VolumeAttachStatus.ATTACHED,
'bootable': False,
'name': 'vol name',
'display_name': 'displayname',

View File

@ -22,6 +22,7 @@ from cinder.api.v1 import snapshots
from cinder import context
import cinder.db
from cinder import exception as exc
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
@ -104,7 +105,7 @@ def stub_get(self, context, volume_id, *args, **kwargs):
'volume_type_id': None,
'migration_status': None,
'availability_zone': 'zone1:host1',
'attach_status': 'detached'}
'attach_status': fields.VolumeAttachStatus.DETACHED}
return fake_volume.fake_volume_obj(context, **vol)

View File

@ -24,6 +24,7 @@ from cinder.api.v1 import volumes
import cinder.db
from cinder import exception as exc
from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v1 import stubs
@ -133,7 +134,7 @@ def get_volume(self, context, volume_id, *args, **kwargs):
'volume_type_id': None,
'migration_status': None,
'availability_zone': 'zone1:host1',
'attach_status': 'detached'}
'attach_status': fields.VolumeAttachStatus.DETACHED}
return fake_volume.fake_volume_obj(context, **vol)

View File

@ -26,6 +26,7 @@ from cinder.api.v1 import volumes
from cinder import context
from cinder import db
from cinder import exception as exc
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v1 import stubs
@ -588,7 +589,9 @@ class VolumeApiTest(test.TestCase):
def test_volume_show_no_attachments(self):
def stub_volume_get(self, context, volume_id, **kwargs):
vol = stubs.stub_volume(volume_id, attach_status='detached')
vol = stubs.stub_volume(
volume_id,
attach_status=fields.VolumeAttachStatus.DETACHED)
return fake_volume.fake_volume_obj(context, **vol)
self.stubs.Set(volume_api.API, 'get', stub_volume_get)

View File

@ -47,7 +47,7 @@ def create_fake_volume(id, **kwargs):
'availability_zone': DEFAULT_AZ,
'status': DEFAULT_VOL_STATUS,
'migration_status': None,
'attach_status': 'attached',
'attach_status': fields.VolumeAttachStatus.ATTACHED,
'name': 'vol name',
'display_name': DEFAULT_VOL_NAME,
'display_description': DEFAULT_VOL_DESCRIPTION,
@ -75,7 +75,7 @@ def create_fake_volume(id, **kwargs):
volume.update(kwargs)
if kwargs.get('volume_glance_metadata', None):
volume['bootable'] = True
if kwargs.get('attach_status') == 'detached':
if kwargs.get('attach_status') == fields.VolumeAttachStatus.DETACHED:
del volume['volume_admin_metadata'][0]
return volume
@ -257,7 +257,7 @@ def fake_volume_type_get(context, id, *args, **kwargs):
def fake_volume_admin_metadata_get(context, volume_id, **kwargs):
admin_meta = {'attached_mode': 'rw', 'readonly': 'False'}
if kwargs.get('attach_status') == 'detached':
if kwargs.get('attach_status') == fields.VolumeAttachStatus.DETACHED:
del admin_meta['attached_mode']
return admin_meta

View File

@ -25,6 +25,7 @@ from cinder.api.v2 import snapshots
from cinder import context
import cinder.db
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
@ -91,7 +92,7 @@ def fake_get(context, *args, **kwargs):
'volume_type_id': None,
'migration_status': None,
'availability_zone': 'fake-zone',
'attach_status': 'detached',
'attach_status': fields.VolumeAttachStatus.DETACHED,
'metadata': {}}
return fake_volume.fake_volume_obj(context, **vol)

View File

@ -33,6 +33,7 @@ from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import fakes as v2_fakes
@ -1310,13 +1311,15 @@ class VolumeApiTest(test.TestCase):
def test_volume_show_no_attachments(self):
def fake_volume_get(self, context, volume_id, **kwargs):
vol = v2_fakes.create_fake_volume(volume_id,
attach_status='detached')
vol = v2_fakes.create_fake_volume(
volume_id, attach_status=
fields.VolumeAttachStatus.DETACHED)
return fake_volume.fake_volume_obj(context, **vol)
def fake_volume_admin_metadata_get(context, volume_id, **kwargs):
return v2_fakes.fake_volume_admin_metadata_get(
context, volume_id, attach_status='detached')
context, volume_id, attach_status=
fields.VolumeAttachStatus.DETACHED)
self.mock_object(volume_api.API, 'get', fake_volume_get)
self.mock_object(db, 'volume_admin_metadata_get',

View File

@ -127,7 +127,7 @@ class BaseBackupTest(test.TestCase):
vol['status'] = status
vol['display_name'] = display_name
vol['display_description'] = display_description
vol['attach_status'] = 'detached'
vol['attach_status'] = fields.VolumeAttachStatus.DETACHED
vol['availability_zone'] = '1'
vol['previous_status'] = previous_status
volume = objects.Volume(context=self.ctxt, **vol)
@ -162,7 +162,7 @@ class BaseBackupTest(test.TestCase):
def _create_volume_attach(self, volume_id):
values = {'volume_id': volume_id,
'attach_status': 'attached', }
'attach_status': fields.VolumeAttachStatus.ATTACHED, }
attachment = db.volume_attach(self.ctxt, values)
db.volume_attached(self.ctxt, attachment['id'], None, 'testhost',
'/dev/vd0')

View File

@ -15,6 +15,7 @@
from oslo_versionedobjects import fields
from cinder import objects
from cinder.objects import fields as c_fields
from cinder.tests.unit import fake_constants as fake
@ -25,7 +26,7 @@ def fake_db_volume(**updates):
'name': 'volume-%s' % fake.VOLUME_ID,
'availability_zone': 'fake_availability_zone',
'status': 'available',
'attach_status': 'detached',
'attach_status': c_fields.VolumeAttachStatus.DETACHED,
'previous_status': None,
'volume_attachment': [],
'volume_metadata': [],

View File

@ -25,6 +25,7 @@ from cinder import db
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder import objects
from cinder.objects import fields as c_fields
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_objects
@ -193,7 +194,7 @@ class TestCinderObjectConditionalUpdate(test.TestCase):
'status': 'available',
'availability_zone': 'az',
'host': 'dummy',
'attach_status': 'no',
'attach_status': c_fields.VolumeAttachStatus.DETACHED,
}
volume = objects.Volume(context=self.context, **vol)
volume.create()

View File

@ -131,3 +131,28 @@ class TestSnapshotStatus(TestField):
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'not_a_status')
class TestVolumeAttachStatus(TestField):
def setUp(self):
super(TestVolumeAttachStatus, self).setUp()
self.field = fields.VolumeAttachStatusField()
self.coerce_good_values = [('attaching',
fields.VolumeAttachStatus.ATTACHING),
('attached',
fields.VolumeAttachStatus.ATTACHED),
('detached',
fields.VolumeAttachStatus.DETACHED),
('error_attaching',
fields.VolumeAttachStatus.ERROR_ATTACHING),
('error_detaching',
fields.VolumeAttachStatus.ERROR_DETACHING)]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'attaching'", self.field.stringify('attaching'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'not_a_status')

View File

@ -40,9 +40,9 @@ object_data = {
'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'Snapshot': '1.2-d6a9d58f627bb2a5cf804b0dd7a12bc7',
'SnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'Volume': '1.6-19919d8086d6a38ab9d3ab88139e70e0',
'Volume': '1.6-8a56256db74c0642dca1a30739d88074',
'VolumeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'VolumeAttachment': '1.0-b30dacf62b2030dd83d8a1603f1064ff',
'VolumeAttachment': '1.0-6a2216211f579ffd7fd22708703c13a3',
'VolumeAttachmentList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'VolumeProperties': '1.1-cadac86b2bdc11eb79d1dcea988ff9e8',
'VolumeType': '1.3-a5d8c3473db9bc3bbcdbab9313acf4d1',

View File

@ -16,6 +16,7 @@ import mock
import six
from cinder import objects
from cinder.objects import fields
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit import objects as test_objects
@ -34,10 +35,11 @@ class TestVolumeAttachment(test_objects.BaseObjectsTestCase):
@mock.patch('cinder.db.volume_attachment_update')
def test_save(self, volume_attachment_update):
attachment = fake_volume.fake_volume_attachment_obj(self.context)
attachment.attach_status = 'attaching'
attachment.attach_status = fields.VolumeAttachStatus.ATTACHING
attachment.save()
volume_attachment_update.assert_called_once_with(
self.context, attachment.id, {'attach_status': 'attaching'})
self.context, attachment.id,
{'attach_status': fields.VolumeAttachStatus.ATTACHING})
@mock.patch('cinder.db.sqlalchemy.api.volume_attachment_get')
def test_refresh(self, attachment_get):

View File

@ -288,7 +288,7 @@ class DBAPIVolumeTestCase(BaseTest):
instance_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
values = {'volume_id': volume['id'],
'instance_uuid': instance_uuid,
'attach_status': 'attaching', }
'attach_status': fields.VolumeAttachStatus.ATTACHING, }
attachment = db.volume_attach(self.ctxt, values)
db.volume_attached(self.ctxt, attachment['id'],
instance_uuid, None, '/tmp')
@ -296,7 +296,8 @@ class DBAPIVolumeTestCase(BaseTest):
attachment = db.volume_attachment_get(self.ctxt, attachment['id'])
self.assertEqual('in-use', volume['status'])
self.assertEqual('/tmp', attachment['mountpoint'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
@ -305,7 +306,7 @@ class DBAPIVolumeTestCase(BaseTest):
host_name = 'fake_host'
values = {'volume_id': volume['id'],
'attached_host': host_name,
'attach_status': 'attaching', }
'attach_status': fields.VolumeAttachStatus.ATTACHING, }
attachment = db.volume_attach(self.ctxt, values)
db.volume_attached(self.ctxt, attachment['id'],
None, host_name, '/tmp')
@ -313,7 +314,8 @@ class DBAPIVolumeTestCase(BaseTest):
attachment = db.volume_attachment_get(self.ctxt, attachment['id'])
self.assertEqual('in-use', volume['status'])
self.assertEqual('/tmp', attachment['mountpoint'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertIsNone(attachment['instance_uuid'])
self.assertEqual(attachment['attached_host'], host_name)
@ -355,7 +357,7 @@ class DBAPIVolumeTestCase(BaseTest):
instance_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
values = {'volume_id': volume['id'],
'instance_uuid': instance_uuid,
'attach_status': 'attaching', }
'attach_status': fields.VolumeAttachStatus.ATTACHING, }
attachment = db.volume_attach(self.ctxt, values)
db.volume_attached(self.ctxt, attachment['id'],
instance_uuid,
@ -373,7 +375,7 @@ class DBAPIVolumeTestCase(BaseTest):
host_name = 'fake_host'
values = {'volume_id': volume['id'],
'attach_host': host_name,
'attach_status': 'attaching', }
'attach_status': fields.VolumeAttachStatus.ATTACHING, }
attachment = db.volume_attach(self.ctxt, values)
db.volume_attached(self.ctxt, attachment['id'],
None, host_name, '/tmp')

View File

@ -82,7 +82,7 @@ class QuotaIntegrationTestCase(test.TestCase):
vol['volume_type_id'] = self.volume_type['id']
vol['host'] = 'fake_host'
vol['availability_zone'] = 'fake_zone'
vol['attach_status'] = 'detached'
vol['attach_status'] = fields.VolumeAttachStatus.DETACHED
volume = objects.Volume(context=self.context, **vol)
volume.create()
return volume

View File

@ -2087,7 +2087,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual("in-use", vol['status'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
@ -2260,7 +2261,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
@ -2328,7 +2330,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
@ -2352,7 +2355,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment2['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment2['attach_status'])
self.assertEqual(mountpoint2, attachment2['mountpoint'])
self.assertEqual(instance2_uuid, attachment2['instance_uuid'])
self.assertIsNone(attachment2['attached_host'])
@ -2398,7 +2402,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
@ -2448,7 +2453,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertFalse(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
@ -2501,7 +2507,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
@ -2548,7 +2555,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
@ -2571,7 +2579,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', attachment2['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment2['attach_status'])
self.assertEqual(mountpoint2, attachment2['mountpoint'])
self.assertIsNone(attachment2['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
@ -2610,7 +2619,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
@ -2657,7 +2667,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertFalse(vol['multiattach'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
@ -2685,7 +2696,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
@ -2718,7 +2730,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
vol = db.volume_get(context.get_admin_context(), volume_id)
attachment = vol['volume_attachment'][0]
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', vol['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
vol['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
@ -2739,7 +2752,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
vol = db.volume_get(self.context, volume_id)
attachment = vol['volume_attachment']
self.assertEqual('available', vol['status'])
self.assertEqual('detached', vol['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.DETACHED,
vol['attach_status'])
self.assertEqual([], attachment)
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
@ -2751,7 +2765,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
vol = db.volume_get(context.get_admin_context(), volume_id)
attachment = vol['volume_attachment'][0]
self.assertEqual('in-use', vol['status'])
self.assertEqual('attached', vol['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
vol['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
self.assertEqual('fake-host', attachment['attached_host'])
@ -2772,7 +2787,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
vol = db.volume_get(self.context, volume_id)
attachment = vol['volume_attachment']
self.assertEqual('available', vol['status'])
self.assertEqual('detached', vol['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.DETACHED,
vol['attach_status'])
self.assertEqual([], attachment)
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
@ -2811,7 +2827,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('error_attaching', vol['status'])
self.assertEqual('detached', vol['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.DETACHED,
vol['attach_status'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='rw')
@ -2831,7 +2848,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('error_attaching', vol['status'])
self.assertEqual('detached', vol['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.DETACHED,
vol['attach_status'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='rw')
@ -2859,7 +2877,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('detached', vol['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.DETACHED,
vol['attach_status'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
@ -2875,7 +2894,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('detached', vol['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.DETACHED,
vol['attach_status'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
@ -2904,7 +2924,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
vol = db.volume_get(self.context, volume_id)
# Check that volume status is 'uploading'
self.assertEqual("uploading", vol['status'])
self.assertEqual("detached", vol['attach_status'])
self.assertEqual(fields.VolumeAttachStatus.DETACHED,
vol['attach_status'])
def test_reserve_volume_success(self):
volume = tests_utils.create_volume(self.context, status='available')
@ -3147,7 +3168,8 @@ class VolumeTestCase(base.BaseVolumeTestCase):
"""Test volume can't be force delete in attached state."""
volume = tests_utils.create_volume(self.context, CONF.host,
status='in-use',
attach_status = 'attached')
attach_status=
fields.VolumeAttachStatus.ATTACHED)
self.assertRaises(exception.InvalidVolume,
self.volume_api.delete,
@ -3218,7 +3240,7 @@ class VolumeTestCase(base.BaseVolumeTestCase):
self.volume.create_volume(self.context, volume)
values = {'volume_id': volume['id'],
'instance_uuid': instance_uuid,
'attach_status': 'attaching', }
'attach_status': fields.VolumeAttachStatus.ATTACHING, }
attachment = db.volume_attach(self.context, values)
db.volume_attached(self.context, attachment['id'], instance_uuid,
None, '/dev/sda1')
@ -3241,7 +3263,7 @@ class VolumeTestCase(base.BaseVolumeTestCase):
self.volume.create_volume(self.context, volume)
values = {'volume_id': volume['id'],
'attached_host': 'fake_host',
'attach_status': 'attaching', }
'attach_status': fields.VolumeAttachStatus.ATTACHING, }
attachment = db.volume_attach(self.context, values)
db.volume_attached(self.context, attachment['id'], None,
'fake_host', '/dev/sda1')
@ -3509,7 +3531,7 @@ class VolumeTestCase(base.BaseVolumeTestCase):
'size': 20,
'availability_zone': 'fake_availability_zone',
'status': 'creating',
'attach_status': 'detached',
'attach_status': fields.VolumeAttachStatus.DETACHED,
'host': 'dummy'}
volume = objects.Volume(context=self.context, **kwargs)
volume.create()
@ -3727,13 +3749,16 @@ class VolumeTestCase(base.BaseVolumeTestCase):
self.context, volume)
db.volume_update(self.context, volume.id,
{'status': 'in-use', 'attach_status': 'detached'})
{'status': 'in-use',
'attach_status':
fields.VolumeAttachStatus.DETACHED})
# Should raise an error since not attached
self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching,
self.context, volume)
db.volume_update(self.context, volume.id,
{'attach_status': 'attached'})
{'attach_status':
fields.VolumeAttachStatus.ATTACHED})
# Ensure when attached no exception raised
volume_api.begin_detaching(self.context, volume)
@ -6269,7 +6294,8 @@ class GenericVolumeDriverTestCase(DriverTestCase):
temp_vol = self.volume.driver._create_temp_volume_from_snapshot(
self.context,
vol, snapshot)
self.assertEqual('detached', temp_vol.attach_status)
self.assertEqual(fields.VolumeAttachStatus.DETACHED,
temp_vol.attach_status)
self.assertEqual('fakezone', temp_vol.availability_zone)
@mock.patch.object(utils, 'brick_get_connector_properties')

View File

@ -47,7 +47,7 @@ class VolumeRpcAPITestCase(test.TestCase):
vol['host'] = 'fake_host'
vol['availability_zone'] = CONF.storage_availability_zone
vol['status'] = "available"
vol['attach_status'] = "detached"
vol['attach_status'] = fields.VolumeAttachStatus.DETACHED
vol['metadata'] = {"test_key": "test_val"}
vol['size'] = 1
volume = db.volume_create(self.context, vol)

View File

@ -62,7 +62,7 @@ def create_volume(ctxt,
vol['migration_status'] = migration_status
vol['display_name'] = display_name
vol['display_description'] = display_description
vol['attach_status'] = 'detached'
vol['attach_status'] = fields.VolumeAttachStatus.DETACHED
vol['availability_zone'] = availability_zone
if consistencygroup_id:
vol['consistencygroup_id'] = consistencygroup_id

View File

@ -21,6 +21,7 @@ import json
import mock
from cinder.objects import fields
from cinder.tests.unit import fake_constants as fake
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp.eseries import utils
@ -41,7 +42,7 @@ FAKE_CINDER_VOLUME = {
'display_description': 'lun1',
'volume_type_id': None,
'migration_status': None,
'attach_status': "detached"
'attach_status': fields.VolumeAttachStatus.DETACHED
}
FAKE_CINDER_SNAPSHOT = {

View File

@ -20,6 +20,7 @@ import mock
import six
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.eseries \
import fakes as eseries_fakes
@ -36,7 +37,7 @@ def get_fake_volume():
'provider_auth': 'provider a b', 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None, 'migration_status': None, 'attach_status':
"detached", "status": "available"
fields.VolumeAttachStatus.DETACHED, "status": "available"
}
FAKE_MAPPINGS = [{u'lun': 1}]
@ -280,7 +281,7 @@ class NetAppEseriesHostMapperTestCase(test.TestCase):
fake_mapping_to_other_host['mapRef'] = \
eseries_fakes.MULTIATTACH_HOST_GROUP['clusterRef']
fake_volume = get_fake_volume()
fake_volume['attach_status'] = "attached"
fake_volume['attach_status'] = fields.VolumeAttachStatus.ATTACHED
err = self.assertRaises(exception.NetAppDriverException,
host_mapper.map_volume_to_single_host,
@ -300,7 +301,7 @@ class NetAppEseriesHostMapperTestCase(test.TestCase):
fake_mapping_to_other_host['mapRef'] = \
eseries_fakes.MULTIATTACH_HOST_GROUP['clusterRef']
fake_volume = get_fake_volume()
fake_volume['attach_status'] = "attached"
fake_volume['attach_status'] = fields.VolumeAttachStatus.ATTACHED
err = self.assertRaises(exception.NetAppDriverException,
host_mapper.map_volume_to_single_host,

View File

@ -30,6 +30,7 @@ from six.moves import reduce
from cinder import context
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit import fake_snapshot
@ -56,7 +57,7 @@ def get_fake_volume():
'provider_auth': 'provider a b', 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None, 'migration_status': None, 'attach_status':
"detached"
fields.VolumeAttachStatus.DETACHED
}

View File

@ -21,6 +21,7 @@ import mock
import requests
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.volume.drivers.dothill import dothill_client as dothill
from cinder.volume.drivers.dothill import dothill_common
@ -100,9 +101,10 @@ stats_large_space = {'free_capacity_gb': 90, 'total_capacity_gb': 100}
vol_id = 'fceec30e-98bc-4ce5-85ff-d7309cc17cc2'
test_volume = {'id': vol_id, 'name_id': None,
'display_name': 'test volume', 'name': 'volume', 'size': 10}
test_retype_volume = {'attach_status': 'available', 'id': vol_id,
'name_id': None, 'display_name': 'test volume',
'name': 'volume', 'size': 10}
test_retype_volume = {'attach_status': fields.VolumeAttachStatus.DETACHED,
'id': vol_id, 'name_id': None,
'display_name': 'test volume', 'name': 'volume',
'size': 10}
test_host = {'capabilities': {'location_info':
'DotHillVolumeDriver:xxxxx:dg02:A'}}
test_snap = {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
@ -117,11 +119,11 @@ dest_volume = {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
attached_volume = {'id': vol_id,
'display_name': 'test volume', 'name': 'volume',
'size': 10, 'status': 'in-use',
'attach_status': 'attached'}
'attach_status': fields.VolumeAttachStatus.ATTACHED}
attaching_volume = {'id': vol_id,
'display_name': 'test volume', 'name': 'volume',
'size': 10, 'status': 'attaching',
'attach_status': 'attached'}
'attach_status': fields.VolumeAttachStatus.ATTACHED}
detached_volume = {'id': vol_id, 'name_id': None,
'display_name': 'test volume', 'name': 'volume',
'size': 10, 'status': 'available',

View File

@ -29,6 +29,7 @@ from cinder import exception
import cinder.image.glance
from cinder.image import image_utils
from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
@ -1094,7 +1095,7 @@ class ManagedRBDTestCase(test_volume.DriverTestCase):
'size': 20,
'status': 'creating',
'availability_zone': 'fake_zone',
'attach_status': 'detached',
'attach_status': fields.VolumeAttachStatus.DETACHED,
'host': 'dummy'}
volume = objects.Volume(context=self.context, **db_volume)
volume.create()

View File

@ -394,10 +394,11 @@ class API(base.Base):
return
# Build required conditions for conditional update
expected = {'attach_status': db.Not('attached'),
'migration_status': self.AVAILABLE_MIGRATION_STATUS,
'consistencygroup_id': None,
'group_id': None}
expected = {
'attach_status': db.Not(fields.VolumeAttachStatus.ATTACHED),
'migration_status': self.AVAILABLE_MIGRATION_STATUS,
'consistencygroup_id': None,
'group_id': None}
# If not force deleting we have status conditions
if not force:
@ -647,7 +648,7 @@ class API(base.Base):
# user to see that the volume is 'detaching'. Having
# 'migration_status' set will have the same effect internally.
expected = {'status': 'in-use',
'attach_status': 'attached',
'attach_status': fields.VolumeAttachStatus.ATTACHED,
'migration_status': self.AVAILABLE_MIGRATION_STATUS}
result = volume.conditional_update({'status': 'detaching'}, expected)

View File

@ -1408,7 +1408,7 @@ class BaseVD(object):
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'attach_status': 'detached',
'attach_status': fields.VolumeAttachStatus.DETACHED,
'availability_zone': volume.availability_zone,
'volume_type_id': volume.volume_type_id,
}

View File

@ -26,6 +26,7 @@ from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _, _LE
from cinder.objects import fields
from cinder.volume.drivers.dothill import dothill_client as dothill
LOG = logging.getLogger(__name__)
@ -200,7 +201,7 @@ class DotHillCommon(object):
Make sure that the volume is not in use when trying to copy it.
"""
if (volume['status'] != "available" or
volume['attach_status'] == "attached"):
volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED):
LOG.error(_LE("Volume must be detached for clone operation."))
raise exception.VolumeAttached(volume_id=volume['id'])
@ -451,7 +452,7 @@ class DotHillCommon(object):
"""
false_ret = (False, None)
if volume['attach_status'] == "attached":
if volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED:
return false_ret
if 'location_info' not in host['capabilities']:
return false_ret

View File

@ -25,6 +25,7 @@ from six.moves import range
from cinder import exception
from cinder.i18n import _
from cinder.objects import fields
from cinder import utils as cinder_utils
from cinder.volume.drivers.netapp.eseries import exception as eseries_exc
from cinder.volume.drivers.netapp.eseries import utils
@ -66,7 +67,7 @@ def map_volume_to_single_host(client, volume, eseries_vol, host,
# If volume is not currently attached according to Cinder, it is
# safe to delete the mapping
if not (volume['attach_status'] == 'attached'):
if not (volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED):
LOG.debug("Volume %(vol)s is not currently attached, moving "
"existing mapping to host %(host)s.",
{'vol': volume['id'], 'host': host['label']})

View File

@ -512,7 +512,7 @@ class EntryCreateTask(flow_utils.CinderTask):
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'attach_status': 'detached',
'attach_status': fields.VolumeAttachStatus.DETACHED,
'encryption_key_id': kwargs.pop('encryption_key_id'),
# Rename these to the internal name.
'display_description': kwargs.pop('description'),

View File

@ -20,6 +20,7 @@ from cinder import exception
from cinder import flow_utils
from cinder.i18n import _LE
from cinder import objects
from cinder.objects import fields
from cinder.volume.flows import common
LOG = logging.getLogger(__name__)
@ -56,7 +57,7 @@ class EntryCreateTask(flow_utils.CinderTask):
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'managing',
'attach_status': 'detached',
'attach_status': fields.VolumeAttachStatus.DETACHED,
# Rename these to the internal name.
'display_description': kwargs.pop('description'),
'display_name': kwargs.pop('name'),

View File

@ -661,7 +661,7 @@ class VolumeManager(manager.CleanableManager,
else:
project_id = context.project_id
if volume['attach_status'] == "attached":
if volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED:
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume.id)
if vol_utils.extract_host(volume.host) != self.host:
@ -1055,9 +1055,10 @@ class VolumeManager(manager.CleanableManager,
# so set the status to available and move on.
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
self.db.volume_update(context, volume_id,
{'status': 'available',
'attach_status': 'detached'})
self.db.volume_update(
context, volume_id, {
'status': 'available',
'attach_status': fields.VolumeAttachStatus.DETACHED})
return
self._notify_about_volume_usage(context, volume, "detach.start")
@ -1076,8 +1077,9 @@ class VolumeManager(manager.CleanableManager,
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': 'error_detaching'})
context, attachment.get('id'), {
'attach_status':
fields.VolumeAttachStatus.ERROR_DETACHING})
# NOTE(jdg): We used to do an ensure export here to
# catch upgrades while volumes were attached (E->F)
@ -1159,7 +1161,8 @@ class VolumeManager(manager.CleanableManager,
new_vol_values = {k: volume[k] for k in set(volume.keys()) -
self._VOLUME_CLONE_SKIP_PROPERTIES}
new_vol_values['volume_type_id'] = volume_type_id
new_vol_values['attach_status'] = 'detached'
new_vol_values['attach_status'] = (
fields.VolumeAttachStatus.DETACHED)
new_vol_values['status'] = 'creating'
new_vol_values['project_id'] = ctx.project_id
new_vol_values['display_name'] = 'image-%s' % image_meta['id']
@ -1692,7 +1695,7 @@ class VolumeManager(manager.CleanableManager,
context=ctxt,
host=host['host'],
status='creating',
attach_status='detached',
attach_status=fields.VolumeAttachStatus.DETACHED,
migration_status='target:%s' % volume['id'],
**new_vol_values
)
@ -2886,7 +2889,8 @@ class VolumeManager(manager.CleanableManager,
volumes = self.db.volume_get_all_by_group(context, group.id)
for volume_ref in volumes:
if volume_ref['attach_status'] == "attached":
if (volume_ref['attach_status'] ==
fields.VolumeAttachStatus.ATTACHED):
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_ref['id'])
# self.host is 'host@backend'