From 4b9233c7e5b46c8f6bcc5b41916310c77d04a5f7 Mon Sep 17 00:00:00 2001
From: wangxiyuan
Date: Wed, 29 Mar 2017 14:40:33 +0800
Subject: [PATCH] Fix error status check when creating a backup

When creating a backup from a snapshot, the volume's status was still
being checked instead of the snapshot's status. This patch is a quick
fix: check the snapshot's status when a snapshot is supplied, and keep
checking the volume's status otherwise.

Change-Id: I3a477ed622f076de9451741d198ac1d62c8d5b40
Related-Bug: #1670541
---
 cinder/backup/api.py                    | 14 ++++-----
 cinder/backup/manager.py                | 38 +++++++++++++++----------
 cinder/tests/unit/backup/test_backup.py | 31 ++++++++++++++++----
 3 files changed, 56 insertions(+), 27 deletions(-)

diff --git a/cinder/backup/api.py b/cinder/backup/api.py
index fb8e0d7c49a..232458bb789 100644
--- a/cinder/backup/api.py
+++ b/cinder/backup/api.py
@@ -215,20 +215,20 @@ class API(base.Base):
                    % {'vol1': volume_id, 'vol2': snapshot.volume_id})
             raise exception.InvalidVolume(reason=msg)
 
-        if volume['status'] not in ["available", "in-use"]:
+        if snapshot['status'] not in ["available"]:
+            msg = (_('Snapshot to be backed up must be available, '
+                     'but the current status is "%s".')
+                   % snapshot['status'])
+            raise exception.InvalidSnapshot(reason=msg)
+        elif volume['status'] not in ["available", "in-use"]:
             msg = (_('Volume to be backed up must be available '
                      'or in-use, but the current status is "%s".')
                    % volume['status'])
             raise exception.InvalidVolume(reason=msg)
-        elif volume['status'] in ["in-use"] and not snapshot_id and not force:
+        elif volume['status'] in ["in-use"] and not force:
             msg = _('Backing up an in-use volume must use '
                     'the force flag.')
             raise exception.InvalidVolume(reason=msg)
-        elif snapshot_id and snapshot['status'] not in ["available"]:
-            msg = (_('Snapshot to be backed up must be available, '
-                     'but the current status is "%s".')
-                   % snapshot['status'])
-            raise exception.InvalidSnapshot(reason=msg)
 
         previous_status = volume['status']
         volume_host = volume_utils.extract_host(volume.host, 'host')
diff --git a/cinder/backup/manager.py b/cinder/backup/manager.py
index 21184828876..86c9d6ceaf2 100644
--- a/cinder/backup/manager.py
+++ b/cinder/backup/manager.py
@@ -360,9 +360,18 @@ class BackupManager(manager.ThreadPoolManager):
         snapshot = objects.Snapshot.get_by_id(
             context, snapshot_id) if snapshot_id else None
         previous_status = volume.get('previous_status', None)
-        LOG.info('Create backup started, backup: %(backup_id)s '
-                 'volume: %(volume_id)s.',
-                 {'backup_id': backup.id, 'volume_id': volume_id})
+        if snapshot_id:
+            log_message = ('Create backup started, backup: %(backup_id)s '
+                           'volume: %(volume_id)s snapshot: %(snapshot_id)s.'
+                           % {'backup_id': backup.id,
+                              'volume_id': volume_id,
+                              'snapshot_id': snapshot_id})
+        else:
+            log_message = ('Create backup started, backup: %(backup_id)s '
+                           'volume: %(volume_id)s.'
+                           % {'backup_id': backup.id,
+                              'volume_id': volume_id})
+        LOG.info(log_message)
 
         self._notify_about_backup_usage(context, backup, "create.start")
 
@@ -371,19 +380,8 @@ class BackupManager(manager.ThreadPoolManager):
         backup.availability_zone = self.az
         backup.save()
 
-        expected_status = 'available' if snapshot_id else "backing-up"
-        actual_status = volume['status']
-        if actual_status != expected_status:
-            err = _('Create backup aborted, expected volume status '
-                    '%(expected_status)s but got %(actual_status)s.') % {
-                'expected_status': expected_status,
-                'actual_status': actual_status,
-            }
-            self._update_backup_error(backup, err)
-            raise exception.InvalidVolume(reason=err)
-
+        expected_status = "backing-up"
         if snapshot_id:
-            expected_status = fields.SnapshotStatus.BACKING_UP
             actual_status = snapshot['status']
             if actual_status != expected_status:
                 err = _('Create backup aborted, expected snapshot status '
@@ -393,6 +391,16 @@
                 }
                 self._update_backup_error(backup, err)
                 raise exception.InvalidSnapshot(reason=err)
+        else:
+            actual_status = volume['status']
+            if actual_status != expected_status:
+                err = _('Create backup aborted, expected volume status '
+                        '%(expected_status)s but got %(actual_status)s.') % {
+                    'expected_status': expected_status,
+                    'actual_status': actual_status,
+                }
+                self._update_backup_error(backup, err)
+                raise exception.InvalidVolume(reason=err)
 
         expected_status = fields.BackupStatus.CREATING
         actual_status = backup.status
diff --git a/cinder/tests/unit/backup/test_backup.py b/cinder/tests/unit/backup/test_backup.py
index 3e44527bf79..1adc01d461d 100644
--- a/cinder/tests/unit/backup/test_backup.py
+++ b/cinder/tests/unit/backup/test_backup.py
@@ -620,11 +620,14 @@ class BackupTestCase(BaseBackupTest):
         self.assertEqual(vol_size, backup['size'])
 
     @mock.patch('cinder.backup.manager.BackupManager._run_backup')
-    @ddt.data(fields.SnapshotStatus.BACKING_UP,
-              fields.SnapshotStatus.AVAILABLE)
-    def test_create_backup_with_snapshot(self, snapshot_status,
+    @ddt.data((fields.SnapshotStatus.BACKING_UP, 'available'),
+              (fields.SnapshotStatus.BACKING_UP, 'in-use'),
+              (fields.SnapshotStatus.AVAILABLE, 'available'),
+              (fields.SnapshotStatus.AVAILABLE, 'in-use'))
+    @ddt.unpack
+    def test_create_backup_with_snapshot(self, snapshot_status, volume_status,
                                           mock_run_backup):
-        vol_id = self._create_volume_db_entry(status='available')
+        vol_id = self._create_volume_db_entry(status=volume_status)
         snapshot = self._create_snapshot_db_entry(volume_id=vol_id,
                                                   status=snapshot_status)
         backup = self._create_backup_db_entry(volume_id=vol_id,
@@ -635,7 +638,7 @@
 
             vol = objects.Volume.get_by_id(self.ctxt, vol_id)
             snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id)
-            self.assertEqual('available', vol.status)
+            self.assertEqual(volume_status, vol.status)
             self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot.status)
         else:
             self.assertRaises(exception.InvalidSnapshot,
@@ -1518,6 +1521,24 @@ class BackupAPITestCase(BaseBackupTest):
         backup = self.api.create(self.ctxt, None, None, volume_id, None)
         self.assertEqual('testhost', backup.host)
 
+    @mock.patch.object(api.API, '_get_available_backup_service_host',
+                       return_value='fake_host')
+    @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup')
+    def test_create_backup_from_snapshot_with_volume_in_use(
+            self, mock_create, mock_get_service):
+        self.ctxt.user_id = 'fake_user'
+        self.ctxt.project_id = 'fake_project'
+        volume_id = self._create_volume_db_entry(status='in-use')
+        snapshot = self._create_snapshot_db_entry(volume_id=volume_id)
+        backup = self.api.create(self.ctxt, None, None, volume_id, None,
+                                 snapshot_id=snapshot.id)
+
+        self.assertEqual(fields.BackupStatus.CREATING, backup.status)
+        volume = objects.Volume.get_by_id(self.ctxt, volume_id)
+        snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id)
+        self.assertEqual(fields.SnapshotStatus.BACKING_UP, snapshot.status)
+        self.assertEqual('in-use', volume.status)
+
     @mock.patch.object(api.API, '_get_available_backup_service_host',
                        return_value='fake_host')
    @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup')
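
Not part of the patch itself: the short, self-contained sketch below restates
the validation order that the cinder/backup/api.py hunk introduces, so the
intended behaviour can be exercised in isolation. The dict-based volume and
snapshot stand-ins, the local exception classes, and the helper name
check_backup_source are illustrative placeholders only; in Cinder the real
check lives in backup.api.API.create().

# Illustrative sketch only -- not the actual Cinder code. It mirrors the
# post-patch check order: when a snapshot is supplied, only the snapshot's
# status is validated; otherwise the volume's status (and the force flag
# for in-use volumes) is validated, as before.


class InvalidVolume(Exception):
    pass


class InvalidSnapshot(Exception):
    pass


def check_backup_source(volume, snapshot=None, force=False):
    """Validate the backup source the way the patched api.py does."""
    if snapshot is not None:
        if snapshot['status'] not in ["available"]:
            raise InvalidSnapshot('Snapshot to be backed up must be '
                                  'available, but the current status '
                                  'is "%s".' % snapshot['status'])
    elif volume['status'] not in ["available", "in-use"]:
        raise InvalidVolume('Volume to be backed up must be available '
                            'or in-use, but the current status is "%s".'
                            % volume['status'])
    elif volume['status'] in ["in-use"] and not force:
        raise InvalidVolume('Backing up an in-use volume must use '
                            'the force flag.')


if __name__ == '__main__':
    # Backing up an in-use volume through an available snapshot is now
    # accepted; before the patch the volume status check rejected it.
    check_backup_source({'status': 'in-use'},
                        snapshot={'status': 'available'})

    # Backing up an in-use volume directly still requires force=True.
    try:
        check_backup_source({'status': 'in-use'})
    except InvalidVolume as exc:
        print('rejected as before: %s' % exc)

Running the sketch prints only the 'rejected as before' message: the snapshot
path passes, while the direct in-use path still requires the force flag.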