Incremental backup improvements for L

1. Add 'is_incremental': True/False and 'has_dependent_backups': True/False
   to the response body when querying backups.
2. Add parent_id to the notification system payload.

Since we need each backup's has_dependent_backups value when querying the
backup detail list, add an index on the parent_id column of the backups
table to reduce the performance impact of that lookup.
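
For illustration only: an index on parent_id could be added with a
sqlalchemy-migrate script along these lines (migration and index names
assumed; note the migration actually shipped in this change, shown below,
adds a denormalized num_dependent_backups counter):

    from sqlalchemy import Index, MetaData, Table

    def upgrade(migrate_engine):
        """Illustrative sketch: index backups.parent_id."""
        meta = MetaData()
        meta.bind = migrate_engine
        backups = Table('backups', meta, autoload=True)
        # Without an index, counting children by parent_id scans the table.
        Index('backups_parent_id_idx', backups.c.parent_id).create(migrate_engine)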

APIImpact

When showing backup details, the response body will include the additional
fields "is_incremental": True/False and "has_dependent_backups": True/False.

DocImpact
Change-Id: Id2fbf5616ba7bea847cf0443006800db89dd7c35
Implements: blueprint cinder-incremental-backup-improvements-for-l
wanghao 2015-06-04 17:50:12 +08:00
parent a953ecea5b
commit bc804e79f5
13 changed files with 194 additions and 14 deletions


@@ -76,7 +76,9 @@ class ViewBuilder(common.ViewBuilder):
                 'description': backup.get('display_description'),
                 'fail_reason': backup.get('fail_reason'),
                 'volume_id': backup.get('volume_id'),
-                'links': self._get_links(request, backup['id'])
+                'links': self._get_links(request, backup['id']),
+                'is_incremental': backup.is_incremental,
+                'has_dependent_backups': backup.has_dependent_backups,
             }
         }


@@ -361,6 +361,13 @@ class BackupManager(manager.SchedulerDependentManager):
             backup.size = volume['size']
             backup.availability_zone = self.az
             backup.save()
+            # Update the parent backup's num_dependent_backups once the
+            # child backup has been created successfully.
+            if backup.parent_id:
+                parent_backup = objects.Backup.get_by_id(context,
+                                                         backup.parent_id)
+                parent_backup.num_dependent_backups += 1
+                parent_backup.save()
 
         LOG.info(_LI('Create backup finished. backup: %s.'), backup.id)
         self._notify_about_backup_usage(context, backup, "create.end")
@@ -513,7 +520,14 @@ class BackupManager(manager.SchedulerDependentManager):
                 LOG.exception(_LE("Failed to update usages deleting backup"))
 
         backup.destroy()
+        # If this backup is an incremental backup, update the parent
+        # backup's num_dependent_backups accordingly.
+        if backup.parent_id:
+            parent_backup = objects.Backup.get_by_id(context,
+                                                     backup.parent_id)
+            if parent_backup.has_dependent_backups:
+                parent_backup.num_dependent_backups -= 1
+                parent_backup.save()
 
         # Commit the reservations
         if reservations:
             QUOTAS.commit(context, reservations,
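
Taken together, the create and delete paths above keep the parent's counter
in step with its chain of children. A minimal, self-contained sketch of that
invariant, using a hypothetical stand-in class rather than real Backup
objects:

    class FakeBackup(object):
        # Hypothetical stand-in mirroring the counter bookkeeping above.
        def __init__(self, parent=None):
            self.parent = parent
            self.num_dependent_backups = 0
            if parent is not None:
                parent.num_dependent_backups += 1       # create path

        def delete(self):
            if self.parent and self.parent.num_dependent_backups:
                self.parent.num_dependent_backups -= 1  # delete path

    full = FakeBackup()
    child = FakeBackup(parent=full)
    assert full.num_dependent_backups == 1
    child.delete()
    assert full.num_dependent_backups == 0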


@@ -0,0 +1,44 @@
+# Copyright (c) 2015 Huawei Technologies Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, Table
+
+
+def upgrade(migrate_engine):
+    """Add num_dependent_backups column to backups."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    backups = Table('backups', meta, autoload=True)
+    num_dependent_backups = Column('num_dependent_backups', Integer, default=0)
+    backups.create_column(num_dependent_backups)
+
+    backups_list = list(backups.select().execute())
+    for backup in backups_list:
+        dep_bks_list = list(backups.select().where(
+            backups.columns.parent_id == backup.id).execute())
+        if dep_bks_list:
+            backups.update().where(backups.columns.id == backup.id).values(
+                num_dependent_backups=len(dep_bks_list)).execute()
+
+
+def downgrade(migrate_engine):
+    """Remove num_dependent_backups column from backups."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    backups = Table('backups', meta, autoload=True)
+    num_dependent_backups = backups.columns.num_dependent_backups
+    backups.drop_column(num_dependent_backups)
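
The backfill loop in upgrade() seeds the new counter from existing parent_id
links. One way to sanity-check the result after upgrading might be a
read-only pass like this sketch (database URL assumed; same pre-1.0-style
SQLAlchemy API as the migration):

    from sqlalchemy import MetaData, Table, create_engine, func, select

    engine = create_engine('mysql://cinder:secret@localhost/cinder')  # assumed
    meta = MetaData()
    meta.bind = engine
    backups = Table('backups', meta, autoload=True)

    # Every stored counter should equal a live count of that backup's children.
    for row in backups.select().execute():
        children = select([func.count()]).where(
            backups.columns.parent_id == row.id).execute().scalar()
        assert children == (row.num_dependent_backups or 0)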


@@ -540,6 +540,7 @@ class Backup(BASE, CinderBase):
     object_count = Column(Integer)
     temp_volume_id = Column(String(36))
     temp_snapshot_id = Column(String(36))
+    num_dependent_backups = Column(Integer)
 
     @validates('fail_reason')
     def validate_fail_reason(self, key, fail_reason):


@@ -36,7 +36,9 @@ LOG = logging.getLogger(__name__)
 class Backup(base.CinderPersistentObject, base.CinderObject,
              base.CinderObjectDictCompat):
     # Version 1.0: Initial version
-    VERSION = '1.0'
+    # Version 1.1: Add new field num_dependent_backups and extra fields
+    #              is_incremental and has_dependent_backups.
+    VERSION = '1.1'
 
     fields = {
         'id': fields.UUIDField(),
@@ -65,14 +67,23 @@ class Backup(base.CinderPersistentObject, base.CinderObject,
         'temp_volume_id': fields.StringField(nullable=True),
         'temp_snapshot_id': fields.StringField(nullable=True),
+        'num_dependent_backups': fields.IntegerField(),
     }
 
-    obj_extra_fields = ['name']
+    obj_extra_fields = ['name', 'is_incremental', 'has_dependent_backups']
 
     @property
     def name(self):
         return CONF.backup_name_template % self.id
 
+    @property
+    def is_incremental(self):
+        return bool(self.parent_id)
+
+    @property
+    def has_dependent_backups(self):
+        return bool(self.num_dependent_backups)
+
     def obj_make_compatible(self, primitive, target_version):
         """Make an object representation compatible with a target version."""
         super(Backup, self).obj_make_compatible(primitive, target_version)
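
Both new properties are pure derivations of existing fields, so their
behavior is easy to pin down; a small illustration (field values assumed):

    backup = objects.Backup(parent_id=None, num_dependent_backups=2)
    backup.is_incremental          # False: no parent, so a full backup
    backup.has_dependent_backups   # True: at least one child references it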


@@ -57,7 +57,8 @@ class BackupsAPITestCase(test.TestCase):
                        snapshot=False,
                        incremental=False,
                        parent_id=None,
-                       size=0, object_count=0, host='testhost'):
+                       size=0, object_count=0, host='testhost',
+                       num_dependent_backups=0):
         """Create a backup object."""
         backup = {}
         backup['volume_id'] = volume_id
@@ -75,6 +76,7 @@ class BackupsAPITestCase(test.TestCase):
         backup['snapshot'] = snapshot
         backup['incremental'] = incremental
         backup['parent_id'] = parent_id
+        backup['num_dependent_backups'] = num_dependent_backups
         return db.backup_create(context.get_admin_context(), backup)['id']
 
     @staticmethod
@@ -104,6 +106,8 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(0, res_dict['backup']['size'])
         self.assertEqual('creating', res_dict['backup']['status'])
         self.assertEqual(volume_id, res_dict['backup']['volume_id'])
+        self.assertFalse(res_dict['backup']['is_incremental'])
+        self.assertFalse(res_dict['backup']['has_dependent_backups'])
 
         db.backup_destroy(context.get_admin_context(), backup_id)
         db.volume_destroy(context.get_admin_context(), volume_id)
@@ -207,7 +211,7 @@ class BackupsAPITestCase(test.TestCase):
         res_dict = json.loads(res.body)
 
         self.assertEqual(200, res.status_int)
-        self.assertEqual(12, len(res_dict['backups'][0]))
+        self.assertEqual(14, len(res_dict['backups'][0]))
         self.assertEqual('az1', res_dict['backups'][0]['availability_zone'])
         self.assertEqual('volumebackups',
                          res_dict['backups'][0]['container'])
@@ -221,7 +225,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual('creating', res_dict['backups'][0]['status'])
         self.assertEqual('1', res_dict['backups'][0]['volume_id'])
 
-        self.assertEqual(12, len(res_dict['backups'][1]))
+        self.assertEqual(14, len(res_dict['backups'][1]))
         self.assertEqual('az1', res_dict['backups'][1]['availability_zone'])
         self.assertEqual('volumebackups',
                          res_dict['backups'][1]['container'])
@@ -235,7 +239,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual('creating', res_dict['backups'][1]['status'])
         self.assertEqual('1', res_dict['backups'][1]['volume_id'])
 
-        self.assertEqual(12, len(res_dict['backups'][2]))
+        self.assertEqual(14, len(res_dict['backups'][2]))
         self.assertEqual('az1', res_dict['backups'][2]['availability_zone'])
         self.assertEqual('volumebackups',
                          res_dict['backups'][2]['container'])
@@ -1643,3 +1647,52 @@ class BackupsAPITestCase(test.TestCase):
         backup = self.backup_api.get(self.context, backup_id)
         self.assertRaises(exception.NotSupportedOperation,
                           self.backup_api.delete, self.context, backup, True)
+
+    def test_show_incremental_backup(self):
+        volume_id = utils.create_volume(self.context, size=5)['id']
+        parent_backup_id = self._create_backup(volume_id, status="available",
+                                               num_dependent_backups=1)
+        backup_id = self._create_backup(volume_id, status="available",
+                                        incremental=True,
+                                        parent_id=parent_backup_id,
+                                        num_dependent_backups=1)
+        child_backup_id = self._create_backup(volume_id, status="available",
+                                              incremental=True,
+                                              parent_id=backup_id)
+
+        req = webob.Request.blank('/v2/fake/backups/%s' %
+                                  backup_id)
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(200, res.status_int)
+        self.assertTrue(res_dict['backup']['is_incremental'])
+        self.assertTrue(res_dict['backup']['has_dependent_backups'])
+
+        req = webob.Request.blank('/v2/fake/backups/%s' %
+                                  parent_backup_id)
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(200, res.status_int)
+        self.assertFalse(res_dict['backup']['is_incremental'])
+        self.assertTrue(res_dict['backup']['has_dependent_backups'])
+
+        req = webob.Request.blank('/v2/fake/backups/%s' %
+                                  child_backup_id)
+        req.method = 'GET'
+        req.headers['Content-Type'] = 'application/json'
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+
+        self.assertEqual(200, res.status_int)
+        self.assertTrue(res_dict['backup']['is_incremental'])
+        self.assertFalse(res_dict['backup']['has_dependent_backups'])
+
+        db.backup_destroy(context.get_admin_context(), child_backup_id)
+        db.backup_destroy(context.get_admin_context(), backup_id)
+        db.volume_destroy(context.get_admin_context(), volume_id)


@@ -30,7 +30,8 @@ def fake_db_backup(**updates):
         'display_description': 'fake_description',
         'service_metadata': 'fake_metadata',
         'service': 'fake_service',
-        'object_count': 5
+        'object_count': 5,
+        'num_dependent_backups': 0,
     }
 
     for name, field in objects.Backup.fields.items():


@@ -86,7 +86,8 @@ class TestBackup(test_objects.BaseObjectsTestCase):
         self.assertEqual('3', backup.temp_snapshot_id)
 
     def test_import_record(self):
-        backup = objects.Backup(context=self.context, id=1)
+        backup = objects.Backup(context=self.context, id=1, parent_id=None,
+                                num_dependent_backups=0)
         export_string = backup.encode_record()
         imported_backup = objects.Backup.decode_record(export_string)
@@ -94,7 +95,8 @@ class TestBackup(test_objects.BaseObjectsTestCase):
         self.assertDictEqual(dict(backup), imported_backup)
 
     def test_import_record_additional_info(self):
-        backup = objects.Backup(context=self.context, id=1)
+        backup = objects.Backup(context=self.context, id=1, parent_id=None,
+                                num_dependent_backups=0)
         extra_info = {'driver': {'key1': 'value1', 'key2': 'value2'}}
         extra_info_copy = extra_info.copy()
         export_string = backup.encode_record(extra_info=extra_info)
@@ -110,7 +112,8 @@ class TestBackup(test_objects.BaseObjectsTestCase):
         self.assertDictEqual(expected, imported_backup)
 
     def test_import_record_additional_info_cant_overwrite(self):
-        backup = objects.Backup(context=self.context, id=1)
+        backup = objects.Backup(context=self.context, id=1, parent_id=None,
+                                num_dependent_backups=0)
         export_string = backup.encode_record(id='fake_id')
         imported_backup = objects.Backup.decode_record(export_string)


@@ -758,6 +758,16 @@ class BackupTestCase(BaseBackupTest):
         result = self.backup_mgr.check_support_to_force_delete(self.ctxt)
         self.assertTrue(result)
 
+    def test_backup_has_dependent_backups(self):
+        """Test backup has dependent backups.
+
+        Test that the has_dependent_backups value on a backup object is
+        reported correctly.
+        """
+        vol_size = 1
+        vol_id = self._create_volume_db_entry(size=vol_size)
+        backup = self._create_backup_db_entry(volume_id=vol_id)
+        self.assertFalse(backup.has_dependent_backups)
+
 
 class BackupTestCaseWithVerify(BaseBackupTest):
     """Test Case for backups."""


@@ -1723,7 +1723,8 @@ class DBAPIBackupTestCase(BaseTest):
             'size': 1000,
             'object_count': 100,
             'temp_volume_id': 'temp_volume_id',
-            'temp_snapshot_id': 'temp_snapshot_id', }
+            'temp_snapshot_id': 'temp_snapshot_id',
+            'num_dependent_backups': 0, }
 
         if one:
             return base_values


@@ -883,6 +883,15 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertNotIn('object_current_version', services.c)
         self.assertNotIn('object_available_version', services.c)
 
+    def _check_054(self, engine, data):
+        backups = db_utils.get_table(engine, 'backups')
+        self.assertIsInstance(backups.c.num_dependent_backups.type,
+                              sqlalchemy.types.INTEGER)
+
+    def _post_downgrade_054(self, engine):
+        backups = db_utils.get_table(engine, 'backups')
+        self.assertNotIn('num_dependent_backups', backups.c)
+
     def test_walk_versions(self):
         self.walk_versions(True, False)


@@ -351,6 +351,33 @@ class NotifyUsageTestCase(test.TestCase):
             'cgsnapshot.test_suffix',
             mock_usage.return_value)
 
+    def test_usage_from_backup(self):
+        raw_backup = {
+            'project_id': '12b0330ec2584a',
+            'user_id': '158cba1b8c2bb6008e',
+            'availability_zone': 'nova',
+            'id': 'fake_id',
+            'host': 'fake_host',
+            'display_name': 'test_backup',
+            'created_at': '2014-12-11T10:10:00',
+            'status': 'available',
+            'volume_id': 'fake_volume_id',
+            'size': 1,
+            'service_metadata': None,
+            'service': 'cinder.backup.drivers.swift',
+            'fail_reason': None,
+            'parent_id': 'fake_parent_id',
+            'num_dependent_backups': 0,
+        }
+
+        # Make it easier to find out differences between raw and expected.
+        expected_backup = raw_backup.copy()
+        expected_backup['tenant_id'] = expected_backup.pop('project_id')
+        expected_backup['backup_id'] = expected_backup.pop('id')
+
+        usage_info = volume_utils._usage_from_backup(raw_backup)
+        self.assertEqual(expected_backup, usage_info)
+
 
 class LVMVolumeDriverTestCase(test.TestCase):
     def test_convert_blocksize_option(self):


@@ -88,6 +88,7 @@ def _usage_from_volume(context, volume_ref, **kw):
 def _usage_from_backup(backup_ref, **kw):
+    num_dependent_backups = backup_ref['num_dependent_backups']
     usage_info = dict(tenant_id=backup_ref['project_id'],
                       user_id=backup_ref['user_id'],
                       availability_zone=backup_ref['availability_zone'],
@@ -100,7 +101,10 @@ def _usage_from_backup(backup_ref, **kw):
                       size=backup_ref['size'],
                       service_metadata=backup_ref['service_metadata'],
                       service=backup_ref['service'],
-                      fail_reason=backup_ref['fail_reason'])
+                      fail_reason=backup_ref['fail_reason'],
+                      parent_id=backup_ref['parent_id'],
+                      num_dependent_backups=num_dependent_backups,
+                      )
 
     usage_info.update(kw)
     return usage_info