Add Redis backup/restore functionality

Performs backup using the Redis client to persist data to the file
system, then streams the result to swift.

Performs restore by replacing the data file with the Swift backup
and starting the server again in the correct manner.

Note: Running the int-tests requires that volume_support is set
to false in the test.conf file.

To run:
./redstack install
./redstack kick-start redis
(vi /etc/trove/test.conf and change volume_support to false)
./redstack int-tests --group=backup (or --group=redis_supported)

Co-Authored-by: hardy.jung <hardy.jung@daumkakao.com>
Co-Authored-by: Peter Stachowski <peter@tesora.com>

Depends-On: I633273d438c22f98bef2fd1535730bcdb5e5cff0
Implements: blueprint redis-backup-restore
Change-Id: I1bd391f8e3f7de12396fb41000e3c55be23c04ee
This commit is contained in:
Peter Stachowski 2015-08-07 18:02:16 -04:00
parent 368fd674ce
commit 8f8a758539
15 changed files with 967 additions and 56 deletions

View File

@ -1,5 +1,6 @@
[DEFAULT]
#=========== RPC Configuration ======================
# URL representing the messaging driver to use and its full configuration.
@ -15,6 +16,7 @@
# overridden by an exchange name specified in the 'transport_url option.
control_exchange = trove
# ========== Sample RabbitMQ Configuration ==========
# The RabbitMQ broker address where a single node is used.
@ -40,6 +42,7 @@ rabbit_password=f7999d1955c5014aa32c
# The RabbitMQ virtual host. (string value)
# rabbit_virtual_host=/
# ========== Configuration options for Swift ==========
# The swift_url can be specified directly or fetched from Keystone catalog.
@ -51,7 +54,9 @@ rabbit_password=f7999d1955c5014aa32c
# Service type to use when searching catalog.
# swift_service_type = object-store
# ========== Datastore Manager Configurations ==========
# Datastore manager implementations.
# Format: list of 'datastore-type:datastore.manager.implementation.module'
# datastore_registry_ext = mysql:trove.guestagent.datastore.mysql.manager.Manager, percona:trove.guestagent.datastore.mysql.manager.Manager
@ -93,6 +98,7 @@ root_grant_option = True
# backup_chunk_size = 65536
# backup_segment_max_size = 2147483648
# ========== Sample Logging Configuration ==========
# Show more verbose log output (sets INFO log level output)
@ -111,10 +117,10 @@ log_file = logfile.txt
# If False doesn't trace SQL requests.
#trace_sqlalchemy = True
# ========== Datastore Specific Configuration Options ==========
[mysql]
# For mysql, the following are the defaults for backup, and restore:
# backup_strategy = InnoBackupEx
# backup_namespace = trove.guestagent.strategies.backup.mysql_impl
@ -131,3 +137,9 @@ log_file = logfile.txt
# mount_point = /var/lib/vertica
# readahead_size = 2048
# guestagent_strategy = trove.common.strategies.cluster.experimental.vertica.guestagent.VerticaGuestAgentStrategy
[redis]
# For redis, the following are the defaults for backup, and restore:
# backup_strategy = RedisBackup
# backup_namespace = trove.guestagent.strategies.backup.experimental.redis_impl
# restore_namespace = trove.guestagent.strategies.restore.experimental.redis_impl

View File

@ -553,7 +553,7 @@ redis_opts = [
help='List of UDP ports and/or port ranges to open '
'in the security group (only applicable '
'if trove_security_groups_support is True).'),
cfg.StrOpt('backup_strategy', default=None,
cfg.StrOpt('backup_strategy', default='RedisBackup',
help='Default strategy to perform backups.',
deprecated_name='backup_strategy',
deprecated_group='DEFAULT'),
@ -572,11 +572,15 @@ redis_opts = [
help='Whether to provision a Cinder volume for datadir.'),
cfg.StrOpt('device_path', default=None,
help='Device path for volume if volume support is enabled.'),
cfg.StrOpt('backup_namespace', default=None,
cfg.StrOpt('backup_namespace',
default="trove.guestagent.strategies.backup.experimental."
"redis_impl",
help='Namespace to load backup strategies from.',
deprecated_name='backup_namespace',
deprecated_group='DEFAULT'),
cfg.StrOpt('restore_namespace', default=None,
cfg.StrOpt('restore_namespace',
default="trove.guestagent.strategies.restore.experimental."
"redis_impl",
help='Namespace to load restore strategies from.',
deprecated_name='restore_namespace',
deprecated_group='DEFAULT'),

View File

@ -20,9 +20,9 @@ from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.guestagent import backup
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.redis.service import (
RedisApp)
from trove.guestagent.datastore.experimental.redis import service
from trove.guestagent import dbaas
from trove.guestagent import volume
@ -40,7 +40,7 @@ class Manager(periodic_task.PeriodicTasks):
def __init__(self):
super(Manager, self).__init__(CONF)
self._app = RedisApp()
self._app = service.RedisApp()
@periodic_task.periodic_task
def update_status(self, context):
@ -73,13 +73,16 @@ class Manager(periodic_task.PeriodicTasks):
self._app.reset_configuration(configuration)
def _perform_restore(self, backup_info, context, restore_location, app):
"""
Perform a restore on this instance,
currently it is not implemented.
"""
LOG.debug("Perform restore called.")
raise exception.DatastoreOperationNotSupported(
operation='_perform_restore', datastore=MANAGER)
"""Perform a restore on this instance."""
LOG.info(_("Restoring database from backup %s.") % backup_info['id'])
try:
backup.restore(context, backup_info, restore_location)
except Exception:
LOG.exception(_("Error performing restore from backup %s.") %
backup_info['id'])
app.status.set_status(rd_instance.ServiceStatuses.FAILED)
raise
LOG.info(_("Restored database successfully."))
def prepare(self, context, packages, databases, memory_mb, users,
device_path=None, mount_point=None, backup_info=None,
@ -105,7 +108,13 @@ class Manager(periodic_task.PeriodicTasks):
LOG.info(_('Writing redis configuration.'))
self._app.configuration_manager.save_configuration(config_contents)
self._app.apply_initial_guestagent_configuration()
self._app.restart()
if backup_info:
persistence_dir = self._app.get_working_dir()
self._perform_restore(backup_info, context, persistence_dir,
self._app)
self._app.status.end_install_or_restart()
else:
self._app.restart()
LOG.info(_('Redis instance has been setup and configured.'))
except Exception:
LOG.exception(_("Error setting up Redis instance."))
@ -145,13 +154,9 @@ class Manager(periodic_task.PeriodicTasks):
return dbaas.get_filesystem_volume_stats(mount_point)
def create_backup(self, context, backup_info):
"""
This will eventually create a backup. Right now
it does nothing.
"""
LOG.debug("Create Backup called.")
raise exception.DatastoreOperationNotSupported(
operation='create_backup', datastore=MANAGER)
"""Create a backup of the database."""
LOG.debug("Creating backup.")
backup.backup(context, backup_info)
def mount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)

View File

@ -328,6 +328,11 @@ class RedisApp(object):
"""
return self.get_configuration_property('dir')
def get_persistence_filepath(self):
"""Returns the full path to the persistence file."""
return guestagent_utils.build_file_path(
self.get_working_dir(), self.get_db_filename())
def get_auth_password(self):
"""Client authentication password for this instance or None if not set.
"""
@ -404,6 +409,12 @@ class RedisAdmin(object):
"""
return self.__client.ping()
def get_info(self, section=None):
return self.__client.info(section=section)
def persist_data(self):
return self.__client.save()
def config_set(self, name, value):
response = self.execute(
'%s %s' % (self.__config_cmd_name, 'SET'), name, value)
@ -417,18 +428,38 @@ class RedisAdmin(object):
"""
return response and redis.client.bool_ok(response)
def execute(self, cmd_name, *cmd_args):
def execute(self, cmd_name, *cmd_args, **options):
"""Execute a command and return a parsed response.
"""
try:
return self._execute_command(cmd_name, *cmd_args)
return self.__client.execute_command(cmd_name, *cmd_args,
**options)
except Exception as e:
LOG.exception(e)
raise exception.TroveError(
_("Redis command '%(cmd_name)s %(cmd_args)s' failed.")
% {'cmd_name': cmd_name, 'cmd_args': ' '.join(cmd_args)})
def _execute_command(self, *args, **options):
"""Execute a command and return a parsed response.
"""
return self.__client.execute_command(*args, **options)
    def wait_until(self, key, wait_value, section=None,
                   timeout=CONF.usage_timeout):
        """Polls redis until the specified 'key' changes to 'wait_value'.

        :param key:         field name expected in the INFO output.
        :param wait_value:  value to wait for; compared with '=='.
        :param section:     optional INFO section to restrict the lookup to.
        :param timeout:     seconds before giving up (default: usage_timeout).
        :raises RuntimeError: if the field is missing from the INFO output,
                              or the value does not change in time.
        """
        LOG.debug("Waiting for Redis '%s' to be: %s." % (key, wait_value))

        def _check_info():
            # Re-query INFO on every poll iteration so we see fresh state.
            redis_info = self.get_info(section)
            if key in redis_info:
                current_value = redis_info[key]
                LOG.debug("Found '%s' for field %s." % (current_value, key))
            else:
                # Missing field is treated as fatal rather than retried —
                # it indicates a bad key/section, not a transient state.
                LOG.error(_('Output from Redis command: %s') % redis_info)
                raise RuntimeError(_("Field %(field)s not found "
                                     "(Section: '%(sec)s').") %
                                   ({'field': key, 'sec': section}))
            return current_value == wait_value

        try:
            utils.poll_until(_check_info, time_out=timeout)
        except exception.PollTimeOut:
            raise RuntimeError(_("Timeout occurred waiting for Redis field "
                                 "'%(field)s' to change to '%(val)s'.") %
                               {'field': key, 'val': wait_value})

View File

@ -16,11 +16,11 @@
"""
Determines operating system version and OS dependent commands.
"""
from trove.guestagent.common.operating_system import get_os
REDIS_OWNER = 'redis'
OS = get_os()
REDIS_OWNER = 'redis'
REDIS_CONFIG = '/etc/redis/redis.conf'
REDIS_PID_FILE = '/var/run/redis/redis-server.pid'
REDIS_LOG_FILE = '/var/log/redis/server.log'
@ -32,6 +32,7 @@ REDIS_BIN = '/usr/bin/redis-server'
REDIS_PACKAGE = 'redis-server'
SERVICE_CANDIDATES = ['redis-server']
OS = get_os()
# Use equality, not identity: 'is' against a string literal only works by
# accident of CPython string interning and is flagged (SyntaxWarning) on
# modern Python. '==' expresses the intended value comparison.
if OS == 'redhat':
    REDIS_CONFIG = '/etc/redis.conf'
    REDIS_PACKAGE = 'redis'

View File

@ -0,0 +1,39 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P. and Tesora, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from trove.guestagent.datastore.experimental.redis import service
from trove.guestagent.strategies.backup import base
LOG = logging.getLogger(__name__)
class RedisBackup(base.BackupRunner):
    """Backup strategy for Redis.

    Persists the in-memory dataset to the Redis dump file, then streams
    that file through the base runner's zip/encrypt pipeline.
    """

    __strategy_name__ = 'redisbackup'

    def __init__(self, filename, **kwargs):
        # Guest-side handle used to locate and persist the data file.
        self.app = service.RedisApp()
        super(RedisBackup, self).__init__(filename, **kwargs)

    @property
    def cmd(self):
        """Shell pipeline that emits the (optionally processed) dump file."""
        read_cmd = 'sudo cat %s' % self.app.get_persistence_filepath()
        return ''.join([read_cmd, self.zip_cmd, self.encrypt_cmd])

    def _run_pre_backup(self):
        """Ask the server to flush its dataset to disk before reading it."""
        self.app.admin.persist_data()
        LOG.debug('Redis data persisted.')

View File

@ -0,0 +1,73 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P. and Tesora, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.datastore.experimental.redis import service
from trove.guestagent.datastore.experimental.redis import system
from trove.guestagent.strategies.restore import base
LOG = logging.getLogger(__name__)
class RedisBackup(base.RestoreRunner):
    """Implementation of Restore Strategy for Redis.

    Overwrites the server's persistence (dump) file with the backup
    stream via 'tee', then brings the server back up.
    """

    __strategy_name__ = 'redisbackup'
    # Change id tagging the temporary 'appendonly no' override so that
    # post_restore() can remove exactly this override again.
    CONF_LABEL_AOF_TEMP_OFF = 'restore_aof_temp_off'
    # INFO section queried when polling persistence-related fields.
    INFO_PERSISTENCE_SECTION = 'persistence'

    def __init__(self, storage, **kwargs):
        self.app = service.RedisApp()
        # Restore writes directly over the server's persistence file.
        self.restore_location = self.app.get_persistence_filepath()
        # 'tee' writes the incoming (already unzipped/decrypted) stream
        # into the persistence file at restore_location.
        self.base_restore_cmd = 'tee %s' % self.restore_location
        # Remember whether AOF was enabled so it can be re-enabled later.
        self.aof_set = self.app.is_appendonly_enabled()
        self.aof_off_cfg = {'appendonly': 'no'}
        kwargs.update({'restore_location': self.restore_location})
        super(RedisBackup, self).__init__(storage, **kwargs)

    def pre_restore(self):
        """Stop the server and prepare the persistence file for overwrite."""
        self.app.stop_db()
        LOG.info(_("Cleaning out restore location: %s."),
                 self.restore_location)
        # Open up permissions so the restore stream can write the file.
        operating_system.chmod(self.restore_location, FileMode.SET_FULL,
                               as_root=True)
        utils.clean_out(self.restore_location)
        # IF AOF is set, we need to turn it off temporarily
        if self.aof_set:
            self.app.configuration_manager.apply_system_override(
                self.aof_off_cfg, change_id=self.CONF_LABEL_AOF_TEMP_OFF)

    def post_restore(self):
        """Fix file ownership, restart the server, and re-enable AOF."""
        operating_system.chown(self.restore_location,
                               system.REDIS_OWNER, system.REDIS_OWNER,
                               as_root=True)
        self.app.start_redis()

        # IF AOF was set, we need to put back the original file
        if self.aof_set:
            # Wait for the restored dump to finish loading ('loading' == 0),
            # rebuild the AOF from the restored dataset, then wait for the
            # rewrite to complete before restarting with AOF re-enabled.
            self.app.admin.wait_until('loading', '0',
                                      section=self.INFO_PERSISTENCE_SECTION)
            self.app.admin.execute('BGREWRITEAOF')
            self.app.admin.wait_until('aof_rewrite_in_progress', '0',
                                      section=self.INFO_PERSISTENCE_SECTION)
            self.app.stop_db()
            self.app.configuration_manager.remove_system_override(
                change_id=self.CONF_LABEL_AOF_TEMP_OFF)
            self.app.start_redis()

View File

@ -31,6 +31,7 @@ from trove.tests.api import root
from trove.tests.api import user_access
from trove.tests.api import users
from trove.tests.api import versions
from trove.tests.scenario.groups import backup_group
from trove.tests.scenario.groups import cluster_actions_group
from trove.tests.scenario.groups import instance_actions_group
from trove.tests.scenario.groups import instance_delete_group
@ -117,29 +118,32 @@ initial_groups = [
instances.GROUP_START_SIMPLE,
instance_delete_group.GROUP
]
instance_actions_groups = list(initial_groups)
instance_actions_groups.extend([instance_actions_group.GROUP])
backup_groups = list(initial_groups)
backup_groups.extend([backup_group.GROUP])
cluster_actions_groups = list(initial_groups)
cluster_actions_groups.extend([cluster_actions_group.GROUP,
negative_cluster_actions_group.GROUP])
instance_actions_groups = list(initial_groups)
instance_actions_groups.extend([instance_actions_group.GROUP])
replication_groups = list(initial_groups)
replication_groups.extend([replication_group.GROUP])
# Module based groups
register(["instance_actions"], instance_actions_groups)
register(["backup"], backup_groups)
register(["cluster"], cluster_actions_groups)
register(["instance_actions"], instance_actions_groups)
register(["replication"], replication_groups)
# Datastore based groups - these should contain all functionality
# currently supported by the datastore
register(["cassandra_supported"], instance_actions_groups)
register(["couchbase_supported"], instance_actions_groups)
register(["postgresql_supported"], instance_actions_groups)
register(["mongodb_supported"], instance_actions_groups,
cluster_actions_groups)
register(["mysql_supported"], instance_actions_groups, replication_groups)
register(["redis_supported"], instance_actions_groups)
register(["vertica_supported"], instance_actions_groups,
cluster_actions_groups)
register(["cassandra_group"], backup_groups, instance_actions_groups)
register(["couchbase_group"], instance_actions_groups)
register(["postgresql_group"], backup_groups, instance_actions_groups)
register(["mongodb_group"], backup_groups, cluster_actions_groups,
instance_actions_groups)
register(["mysql_group"], backup_groups, instance_actions_groups,
replication_groups)
register(["redis_group"], backup_groups, instance_actions_groups)
register(["vertica_group"], cluster_actions_groups, instance_actions_groups)

View File

@ -0,0 +1,192 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from trove.tests.api.instances import WaitForGuestInstallationToFinish
from trove.tests.scenario.groups.test_group import TestGroup
GROUP = "scenario.backup_restore_group"
GROUP_BACKUP = "scenario.backup_group"
GROUP_BACKUP_LIST = "scenario.backup_list_group"
GROUP_RESTORE = "scenario.restore_group"
@test(depends_on_classes=[WaitForGuestInstallationToFinish],
      groups=[GROUP])
class BackupGroup(TestGroup):
    """Test Backup and Restore functionality.

    Each test delegates to the datastore-specific BackupRunner; the
    proboscis depends_on/runs_after attributes encode the required
    ordering (create -> verify running-state failures -> complete ->
    list/get -> restore -> cleanup).
    """

    def __init__(self):
        super(BackupGroup, self).__init__(
            'backup_runners', 'BackupRunner')

    # --- negative create tests (no backup exists yet) ---

    @test(groups=[GROUP_BACKUP])
    def backup_create_instance_invalid(self):
        """Ensure create backup fails with invalid instance id."""
        self.test_runner.run_backup_create_instance_invalid()

    @test(groups=[GROUP_BACKUP])
    def backup_create_instance_not_found(self):
        """Ensure create backup fails with unknown instance id."""
        self.test_runner.run_backup_create_instance_not_found()

    # --- seed data so the restore can be verified later ---

    @test(groups=[GROUP_BACKUP])
    def add_data_for_backup(self):
        """Add data to instance for restore verification."""
        self.test_runner.run_add_data_for_backup()

    @test(groups=[GROUP_BACKUP],
          runs_after=[add_data_for_backup])
    def verify_data_for_backup(self):
        """Verify data in instance."""
        self.test_runner.run_verify_data_for_backup()

    # --- start the backup, then exercise while-running failure modes ---

    @test(groups=[GROUP_BACKUP],
          runs_after=[verify_data_for_backup])
    def backup_create(self):
        """Check that create backup is started successfully."""
        self.test_runner.run_backup_create()

    @test(groups=[GROUP_BACKUP],
          depends_on=[backup_create])
    def restore_instance_from_not_completed_backup(self):
        """Ensure a restore fails while the backup is running."""
        self.test_runner.run_restore_instance_from_not_completed_backup()

    @test(groups=[GROUP_BACKUP],
          depends_on=[backup_create],
          runs_after=[restore_instance_from_not_completed_backup])
    def instance_action_right_after_backup_create(self):
        """Ensure any instance action fails while backup is running."""
        self.test_runner.run_instance_action_right_after_backup_create()

    @test(groups=[GROUP_BACKUP],
          depends_on=[backup_create],
          runs_after=[instance_action_right_after_backup_create])
    def backup_create_another_backup_running(self):
        """Ensure create backup fails when another backup is running."""
        self.test_runner.run_backup_create_another_backup_running()

    @test(groups=[GROUP_BACKUP],
          depends_on=[backup_create],
          runs_after=[backup_delete_while_backup_running])
    def backup_create_completed(self):
        """Check that the backup completes successfully."""
        self.test_runner.run_backup_create_completed()

    @test(groups=[GROUP_BACKUP],
          depends_on=[backup_create],
          runs_after=[backup_create_another_backup_running])
    def backup_delete_while_backup_running(self):
        """Ensure delete backup fails while it is running."""
        self.test_runner.run_backup_delete_while_backup_running()

    # TODO(peterstac) - Add support for incremental backups

    # --- listing/show tests run against the completed backup ---

    @test(groups=[GROUP_BACKUP, GROUP_BACKUP_LIST],
          depends_on=[backup_create_completed])
    def backup_list(self):
        """Test list backups."""
        self.test_runner.run_backup_list()

    @test(groups=[GROUP_BACKUP, GROUP_BACKUP_LIST],
          depends_on=[backup_create_completed])
    def backup_list_filter_datastore(self):
        """Test list backups and filter by datastore."""
        self.test_runner.run_backup_list_filter_datastore()

    @test(groups=[GROUP_BACKUP, GROUP_BACKUP_LIST],
          depends_on=[backup_create_completed])
    def backup_list_filter_different_datastore(self):
        """Test list backups and filter by different datastore."""
        self.test_runner.run_backup_list_filter_different_datastore()

    @test(groups=[GROUP_BACKUP, GROUP_BACKUP_LIST],
          depends_on=[backup_create_completed])
    def backup_list_filter_datastore_not_found(self):
        """Test list backups and filter by unknown datastore."""
        self.test_runner.run_backup_list_filter_datastore_not_found()

    @test(groups=[GROUP_BACKUP, GROUP_BACKUP_LIST],
          depends_on=[backup_create_completed])
    def backup_list_for_instance(self):
        """Test backup list for instance."""
        self.test_runner.run_backup_list_for_instance()

    @test(groups=[GROUP_BACKUP, GROUP_BACKUP_LIST],
          depends_on=[backup_create_completed])
    def backup_get(self):
        """Test backup show."""
        self.test_runner.run_backup_get()

    @test(groups=[GROUP_BACKUP, GROUP_BACKUP_LIST],
          depends_on=[backup_create_completed])
    def backup_get_unauthorized_user(self):
        """Ensure backup show fails for an unauthorized user."""
        self.test_runner.run_backup_get_unauthorized_user()

    # --- restore the completed backup into a new instance ---

    @test(groups=[GROUP_RESTORE],
          depends_on=[backup_create_completed],
          runs_after_groups=[GROUP_BACKUP_LIST])
    def restore_from_backup(self):
        """Check that restoring an instance from a backup starts."""
        self.test_runner.run_restore_from_backup()

    @test(groups=[GROUP_RESTORE],
          depends_on=[restore_from_backup])
    def restore_from_backup_completed(self):
        """Wait until restoring an instance from a backup completes."""
        self.test_runner.run_restore_from_backup_completed()

    @test(groups=[GROUP_RESTORE],
          depends_on=[restore_from_backup_completed])
    def verify_data_in_restored_instance(self):
        """Verify data in restored instance."""
        self.test_runner.run_verify_data_in_restored_instance()

    @test(groups=[GROUP_RESTORE],
          depends_on=[restore_from_backup_completed],
          runs_after=[verify_data_in_restored_instance])
    def delete_restored_instance(self):
        """Test deleting the restored instance."""
        self.test_runner.run_delete_restored_instance()

    # --- cleanup: delete paths, including negative cases ---

    @test(groups=[GROUP_BACKUP],
          depends_on=[backup_create_completed],
          runs_after=[delete_restored_instance])
    def delete_unknown_backup(self):
        """Ensure deleting an unknown backup fails."""
        self.test_runner.run_delete_unknown_backup()

    @test(groups=[GROUP_BACKUP],
          depends_on=[backup_create_completed],
          runs_after=[delete_unknown_backup])
    def delete_backup_unauthorized_user(self):
        """Ensure deleting backup by an unauthorized user fails."""
        self.test_runner.run_delete_backup_unauthorized_user()

    @test(groups=[GROUP_BACKUP],
          depends_on=[backup_create_completed],
          runs_after=[delete_backup_unauthorized_user])
    def delete_backup(self):
        """Test deleting the backup."""
        self.test_runner.run_delete_backup()

    @test(depends_on=[delete_backup])
    def check_for_incremental_backup(self):
        """Test that backup children are deleted."""
        self.test_runner.run_check_for_incremental_backup()

View File

@ -16,6 +16,7 @@
from proboscis import test
from trove.tests.api.instances import GROUP_START_SIMPLE
from trove.tests.scenario.groups import backup_group
from trove.tests.scenario.groups import instance_actions_group
from trove.tests.scenario.groups import replication_group
from trove.tests.scenario.groups.test_group import TestGroup
@ -25,7 +26,7 @@ GROUP = "scenario.instance_delete_group"
@test(depends_on_groups=[GROUP_START_SIMPLE], groups=[GROUP],
runs_after_groups=[replication_group.GROUP,
runs_after_groups=[backup_group.GROUP_BACKUP, replication_group.GROUP,
instance_actions_group.GROUP])
class InstanceDeleteGroup(TestGroup):

View File

@ -16,14 +16,14 @@
from proboscis import test
from trove.tests.api.instances import GROUP_START_SIMPLE
from trove.tests.api.instances import WaitForGuestInstallationToFinish
from trove.tests.scenario.groups import backup_group
from trove.tests.scenario.groups.test_group import TestGroup
GROUP = "scenario.replication_group"
@test(depends_on_groups=[GROUP_START_SIMPLE], groups=[GROUP],
runs_after=[WaitForGuestInstallationToFinish])
runs_after=[backup_group.GROUP_BACKUP])
class ReplicationGroup(TestGroup):
def __init__(self):

View File

@ -0,0 +1,347 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import SkipTest
from troveclient.compat import exceptions
from trove.common.utils import generate_uuid
from trove.common.utils import poll_until
from trove.tests.config import CONFIG
from trove.tests.scenario.helpers.test_helper import DataType
from trove.tests.scenario.runners.test_runners import TestRunner
from trove.tests.util import create_dbaas_client
from trove.tests.util.users import Requirements
class BackupRunner(TestRunner):
def __init__(self):
self.TIMEOUT_BACKUP_CREATE = 60 * 30
self.TIMEOUT_BACKUP_DELETE = 120
super(BackupRunner, self).__init__(sleep_time=20,
timeout=self.TIMEOUT_BACKUP_CREATE)
self.BACKUP_NAME = 'backup_test'
self.BACKUP_DESC = 'test description'
self.backup_host = None
self.backup_info = None
self.backup_count_prior_to_create = 0
self.backup_count_for_instance_prior_to_create = 0
self.incremental_backup_info = None
self.restore_instance_id = 0
self.restore_host = None
self.other_client = None
def run_backup_create_instance_invalid(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
invalid_inst_id = 'invalid-inst-id'
self.assert_raises(
expected_exception, expected_http_code,
self.auth_client.backups.create,
self.BACKUP_NAME, invalid_inst_id, self.BACKUP_DESC)
def run_backup_create_instance_not_found(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
self.assert_raises(
expected_exception, expected_http_code,
self.auth_client.backups.create,
self.BACKUP_NAME, generate_uuid(), self.BACKUP_DESC)
def run_add_data_for_backup(self):
self.backup_host = self.get_instance_host()
self.assert_add_data_for_backup(self.backup_host)
def assert_add_data_for_backup(self, host):
"""In order for this to work, the corresponding datastore
'helper' class should implement the 'add_large_data' method.
"""
self.test_helper.add_data(DataType.large, host)
def run_verify_data_for_backup(self):
self.assert_verify_backup_data(self.backup_host)
def assert_verify_backup_data(self, host):
"""In order for this to work, the corresponding datastore
'helper' class should implement the 'verify_large_data' method.
"""
self.test_helper.verify_data(DataType.large, host)
def run_backup_create(self):
self.assert_backup_create()
def assert_backup_create(self):
# Necessary to test that the count increases.
self.backup_count_prior_to_create = len(
self.auth_client.backups.list())
self.backup_count_for_instance_prior_to_create = len(
self.auth_client.instances.backups(self.instance_info.id))
result = self.auth_client.backups.create(
self.BACKUP_NAME, self.instance_info.id, self.BACKUP_DESC)
self.backup_info = result
self.assert_equal(self.BACKUP_NAME, result.name,
'Unexpected backup name')
self.assert_equal(self.BACKUP_DESC, result.description,
'Unexpected backup description')
self.assert_equal(self.instance_info.id, result.instance_id,
'Unexpected instance ID for backup')
self.assert_equal('NEW', result.status,
'Unexpected status for backup')
instance = self.auth_client.instances.get(
self.instance_info.id)
datastore_version = self.auth_client.datastore_versions.get(
self.instance_info.dbaas_datastore,
self.instance_info.dbaas_datastore_version)
self.assert_equal('BACKUP', instance.status,
'Unexpected instance status')
self.assert_equal(self.instance_info.dbaas_datastore,
result.datastore['type'],
'Unexpected datastore')
self.assert_equal(self.instance_info.dbaas_datastore_version,
result.datastore['version'],
'Unexpected datastore version')
self.assert_equal(datastore_version.id, result.datastore['version_id'],
'Unexpected datastore version id')
def run_restore_instance_from_not_completed_backup(
self, expected_exception=exceptions.Conflict,
expected_http_code=409):
self.assert_raises(
expected_exception, expected_http_code,
self._restore_from_backup, self.backup_info.id)
def run_instance_action_right_after_backup_create(
self, expected_exception=exceptions.UnprocessableEntity,
expected_http_code=422):
self.assert_raises(expected_exception, expected_http_code,
self.auth_client.instances.resize_instance,
self.instance_info.id, 1)
def run_backup_create_another_backup_running(
self, expected_exception=exceptions.UnprocessableEntity,
expected_http_code=422):
self.assert_raises(expected_exception, expected_http_code,
self.auth_client.backups.create,
'backup_test2', self.instance_info.id,
'test description2')
def run_backup_delete_while_backup_running(
self, expected_exception=exceptions.UnprocessableEntity,
expected_http_code=422):
result = self.auth_client.backups.list()
backup = result[0]
self.assert_raises(expected_exception, expected_http_code,
self.auth_client.backups.delete, backup.id)
def run_backup_create_completed(self):
self._verify_backup(self.backup_info.id)
def _verify_backup(self, backup_id):
def _result_is_active():
backup = self.auth_client.backups.get(backup_id)
if backup.status == 'COMPLETED':
return True
else:
self.assert_not_equal('FAILED', backup.status,
'Backup status should not be')
return False
poll_until(_result_is_active, time_out=self.TIMEOUT_BACKUP_CREATE)
def run_backup_list(self):
backup_list = self.auth_client.backups.list()
self.assert_backup_list(backup_list,
self.backup_count_prior_to_create + 1)
def assert_backup_list(self, backup_list, expected_count):
self.assert_equal(expected_count, len(backup_list),
'Unexpected number of backups found')
if expected_count:
backup = backup_list[0]
self.assert_equal(self.BACKUP_NAME, backup.name,
'Unexpected backup name')
self.assert_equal(self.BACKUP_DESC, backup.description,
'Unexpected backup description')
self.assert_not_equal(0.0, backup.size, 'Unexpected backup size')
self.assert_equal(self.instance_info.id, backup.instance_id,
'Unexpected instance id')
self.assert_equal('COMPLETED', backup.status,
'Unexpected backup status')
def run_backup_list_filter_datastore(self):
backup_list = self.auth_client.backups.list(
datastore=self.instance_info.dbaas_datastore)
self.assert_backup_list(backup_list,
self.backup_count_prior_to_create + 1)
def run_backup_list_filter_different_datastore(self):
backup_list = self.auth_client.backups.list(
datastore='Test_Datastore_1')
self.assert_backup_list(backup_list, 0)
def run_backup_list_filter_datastore_not_found(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
self.assert_raises(
expected_exception, expected_http_code,
self.auth_client.backups.list,
datastore='NOT_FOUND')
def run_backup_list_for_instance(self):
backup_list = self.auth_client.instances.backups(
self.instance_info.id)
self.assert_backup_list(backup_list,
self.backup_count_prior_to_create + 1)
def run_backup_get(self):
backup = self.auth_client.backups.get(self.backup_info.id)
self.assert_backup_list([backup], 1)
self.assert_equal(self.instance_info.dbaas_datastore,
backup.datastore['type'],
'Unexpected datastore type')
self.assert_equal(self.instance_info.dbaas_datastore_version,
backup.datastore['version'],
'Unexpected datastore version')
datastore_version = self.auth_client.datastore_versions.get(
self.instance_info.dbaas_datastore,
self.instance_info.dbaas_datastore_version)
self.assert_equal(datastore_version.id, backup.datastore['version_id'])
def run_backup_get_unauthorized_user(
        self, expected_exception=exceptions.NotFound,
        expected_http_code=404):
    """A different (non-owner) user must not be able to see the backup."""
    self._create_other_client()
    self.assert_raises(expected_exception, None,
                       self.other_client.backups.get, self.backup_info.id)
    # 'assert_raises' was given no HTTP code above; check the return
    # code on the other client explicitly instead.
    self.assert_client_code(expected_http_code=expected_http_code,
                            client=self.other_client)
def _create_other_client(self):
    """Lazily build a client for a second, non-admin user."""
    if self.other_client:
        return
    requirements = Requirements(is_admin=False)
    # Exclude the instance owner so we really get a different user.
    other_user = CONFIG.users.find_user(
        requirements, black_list=[self.instance_info.user.auth_user])
    self.other_client = create_dbaas_client(other_user)
def run_restore_from_backup(self):
    """Kick off a restore using the backup created earlier."""
    backup_ref = self.backup_info.id
    self.assert_restore_from_backup(backup_ref)
def assert_restore_from_backup(self, backup_ref):
    """Start a restore and remember the new instance id for later steps."""
    restore = self._restore_from_backup(backup_ref)
    # TODO(peterstac) - This should probably return code 202
    self.assert_client_code(200)
    self.assert_equal('BUILD', restore.status,
                      'Unexpected instance status')
    self.restore_instance_id = restore.id
def _restore_from_backup(self, backup_ref):
    """Create a new instance from the given backup reference."""
    return self.auth_client.instances.create(
        self.instance_info.name + '_restore',
        self.instance_info.dbaas_flavor_href,
        self.instance_info.volume,
        restorePoint={'backupRef': backup_ref})
def run_restore_from_backup_completed(
        self, expected_states=['BUILD', 'ACTIVE'],
        # TODO(peterstac) - This should probably return code 202
        expected_http_code=200):
    """Wait for the restored instance to become ACTIVE, then record
    its host for the data-verification step.
    """
    instance_id = self.restore_instance_id
    self.assert_restore_from_backup_completed(
        instance_id, expected_states, expected_http_code)
    self.restore_host = self.get_instance_host(instance_id)
def assert_restore_from_backup_completed(
        self, instance_id, expected_states, expected_http_code):
    """Verify the restored instance walks through the expected states."""
    self.assert_instance_action(
        instance_id, expected_states, expected_http_code)
def run_verify_data_in_restored_instance(self):
    """Check that data written before the backup survived the restore."""
    self.assert_verify_backup_data(self.restore_host)
def run_delete_restored_instance(
        self, expected_states=['SHUTDOWN'],
        expected_http_code=202):
    """Delete the instance created by the restore and wait for removal."""
    self.assert_delete_restored_instance(
        self.restore_instance_id, expected_states, expected_http_code)
def assert_delete_restored_instance(
        self, instance_id, expected_states, expected_http_code):
    """Issue the delete and verify the state transitions to completion."""
    client = self.auth_client
    client.instances.delete(instance_id)
    self.assert_instance_action(instance_id, expected_states,
                                expected_http_code)
    # The last expected state marks full removal from the listing.
    self.assert_all_gone(instance_id, expected_states[-1])
def run_delete_unknown_backup(
        self, expected_exception=exceptions.NotFound,
        expected_http_code=404):
    """Deleting a backup id that does not exist must raise NotFound."""
    delete_call = self.auth_client.backups.delete
    self.assert_raises(expected_exception, expected_http_code,
                       delete_call, 'unknown_backup')
def run_delete_backup_unauthorized_user(
        self, expected_exception=exceptions.NotFound,
        expected_http_code=404):
    """A different (non-owner) user must not be able to delete the backup."""
    self._create_other_client()
    self.assert_raises(expected_exception, None,
                       self.other_client.backups.delete,
                       self.backup_info.id)
    # 'assert_raises' was given no HTTP code above; check the return
    # code on the other client explicitly instead.
    self.assert_client_code(expected_http_code=expected_http_code,
                            client=self.other_client)
def run_delete_backup(self, expected_http_code=202):
    """Delete the backup created by this test group."""
    backup_id = self.backup_info.id
    self.assert_delete_backup(backup_id, expected_http_code)
def assert_delete_backup(
        self, backup_id, expected_http_code):
    """Delete the backup, verify the response code and wait for removal."""
    self.auth_client.backups.delete(backup_id)
    self.assert_client_code(expected_http_code)
    self._wait_until_backup_is_gone(backup_id)
def _wait_until_backup_is_gone(self, backup_id):
    """Poll until GET on the backup raises NotFound (i.e. it is deleted)."""
    def _backup_is_gone():
        try:
            self.auth_client.backups.get(backup_id)
        except exceptions.NotFound:
            # The backup can no longer be fetched — deletion finished.
            return True
        return False

    poll_until(_backup_is_gone,
               time_out=self.TIMEOUT_BACKUP_DELETE)
def run_check_for_incremental_backup(
        self, expected_exception=exceptions.NotFound,
        expected_http_code=404):
    """An incremental backup must be gone once its parent was deleted."""
    if self.incremental_backup_info is None:
        raise SkipTest("Incremental Backup not created")
    get_call = self.auth_client.backups.get
    self.assert_raises(expected_exception, expected_http_code,
                       get_call, self.incremental_backup_info.id)

View File

@ -25,10 +25,12 @@ from trove.common.context import TroveContext
from trove.common import utils
from trove.conductor import api as conductor_api
from trove.guestagent.backup import backupagent
from trove.guestagent.common import configuration
from trove.guestagent.strategies.backup.base import BackupRunner
from trove.guestagent.strategies.backup.base import UnknownBackupType
from trove.guestagent.strategies.backup.experimental import couchbase_impl
from trove.guestagent.strategies.backup.experimental import mongo_impl
from trove.guestagent.strategies.backup.experimental import redis_impl
from trove.guestagent.strategies.backup import mysql_impl
from trove.guestagent.strategies.backup.mysql_impl import MySqlApp
from trove.guestagent.strategies.restore.base import RestoreRunner
@ -251,6 +253,21 @@ class BackupAgentTest(trove_testtools.TestCase):
self.assertIsNotNone(mongodump.manifest)
self.assertIn('gz.enc', mongodump.manifest)
@patch.object(utils, 'execute_with_timeout')
@patch.object(configuration.ConfigurationManager, 'parse_configuration',
              Mock(return_value={'dir': '/var/lib/redis',
                                 'dbfilename': 'dump.rdb'}))
def test_backup_impl_RedisBackup(self, *mocks):
    """The Redis backup command should cat the RDB dump, pipe it through
    gzip and openssl encryption, and produce a 'gz.enc' manifest suffix.
    """
    netutils.get_my_ipv4 = Mock(return_value="1.1.1.1")
    redis_backup = redis_impl.RedisBackup('redisbackup', extra_opts='')
    self.assertIsNotNone(redis_backup)
    # dir/dbfilename come from the mocked parse_configuration above.
    str_redis_backup_cmd = ("sudo cat /var/lib/redis/dump.rdb | "
                            "gzip | openssl enc -aes-256-cbc -salt -pass "
                            "pass:default_aes_cbc_key")
    self.assertEqual(str_redis_backup_cmd, redis_backup.cmd)
    self.assertIsNotNone(redis_backup.manifest)
    self.assertIn('gz.enc', redis_backup.manifest)
def test_backup_base(self):
"""This test is for
guestagent/strategies/backup/base

View File

@ -16,7 +16,8 @@ from mock import ANY, DEFAULT, patch
from testtools.testcase import ExpectedException
from trove.common import exception
from trove.common import utils
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.common import configuration
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.mongodb.service import MongoDBApp
from trove.guestagent.strategies.backup import base as backupBase
from trove.guestagent.strategies.backup.mysql_impl import MySqlApp
@ -44,6 +45,10 @@ BACKUP_MONGODUMP_CLS = ("trove.guestagent.strategies.backup."
"experimental.mongo_impl.MongoDump")
RESTORE_MONGODUMP_CLS = ("trove.guestagent.strategies.restore."
"experimental.mongo_impl.MongoDump")
BACKUP_REDIS_CLS = ("trove.guestagent.strategies.backup."
"experimental.redis_impl.RedisBackup")
RESTORE_REDIS_CLS = ("trove.guestagent.strategies.restore."
"experimental.redis_impl.RedisBackup")
PIPE = " | "
ZIP = "gzip"
@ -78,13 +83,14 @@ PREPARE = ("sudo innobackupex --apply-log /var/lib/mysql/data "
CRYPTO_KEY = "default_aes_cbc_key"
CBBACKUP_CMD = "tar cpPf - /tmp/backups"
CBBACKUP_RESTORE = "sudo tar xpPf -"
MONGODUMP_CMD = "sudo tar cPf - /var/lib/mongodb/dump"
MONGODUMP_RESTORE = "sudo tar xPf -"
REDISBACKUP_CMD = "sudo cat /var/lib/redis/dump.rdb"
REDISBACKUP_RESTORE = "tee /var/lib/redis/dump.rdb"
class GuestAgentBackupTest(trove_testtools.TestCase):
@ -320,8 +326,8 @@ class GuestAgentBackupTest(trove_testtools.TestCase):
inst = MySQLRestoreMixin()
inst.reset_root_password()
chmod.assert_called_once_with(ANY, FileMode.ADD_READ_ALL,
as_root=True)
chmod.assert_called_once_with(
ANY, operating_system.FileMode.ADD_READ_ALL, as_root=True)
# Make sure the temporary error log got deleted as root
# (see bug/1423759).
@ -370,6 +376,61 @@ class GuestAgentBackupTest(trove_testtools.TestCase):
self.assertEqual(restr.restore_cmd,
DECRYPT + PIPE + UNZIP + PIPE + MONGODUMP_RESTORE)
@patch.object(utils, 'execute_with_timeout')
@patch.object(configuration.ConfigurationManager, 'parse_configuration',
              mock.Mock(return_value={'dir': '/var/lib/redis',
                                      'dbfilename': 'dump.rdb'}))
def test_backup_encrypted_redisbackup_command(self, *mocks):
    """With encryption enabled, the command is cat | gzip | openssl and
    the manifest carries the 'gz.enc' suffix.
    """
    backupBase.BackupRunner.is_encrypted = True
    backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
    RunnerClass = utils.import_class(BACKUP_REDIS_CLS)
    bkp = RunnerClass(12345)
    self.assertIsNotNone(bkp)
    self.assertEqual(
        REDISBACKUP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command)
    self.assertIn("gz.enc", bkp.manifest)
@patch.object(utils, 'execute_with_timeout')
@patch.object(configuration.ConfigurationManager, 'parse_configuration',
              mock.Mock(return_value={'dir': '/var/lib/redis',
                                      'dbfilename': 'dump.rdb'}))
def test_backup_not_encrypted_redisbackup_command(self, *mocks):
    """Without encryption the command stops at gzip and the manifest is
    plain 'gz'.
    """
    backupBase.BackupRunner.is_encrypted = False
    backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
    RunnerClass = utils.import_class(BACKUP_REDIS_CLS)
    bkp = RunnerClass(12345)
    self.assertIsNotNone(bkp)
    self.assertEqual(REDISBACKUP_CMD + PIPE + ZIP, bkp.command)
    self.assertIn("gz", bkp.manifest)
@patch.object(configuration.ConfigurationManager, 'parse_configuration',
              mock.Mock(return_value={'dir': '/var/lib/redis',
                                      'dbfilename': 'dump.rdb'}))
@patch.object(operating_system, 'chown')
@patch.object(operating_system, 'create_directory')
def test_restore_decrypted_redisbackup_command(self, *mocks):
    """An unencrypted restore pipes the unzipped stream straight into
    the RDB dump file.
    """
    restoreBase.RestoreRunner.is_zipped = True
    restoreBase.RestoreRunner.is_encrypted = False
    RunnerClass = utils.import_class(RESTORE_REDIS_CLS)
    restr = RunnerClass(None, restore_location="/tmp",
                        location="filename", checksum="md5")
    self.assertEqual(restr.restore_cmd, UNZIP + PIPE + REDISBACKUP_RESTORE)
@patch.object(configuration.ConfigurationManager, 'parse_configuration',
              mock.Mock(return_value={'dir': '/var/lib/redis',
                                      'dbfilename': 'dump.rdb'}))
@patch.object(operating_system, 'chown')
@patch.object(operating_system, 'create_directory')
def test_restore_encrypted_redisbackup_command(self, *mocks):
    """An encrypted restore decrypts, then unzips, then streams into
    the RDB dump file.
    """
    restoreBase.RestoreRunner.is_zipped = True
    restoreBase.RestoreRunner.is_encrypted = True
    restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
    RunnerClass = utils.import_class(RESTORE_REDIS_CLS)
    restr = RunnerClass(None, restore_location="/tmp",
                        location="filename", checksum="md5")
    self.assertEqual(restr.restore_cmd,
                     DECRYPT + PIPE + UNZIP + PIPE + REDISBACKUP_RESTORE)
class CouchbaseBackupTests(trove_testtools.TestCase):
@ -529,3 +590,98 @@ class MongodbRestoreTests(trove_testtools.TestCase):
self.restore_runner.post_restore = mock.Mock()
self.assertRaises(exception.ProcessExecutionError,
self.restore_runner.restore)
class RedisBackupTests(trove_testtools.TestCase):
    """Unit tests for the Redis backup runner's pre/run/post lifecycle."""

    def setUp(self):
        super(RedisBackupTests, self).setUp()
        # Stub out shell execution; no real commands should run.
        self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout')
        self.exec_timeout_patch.start()
        self.addCleanup(self.exec_timeout_patch.stop)
        # The runner reads the Redis config to locate the RDB dump file.
        self.conf_man_patch = patch.object(
            configuration.ConfigurationManager, 'parse_configuration',
            mock.Mock(return_value={'dir': '/var/lib/redis',
                                    'dbfilename': 'dump.rdb'}))
        self.conf_man_patch.start()
        self.addCleanup(self.conf_man_patch.stop)
        self.backup_runner = utils.import_class(BACKUP_REDIS_CLS)
        # Mock the three phases so the tests can check call counts/order.
        self.backup_runner_patch = patch.multiple(
            self.backup_runner, _run=DEFAULT,
            _run_pre_backup=DEFAULT, _run_post_backup=DEFAULT)
        self.backup_runner_mocks = self.backup_runner_patch.start()
        self.addCleanup(self.backup_runner_patch.stop)

    def tearDown(self):
        super(RedisBackupTests, self).tearDown()

    def test_backup_success(self):
        # Using the runner as a context manager drives the full
        # pre-backup -> run -> post-backup sequence.
        with self.backup_runner(12345):
            pass
        self.backup_runner_mocks['_run_pre_backup'].assert_called_once_with()
        self.backup_runner_mocks['_run'].assert_called_once_with()
        self.backup_runner_mocks['_run_post_backup'].assert_called_once_with()

    def test_backup_failed_due_to_run_backup(self):
        # When the main run phase fails, post-backup must not be invoked.
        self.backup_runner_mocks['_run'].configure_mock(
            side_effect=exception.TroveError('test')
        )
        with ExpectedException(exception.TroveError, 'test'):
            with self.backup_runner(12345):
                pass
        self.backup_runner_mocks['_run_pre_backup'].assert_called_once_with()
        self.backup_runner_mocks['_run'].assert_called_once_with()
        self.assertEqual(
            0, self.backup_runner_mocks['_run_post_backup'].call_count)
class RedisRestoreTests(trove_testtools.TestCase):
    """Unit tests for the Redis restore runner's success/failure flow."""

    def setUp(self):
        super(RedisRestoreTests, self).setUp()
        # The runner reads the Redis config to locate the RDB dump file.
        self.conf_man_patch = patch.object(
            configuration.ConfigurationManager, 'parse_configuration',
            mock.Mock(return_value={'dir': '/var/lib/redis',
                                    'dbfilename': 'dump.rdb'}))
        self.conf_man_patch.start()
        self.addCleanup(self.conf_man_patch.stop)
        # Avoid touching the filesystem when the runner prepares dirs.
        self.os_patch = patch.multiple(operating_system,
                                       chown=DEFAULT,
                                       create_directory=DEFAULT)
        self.os_patch.start()
        self.addCleanup(self.os_patch.stop)
        self.restore_runner = utils.import_class(
            RESTORE_REDIS_CLS)('swift', location='http://some.where',
                               checksum='True_checksum',
                               restore_location='/var/lib/somewhere')
        self.restore_runner_patch = patch.multiple(
            self.restore_runner, _run_restore=DEFAULT,
            pre_restore=DEFAULT, post_restore=DEFAULT)
        self.restore_runner_mocks = self.restore_runner_patch.start()
        self.expected_content_length = 123
        # NOTE(review): this reassignment shadows the _run_restore mock
        # installed by patch.multiple above; the explicit Mock supplies
        # the content-length return value used by test_restore_success.
        self.restore_runner._run_restore = mock.Mock(
            return_value=self.expected_content_length)
        self.addCleanup(self.restore_runner_patch.stop)

    def tearDown(self):
        super(RedisRestoreTests, self).tearDown()

    def test_restore_success(self):
        actual_content_length = self.restore_runner.restore()
        self.assertEqual(
            self.expected_content_length, actual_content_length)

    def test_restore_failed_due_to_pre_restore(self):
        # A failing pre-restore step must propagate the error.
        self.restore_runner_mocks['pre_restore'].side_effect = (
            exception.ProcessExecutionError('Error'))
        self.assertRaises(exception.ProcessExecutionError,
                          self.restore_runner.restore)

    def test_restore_failed_due_to_run_restore(self):
        # A failure while streaming the data must propagate as well.
        self.restore_runner._run_restore.side_effect = (
            exception.ProcessExecutionError('Error'))
        self.assertRaises(exception.ProcessExecutionError,
                          self.restore_runner.restore)

View File

@ -17,7 +17,7 @@ import testtools
from trove.common.context import TroveContext
from trove.guestagent import backup
from trove.guestagent.common.configuration import ConfigurationManager
from trove.guestagent.common import configuration
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.redis import (
service as redis_service)
@ -72,14 +72,27 @@ class RedisGuestAgentManagerTest(testtools.TestCase):
def test_prepare_redis_not_installed(self):
self._prepare_dynamic(is_redis_installed=False)
@patch.multiple(redis_service.RedisApp,
apply_initial_guestagent_configuration=DEFAULT)
@patch.object(ConfigurationManager, 'save_configuration')
@patch.object(redis_service.RedisApp, 'get_working_dir',
MagicMock(return_value='/var/lib/redis'))
def test_prepare_redis_from_backup(self):
self._prepare_dynamic(backup_id='backup_id_123abc')
@patch.object(redis_service.RedisApp,
'apply_initial_guestagent_configuration')
@patch.object(configuration.ConfigurationManager, 'save_configuration')
def _prepare_dynamic(self, save_configuration_mock,
apply_initial_guestagent_configuration,
device_path='/dev/vdb', is_redis_installed=True,
backup_info=None, is_root_enabled=False,
mount_point='var/lib/redis'):
mount_point='var/lib/redis', backup_id=None):
backup_info = None
if backup_id is not None:
backup_info = {'id': backup_id,
'location': 'fake-location',
'type': 'RedisBackup',
'checksum': 'fake-checksum',
}
# covering all outcomes is starting to cause trouble here
mock_status = MagicMock()
@ -110,7 +123,12 @@ class RedisGuestAgentManagerTest(testtools.TestCase):
apply_initial_guestagent_configuration.assert_called_once_with()
operating_system.chown.assert_any_call(
mount_point, 'redis', 'redis', as_root=True)
redis_service.RedisApp.restart.assert_any_call()
if backup_info:
backup.restore.assert_called_once_with(self.context,
backup_info,
'/var/lib/redis')
else:
redis_service.RedisApp.restart.assert_any_call()
def test_restart(self):
mock_status = MagicMock()
@ -128,3 +146,14 @@ class RedisGuestAgentManagerTest(testtools.TestCase):
self.manager.stop_db(self.context)
redis_service.RedisApp.stop_db.assert_any_call(
do_not_start_on_reboot=False)
@patch.object(backup, 'backup')
@patch.object(configuration.ConfigurationManager, 'parse_configuration',
              MagicMock(return_value={'dir': '/var/lib/redis',
                                      'dbfilename': 'dump.rdb'}))
@patch.object(operating_system, 'chown')
@patch.object(operating_system, 'create_directory')
def test_create_backup(self, *mocks):
    """create_backup should delegate to the guestagent backup module."""
    # NOTE(review): this reassignment shadows the @patch.object(backup,
    # 'backup') decorator's mock; the decorator still restores the
    # original attribute on exit, but its own mock goes unused.
    backup.backup = MagicMock(return_value=None)
    RedisManager().create_backup(self.context, 'backup_id_123')
    backup.backup.assert_any_call(self.context, 'backup_id_123')