Refactor the datastore manager classes

There is a large amount of boiler-plate code in each datastore manager.
As more managers are added, the time involved in maintaining all this
code will continue to grow. To alleviate this, a base manager class
has been added where all common code can reside.

The initial refactoring just moved some of the obvious code (such as
rpc_ping and update_status) into the base class, along with defining
properties that can be used to further abstract functionality
going forward.

The issue of having instances move in-and-out of ACTIVE state has
also been fixed by adding a flag file that is written by the base
class once prepare has finished successfully.

Closes-Bug: #1482795
Closes-Bug: #1487233
Partially Implements: blueprint datastore-manager-refactor
Change-Id: I603cf2ddebab1d7a5c874cd66431f803aaee2d42
This commit is contained in:
Peter Stachowski 2015-09-18 18:15:01 -04:00
parent 1cb7dc7792
commit 1faa4d427d
46 changed files with 773 additions and 882 deletions

View File

@ -98,7 +98,7 @@ class ServiceStatuses(object):
DELETED = ServiceStatus(0x05, 'deleted', 'DELETED')
FAILED_TIMEOUT_GUESTAGENT = ServiceStatus(0x18, 'guestagent error',
'ERROR')
BUILD_PENDING = ServiceStatus(0x19, 'build pending', 'BUILD')
INSTANCE_READY = ServiceStatus(0x19, 'instance ready', 'BUILD')
# Dissuade further additions at run-time.
ServiceStatus.__init__ = None

View File

@ -97,7 +97,7 @@ def build_file_path(base_dir, base_name, *extensions):
:returns: Path such as: 'base_dir/base_name.ext1.ext2.ext3'
"""
file_name = os.extsep.join([base_name] + list(extensions))
return os.path.join(base_dir, file_name)
return os.path.expanduser(os.path.join(base_dir, file_name))
def to_bytes(value):

View File

@ -17,45 +17,30 @@
import os
from oslo_log import log as logging
from oslo_service import periodic_task
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.guestagent.datastore.experimental.cassandra import service
from trove.guestagent import dbaas
from trove.guestagent.datastore import manager
from trove.guestagent import volume
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
MANAGER = CONF.datastore_manager
class Manager(periodic_task.PeriodicTasks):
class Manager(manager.Manager):
def __init__(self):
self.appStatus = service.CassandraAppStatus()
self.app = service.CassandraApp(self.appStatus)
super(Manager, self).__init__(CONF)
super(Manager, self).__init__('cassandra')
@periodic_task.periodic_task
def update_status(self, context):
"""Update the status of the Cassandra service."""
self.appStatus.update()
def rpc_ping(self, context):
LOG.debug("Responding to RPC ping.")
return True
@property
def status(self):
return self.appStatus
def restart(self, context):
self.app.restart()
def get_filesystem_stats(self, context, fs_path):
"""Gets the filesystem stats for the path given."""
mount_point = CONF.get(
'mysql' if not MANAGER else MANAGER).mount_point
return dbaas.get_filesystem_volume_stats(mount_point)
def start_db_with_conf_changes(self, context, config_contents):
self.app.start_db_with_conf_changes(config_contents)
@ -65,13 +50,11 @@ class Manager(periodic_task.PeriodicTasks):
def reset_configuration(self, context, configuration):
self.app.reset_configuration(configuration)
def prepare(self, context, packages, databases, memory_mb, users,
device_path=None, mount_point=None, backup_info=None,
config_contents=None, root_password=None, overrides=None,
cluster_config=None, snapshot=None):
LOG.info(_("Setting status of instance to BUILDING."))
self.appStatus.begin_install()
LOG.debug("Installing cassandra.")
def do_prepare(self, context, packages, databases, memory_mb, users,
device_path, mount_point, backup_info,
config_contents, root_password, overrides,
cluster_config, snapshot):
"""This is called from prepare in the base class."""
self.app.install_if_needed(packages)
self.app.init_storage_structure(mount_point)
@ -103,79 +86,76 @@ class Manager(periodic_task.PeriodicTasks):
LOG.debug("Restarting database after changes.")
self.app.start_db()
self.appStatus.end_install_or_restart()
LOG.info(_("Completed setup of Cassandra database instance."))
def change_passwords(self, context, users):
raise exception.DatastoreOperationNotSupported(
operation='change_passwords', datastore=MANAGER)
operation='change_passwords', datastore=self.manager)
def update_attributes(self, context, username, hostname, user_attrs):
raise exception.DatastoreOperationNotSupported(
operation='update_attributes', datastore=MANAGER)
operation='update_attributes', datastore=self.manager)
def create_database(self, context, databases):
raise exception.DatastoreOperationNotSupported(
operation='create_database', datastore=MANAGER)
operation='create_database', datastore=self.manager)
def create_user(self, context, users):
raise exception.DatastoreOperationNotSupported(
operation='create_user', datastore=MANAGER)
operation='create_user', datastore=self.manager)
def delete_database(self, context, database):
raise exception.DatastoreOperationNotSupported(
operation='delete_database', datastore=MANAGER)
operation='delete_database', datastore=self.manager)
def delete_user(self, context, user):
raise exception.DatastoreOperationNotSupported(
operation='delete_user', datastore=MANAGER)
operation='delete_user', datastore=self.manager)
def get_user(self, context, username, hostname):
raise exception.DatastoreOperationNotSupported(
operation='get_user', datastore=MANAGER)
operation='get_user', datastore=self.manager)
def grant_access(self, context, username, hostname, databases):
raise exception.DatastoreOperationNotSupported(
operation='grant_access', datastore=MANAGER)
operation='grant_access', datastore=self.manager)
def revoke_access(self, context, username, hostname, database):
raise exception.DatastoreOperationNotSupported(
operation='revoke_access', datastore=MANAGER)
operation='revoke_access', datastore=self.manager)
def list_access(self, context, username, hostname):
raise exception.DatastoreOperationNotSupported(
operation='list_access', datastore=MANAGER)
operation='list_access', datastore=self.manager)
def list_databases(self, context, limit=None, marker=None,
include_marker=False):
raise exception.DatastoreOperationNotSupported(
operation='list_databases', datastore=MANAGER)
operation='list_databases', datastore=self.manager)
def list_users(self, context, limit=None, marker=None,
include_marker=False):
raise exception.DatastoreOperationNotSupported(
operation='list_users', datastore=MANAGER)
operation='list_users', datastore=self.manager)
def enable_root(self, context):
raise exception.DatastoreOperationNotSupported(
operation='enable_root', datastore=MANAGER)
operation='enable_root', datastore=self.manager)
def enable_root_with_password(self, context, root_password=None):
LOG.debug("Enabling root with password.")
raise exception.DatastoreOperationNotSupported(
operation='enable_root_with_password', datastore=MANAGER)
operation='enable_root_with_password', datastore=self.manager)
def is_root_enabled(self, context):
raise exception.DatastoreOperationNotSupported(
operation='is_root_enabled', datastore=MANAGER)
operation='is_root_enabled', datastore=self.manager)
def _perform_restore(self, backup_info, context, restore_location, app):
raise exception.DatastoreOperationNotSupported(
operation='_perform_restore', datastore=MANAGER)
operation='_perform_restore', datastore=self.manager)
def create_backup(self, context, backup_info):
raise exception.DatastoreOperationNotSupported(
operation='create_backup', datastore=MANAGER)
operation='create_backup', datastore=self.manager)
def mount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
@ -197,52 +177,52 @@ class Manager(periodic_task.PeriodicTasks):
def update_overrides(self, context, overrides, remove=False):
LOG.debug("Updating overrides.")
raise exception.DatastoreOperationNotSupported(
operation='update_overrides', datastore=MANAGER)
operation='update_overrides', datastore=self.manager)
def apply_overrides(self, context, overrides):
LOG.debug("Applying overrides.")
raise exception.DatastoreOperationNotSupported(
operation='apply_overrides', datastore=MANAGER)
operation='apply_overrides', datastore=self.manager)
def get_replication_snapshot(self, context, snapshot_info,
replica_source_config=None):
raise exception.DatastoreOperationNotSupported(
operation='get_replication_snapshot', datastore=MANAGER)
operation='get_replication_snapshot', datastore=self.manager)
def attach_replication_slave(self, context, snapshot, slave_config):
LOG.debug("Attaching replication slave.")
raise exception.DatastoreOperationNotSupported(
operation='attach_replication_slave', datastore=MANAGER)
operation='attach_replication_slave', datastore=self.manager)
def detach_replica(self, context, for_failover=False):
raise exception.DatastoreOperationNotSupported(
operation='detach_replica', datastore=MANAGER)
operation='detach_replica', datastore=self.manager)
def get_replica_context(self, context):
raise exception.DatastoreOperationNotSupported(
operation='get_replica_context', datastore=MANAGER)
operation='get_replica_context', datastore=self.manager)
def make_read_only(self, context, read_only):
raise exception.DatastoreOperationNotSupported(
operation='make_read_only', datastore=MANAGER)
operation='make_read_only', datastore=self.manager)
def enable_as_master(self, context, replica_source_config):
raise exception.DatastoreOperationNotSupported(
operation='enable_as_master', datastore=MANAGER)
operation='enable_as_master', datastore=self.manager)
def get_txn_count(self):
raise exception.DatastoreOperationNotSupported(
operation='get_txn_count', datastore=MANAGER)
operation='get_txn_count', datastore=self.manager)
def get_latest_txn_id(self):
raise exception.DatastoreOperationNotSupported(
operation='get_latest_txn_id', datastore=MANAGER)
operation='get_latest_txn_id', datastore=self.manager)
def wait_for_txn(self, txn):
raise exception.DatastoreOperationNotSupported(
operation='wait_for_txn', datastore=MANAGER)
operation='wait_for_txn', datastore=self.manager)
def demote_replication_master(self, context):
LOG.debug("Demoting replication master.")
raise exception.DatastoreOperationNotSupported(
operation='demote_replication_master', datastore=MANAGER)
operation='demote_replication_master', datastore=self.manager)

View File

@ -53,9 +53,6 @@ class CassandraApp(object):
self._install_db(packages)
LOG.debug("Cassandra install_if_needed complete")
def complete_install_or_restart(self):
self.status.end_install_or_restart()
def _enable_db_on_boot(self):
utils.execute_with_timeout(system.ENABLE_CASSANDRA_ON_BOOT,
shell=True)
@ -89,7 +86,7 @@ class CassandraApp(object):
shell=True)
except exception.ProcessExecutionError:
LOG.exception(_("Error killing Cassandra start command."))
self.status.end_install_or_restart()
self.status.end_restart()
raise RuntimeError(_("Could not start Cassandra"))
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
@ -103,7 +100,7 @@ class CassandraApp(object):
rd_instance.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db)):
LOG.error(_("Could not stop Cassandra."))
self.status.end_install_or_restart()
self.status.end_restart()
raise RuntimeError(_("Could not stop Cassandra."))
def restart(self):
@ -113,7 +110,7 @@ class CassandraApp(object):
self.stop_db()
self.start_db()
finally:
self.status.end_install_or_restart()
self.status.end_restart()
def _install_db(self, packages):
"""Install cassandra server"""

View File

@ -16,25 +16,21 @@
import os
from oslo_log import log as logging
from oslo_service import periodic_task
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.guestagent import backup
from trove.guestagent.datastore.experimental.couchbase import service
from trove.guestagent.datastore.experimental.couchbase import system
from trove.guestagent import dbaas
from trove.guestagent.datastore import manager
from trove.guestagent import volume
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
MANAGER = CONF.datastore_manager
class Manager(periodic_task.PeriodicTasks):
class Manager(manager.Manager):
"""
This is Couchbase Manager class. It is dynamically loaded
based off of the datastore of the trove instance
@ -42,37 +38,24 @@ class Manager(periodic_task.PeriodicTasks):
def __init__(self):
self.appStatus = service.CouchbaseAppStatus()
self.app = service.CouchbaseApp(self.appStatus)
super(Manager, self).__init__(CONF)
super(Manager, self).__init__('couchbase')
@periodic_task.periodic_task
def update_status(self, context):
"""
Updates the couchbase trove instance. It is decorated with
perodic task so it is automatically called every 3 ticks.
"""
self.appStatus.update()
def rpc_ping(self, context):
LOG.debug("Responding to RPC ping.")
return True
@property
def status(self):
return self.appStatus
def change_passwords(self, context, users):
raise exception.DatastoreOperationNotSupported(
operation='change_passwords', datastore=MANAGER)
operation='change_passwords', datastore=self.manager)
def reset_configuration(self, context, configuration):
self.app.reset_configuration(configuration)
def prepare(self, context, packages, databases, memory_mb, users,
device_path=None, mount_point=None, backup_info=None,
config_contents=None, root_password=None, overrides=None,
cluster_config=None, snapshot=None):
"""
This is called when the trove instance first comes online.
It is the first rpc message passed from the task manager.
prepare handles all the base configuration of the Couchbase instance.
"""
self.appStatus.begin_install()
def do_prepare(self, context, packages, databases, memory_mb, users,
device_path, mount_point, backup_info,
config_contents, root_password, overrides,
cluster_config, snapshot):
"""This is called from prepare in the base class."""
self.app.install_if_needed(packages)
if device_path:
device = volume.VolumeDevice(device_path)
@ -91,8 +74,6 @@ class Manager(periodic_task.PeriodicTasks):
self._perform_restore(backup_info,
context,
mount_point)
self.app.complete_install_or_restart()
LOG.info(_('Completed setup of Couchbase database instance.'))
def restart(self, context):
"""
@ -113,57 +94,51 @@ class Manager(periodic_task.PeriodicTasks):
"""
self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)
def get_filesystem_stats(self, context, fs_path):
"""Gets the filesystem stats for the path given."""
mount_point = CONF.get(
'mysql' if not MANAGER else MANAGER).mount_point
return dbaas.get_filesystem_volume_stats(mount_point)
def update_attributes(self, context, username, hostname, user_attrs):
raise exception.DatastoreOperationNotSupported(
operation='update_attributes', datastore=MANAGER)
operation='update_attributes', datastore=self.manager)
def create_database(self, context, databases):
raise exception.DatastoreOperationNotSupported(
operation='create_database', datastore=MANAGER)
operation='create_database', datastore=self.manager)
def create_user(self, context, users):
raise exception.DatastoreOperationNotSupported(
operation='create_user', datastore=MANAGER)
operation='create_user', datastore=self.manager)
def delete_database(self, context, database):
raise exception.DatastoreOperationNotSupported(
operation='delete_database', datastore=MANAGER)
operation='delete_database', datastore=self.manager)
def delete_user(self, context, user):
raise exception.DatastoreOperationNotSupported(
operation='delete_user', datastore=MANAGER)
operation='delete_user', datastore=self.manager)
def get_user(self, context, username, hostname):
raise exception.DatastoreOperationNotSupported(
operation='get_user', datastore=MANAGER)
operation='get_user', datastore=self.manager)
def grant_access(self, context, username, hostname, databases):
raise exception.DatastoreOperationNotSupported(
operation='grant_access', datastore=MANAGER)
operation='grant_access', datastore=self.manager)
def revoke_access(self, context, username, hostname, database):
raise exception.DatastoreOperationNotSupported(
operation='revoke_access', datastore=MANAGER)
operation='revoke_access', datastore=self.manager)
def list_access(self, context, username, hostname):
raise exception.DatastoreOperationNotSupported(
operation='list_access', datastore=MANAGER)
operation='list_access', datastore=self.manager)
def list_databases(self, context, limit=None, marker=None,
include_marker=False):
raise exception.DatastoreOperationNotSupported(
operation='list_databases', datastore=MANAGER)
operation='list_databases', datastore=self.manager)
def list_users(self, context, limit=None, marker=None,
include_marker=False):
raise exception.DatastoreOperationNotSupported(
operation='list_users', datastore=MANAGER)
operation='list_users', datastore=self.manager)
def enable_root(self, context):
LOG.debug("Enabling root.")
@ -172,7 +147,7 @@ class Manager(periodic_task.PeriodicTasks):
def enable_root_with_password(self, context, root_password=None):
LOG.debug("Enabling root with password.")
raise exception.DatastoreOperationNotSupported(
operation='enable_root_with_password', datastore=MANAGER)
operation='enable_root_with_password', datastore=self.manager)
def is_root_enabled(self, context):
LOG.debug("Checking if root is enabled.")
@ -221,52 +196,52 @@ class Manager(periodic_task.PeriodicTasks):
def update_overrides(self, context, overrides, remove=False):
LOG.debug("Updating overrides.")
raise exception.DatastoreOperationNotSupported(
operation='update_overrides', datastore=MANAGER)
operation='update_overrides', datastore=self.manager)
def apply_overrides(self, context, overrides):
LOG.debug("Applying overrides.")
raise exception.DatastoreOperationNotSupported(
operation='apply_overrides', datastore=MANAGER)
operation='apply_overrides', datastore=self.manager)
def get_replication_snapshot(self, context, snapshot_info,
replica_source_config=None):
raise exception.DatastoreOperationNotSupported(
operation='get_replication_snapshot', datastore=MANAGER)
operation='get_replication_snapshot', datastore=self.manager)
def attach_replication_slave(self, context, snapshot, slave_config):
LOG.debug("Attaching replication slave.")
raise exception.DatastoreOperationNotSupported(
operation='attach_replication_slave', datastore=MANAGER)
operation='attach_replication_slave', datastore=self.manager)
def detach_replica(self, context, for_failover=False):
raise exception.DatastoreOperationNotSupported(
operation='detach_replica', datastore=MANAGER)
operation='detach_replica', datastore=self.manager)
def get_replica_context(self, context):
raise exception.DatastoreOperationNotSupported(
operation='get_replica_context', datastore=MANAGER)
operation='get_replica_context', datastore=self.manager)
def make_read_only(self, context, read_only):
raise exception.DatastoreOperationNotSupported(
operation='make_read_only', datastore=MANAGER)
operation='make_read_only', datastore=self.manager)
def enable_as_master(self, context, replica_source_config):
raise exception.DatastoreOperationNotSupported(
operation='enable_as_master', datastore=MANAGER)
operation='enable_as_master', datastore=self.manager)
def get_txn_count(self):
raise exception.DatastoreOperationNotSupported(
operation='get_txn_count', datastore=MANAGER)
operation='get_txn_count', datastore=self.manager)
def get_latest_txn_id(self):
raise exception.DatastoreOperationNotSupported(
operation='get_latest_txn_id', datastore=MANAGER)
operation='get_latest_txn_id', datastore=self.manager)
def wait_for_txn(self, txn):
raise exception.DatastoreOperationNotSupported(
operation='wait_for_txn', datastore=MANAGER)
operation='wait_for_txn', datastore=self.manager)
def demote_replication_master(self, context):
LOG.debug("Demoting replication slave.")
raise exception.DatastoreOperationNotSupported(
operation='demote_replication_master', datastore=MANAGER)
operation='demote_replication_master', datastore=self.manager)

View File

@ -92,12 +92,6 @@ class CouchbaseApp(object):
LOG.exception(_('Error performing initial Couchbase setup.'))
raise RuntimeError("Couchbase Server initial setup failed")
def complete_install_or_restart(self):
"""
finalize status updates for install or restart.
"""
self.status.end_install_or_restart()
def _install_couchbase(self, packages):
"""
Install the Couchbase Server.
@ -156,7 +150,7 @@ class CouchbaseApp(object):
rd_instance.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db):
LOG.error(_('Could not stop Couchbase Server.'))
self.status.end_install_or_restart()
self.status.end_restart()
raise RuntimeError(_("Could not stop Couchbase Server."))
def restart(self):
@ -166,7 +160,7 @@ class CouchbaseApp(object):
self.stop_db()
self.start_db()
finally:
self.status.end_install_or_restart()
self.status.end_restart()
def start_db(self, update_db=False):
"""
@ -193,7 +187,7 @@ class CouchbaseApp(object):
utils.execute_with_timeout(system.cmd_kill)
except exception.ProcessExecutionError:
LOG.exception(_('Error killing Couchbase start command.'))
self.status.end_install_or_restart()
self.status.end_restart()
raise RuntimeError("Could not start Couchbase Server")
def enable_root(self, root_password=None):

View File

@ -16,21 +16,17 @@
import os
from oslo_log import log as logging
from oslo_service import periodic_task
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.guestagent.datastore.experimental.couchdb import service
from trove.guestagent import dbaas
from trove.guestagent.datastore import manager
from trove.guestagent import volume
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
MANAGER = CONF.datastore_manager
class Manager(periodic_task.PeriodicTasks):
class Manager(manager.Manager):
"""
This is CouchDB Manager class. It is dynamically loaded
based off of the datastore of the Trove instance.
@ -39,22 +35,17 @@ class Manager(periodic_task.PeriodicTasks):
def __init__(self):
self.appStatus = service.CouchDBAppStatus()
self.app = service.CouchDBApp(self.appStatus)
super(Manager, self).__init__(CONF)
super(Manager, self).__init__('couchdb')
def rpc_ping(self, context):
LOG.debug("Responding to RPC ping.")
return True
@property
def status(self):
return self.appStatus
def prepare(self, context, packages, databases, memory_mb, users,
device_path=None, mount_point=None, backup_info=None,
config_contents=None, root_password=None, overrides=None,
cluster_config=None, snapshot=None):
"""
This is called when the Trove instance first comes online.
It is the first RPC message passed from the task manager.
prepare handles all the base configuration of the CouchDB instance.
"""
self.appStatus.begin_install()
def do_prepare(self, context, packages, databases, memory_mb, users,
device_path, mount_point, backup_info,
config_contents, root_password, overrides,
cluster_config, snapshot):
"""This is called from prepare in the base class."""
self.app.install_if_needed(packages)
if device_path:
self.app.stop_db()
@ -69,20 +60,6 @@ class Manager(periodic_task.PeriodicTasks):
self.app.start_db()
self.app.change_permissions()
self.app.make_host_reachable()
self.app.complete_install_or_restart()
LOG.info(_('Completed setup of CouchDB database instance.'))
@periodic_task.periodic_task
def update_status(self, context):
"""Update the status of the CouchDB service."""
self.appStatus.update()
def get_filesystem_stats(self, context, fs_path):
"""Gets the filesystem stats for the path given."""
LOG.debug("In get_filesystem_stats: fs_path= %s" % fs_path)
mount_point = CONF.get(
'mysql' if not MANAGER else MANAGER).mount_point
return dbaas.get_filesystem_volume_stats(mount_point)
def stop_db(self, context, do_not_start_on_reboot=False):
"""
@ -113,84 +90,84 @@ class Manager(periodic_task.PeriodicTasks):
def change_passwords(self, context, users):
LOG.debug("Changing password.")
raise exception.DatastoreOperationNotSupported(
operation='change_passwords', datastore=MANAGER)
operation='change_passwords', datastore=self.manager)
def update_attributes(self, context, username, hostname, user_attrs):
LOG.debug("Updating database attributes.")
raise exception.DatastoreOperationNotSupported(
operation='update_attributes', datastore=MANAGER)
operation='update_attributes', datastore=self.manager)
def create_database(self, context, databases):
LOG.debug("Creating database.")
raise exception.DatastoreOperationNotSupported(
operation='create_database', datastore=MANAGER)
operation='create_database', datastore=self.manager)
def create_user(self, context, users):
LOG.debug("Creating user.")
raise exception.DatastoreOperationNotSupported(
operation='create_user', datastore=MANAGER)
operation='create_user', datastore=self.manager)
def delete_database(self, context, database):
LOG.debug("Deleting database.")
raise exception.DatastoreOperationNotSupported(
operation='delete_database', datastore=MANAGER)
operation='delete_database', datastore=self.manager)
def delete_user(self, context, user):
LOG.debug("Deleting user.")
raise exception.DatastoreOperationNotSupported(
operation='delete_user', datastore=MANAGER)
operation='delete_user', datastore=self.manager)
def get_user(self, context, username, hostname):
LOG.debug("Getting user.")
raise exception.DatastoreOperationNotSupported(
operation='get_user', datastore=MANAGER)
operation='get_user', datastore=self.manager)
def grant_access(self, context, username, hostname, databases):
LOG.debug("Granting acccess.")
raise exception.DatastoreOperationNotSupported(
operation='grant_access', datastore=MANAGER)
operation='grant_access', datastore=self.manager)
def revoke_access(self, context, username, hostname, database):
LOG.debug("Revoking access.")
raise exception.DatastoreOperationNotSupported(
operation='revoke_access', datastore=MANAGER)
operation='revoke_access', datastore=self.manager)
def list_access(self, context, username, hostname):
LOG.debug("Listing access.")
raise exception.DatastoreOperationNotSupported(
operation='list_access', datastore=MANAGER)
operation='list_access', datastore=self.manager)
def list_databases(self, context, limit=None, marker=None,
include_marker=False):
LOG.debug("Listing databases.")
raise exception.DatastoreOperationNotSupported(
operation='list_databases', datastore=MANAGER)
operation='list_databases', datastore=self.manager)
def list_users(self, context, limit=None, marker=None,
include_marker=False):
LOG.debug("Listing users.")
raise exception.DatastoreOperationNotSupported(
operation='list_users', datastore=MANAGER)
operation='list_users', datastore=self.manager)
def enable_root(self, context):
LOG.debug("Enabling root.")
raise exception.DatastoreOperationNotSupported(
operation='enable_root', datastore=MANAGER)
operation='enable_root', datastore=self.manager)
def enable_root_with_password(self, context, root_password=None):
LOG.debug("Enabling root with password.")
raise exception.DatastoreOperationNotSupported(
operation='enable_root_with_password', datastore=MANAGER)
operation='enable_root_with_password', datastore=self.manager)
def is_root_enabled(self, context):
LOG.debug("Checking if root is enabled.")
raise exception.DatastoreOperationNotSupported(
operation='is_root_enabled', datastore=MANAGER)
operation='is_root_enabled', datastore=self.manager)
def create_backup(self, context, backup_info):
LOG.debug("Creating backup.")
raise exception.DatastoreOperationNotSupported(
operation='create_backup', datastore=MANAGER)
operation='create_backup', datastore=self.manager)
def start_db_with_conf_changes(self, context, config_contents):
LOG.debug("Starting CouchDB with configuration changes.")

View File

@ -64,12 +64,6 @@ class CouchDBApp(object):
packager.pkg_install(packages, {}, system.TIME_OUT)
LOG.info(_("Finished installing CouchDB server."))
def complete_install_or_restart(self):
"""
Finalize status updates for install or restart.
"""
self.status.end_install_or_restart()
def change_permissions(self):
"""
When CouchDB is installed, a default user 'couchdb' is created.
@ -152,7 +146,7 @@ class CouchDBApp(object):
rd_instance.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db):
LOG.error(_("Could not stop CouchDB."))
self.status.end_install_or_restart()
self.status.end_restart()
raise RuntimeError(_("Could not stop CouchDB."))
def start_db(self, update_db=False):
@ -176,7 +170,7 @@ class CouchDBApp(object):
rd_instance.ServiceStatuses.RUNNING,
self.state_change_wait_time, update_db):
LOG.error(_("Start up of CouchDB server failed."))
self.status.end_install_or_restart()
self.status.end_restart()
raise RuntimeError("Could not start CouchDB server.")
def restart(self):
@ -186,7 +180,7 @@ class CouchDBApp(object):
self.stop_db()
self.start_db()
finally:
self.status.end_install_or_restart()
self.status.end_restart()
def make_host_reachable(self):
try:

View File

@ -14,21 +14,17 @@
# under the License.
from oslo_log import log as logging
from oslo_service import periodic_task
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.guestagent.datastore.experimental.db2 import service
from trove.guestagent import dbaas
from trove.guestagent.datastore import manager
from trove.guestagent import volume
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
MANAGER = CONF.datastore_manager
class Manager(periodic_task.PeriodicTasks):
class Manager(manager.Manager):
"""
This is DB2 Manager class. It is dynamically loaded
based off of the datastore of the Trove instance.
@ -37,31 +33,17 @@ class Manager(periodic_task.PeriodicTasks):
self.appStatus = service.DB2AppStatus()
self.app = service.DB2App(self.appStatus)
self.admin = service.DB2Admin()
super(Manager, self).__init__(CONF)
super(Manager, self).__init__('db2')
@periodic_task.periodic_task
def update_status(self, context):
"""
Updates the status of DB2 Trove instance. It is decorated
with perodic task so it is automatically called every 3 ticks.
"""
self.appStatus.update()
@property
def status(self):
return self.appStatus
def rpc_ping(self, context):
LOG.debug("Responding to RPC ping.")
return True
def prepare(self, context, packages, databases, memory_mb, users,
device_path=None, mount_point=None, backup_info=None,
config_contents=None, root_password=None, overrides=None,
cluster_config=None, snapshot=None):
"""
This is called when the Trove instance first comes online.
It is the first rpc message passed from the task manager.
prepare handles all the base configuration of the DB2 instance.
"""
LOG.debug("Preparing the guest agent for DB2.")
self.appStatus.begin_install()
def do_prepare(self, context, packages, databases, memory_mb, users,
device_path, mount_point, backup_info,
config_contents, root_password, overrides,
cluster_config, snapshot):
"""This is called from prepare in the base class."""
if device_path:
device = volume.VolumeDevice(device_path)
device.unmount_device(device_path)
@ -71,16 +53,6 @@ class Manager(periodic_task.PeriodicTasks):
self.app.change_ownership(mount_point)
self.app.start_db()
if databases:
self.create_database(context, databases)
if users:
self.create_user(context, users)
self.update_status(context)
self.app.complete_install_or_restart()
LOG.info(_('Completed setup of DB2 database instance.'))
def restart(self, context):
"""
Restart this DB2 instance.
@ -99,13 +71,6 @@ class Manager(periodic_task.PeriodicTasks):
LOG.debug("Stop a given DB2 server instance.")
self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)
def get_filesystem_stats(self, context, fs_path):
"""Gets the filesystem stats for the path given."""
LOG.debug("Get the filesystem stats.")
mount_point = CONF.get(
'db2' if not MANAGER else MANAGER).mount_point
return dbaas.get_filesystem_volume_stats(mount_point)
def create_database(self, context, databases):
LOG.debug("Creating database(s)." % databases)
self.admin.create_database(databases)
@ -164,12 +129,12 @@ class Manager(periodic_task.PeriodicTasks):
def grant_access(self, context, username, hostname, databases):
LOG.debug("Granting acccess.")
raise exception.DatastoreOperationNotSupported(
operation='grant_access', datastore=MANAGER)
operation='grant_access', datastore=self.manager)
def revoke_access(self, context, username, hostname, database):
LOG.debug("Revoking access.")
raise exception.DatastoreOperationNotSupported(
operation='revoke_access', datastore=MANAGER)
operation='revoke_access', datastore=self.manager)
def reset_configuration(self, context, configuration):
"""
@ -182,38 +147,38 @@ class Manager(periodic_task.PeriodicTasks):
def change_passwords(self, context, users):
LOG.debug("Changing password.")
raise exception.DatastoreOperationNotSupported(
operation='change_passwords', datastore=MANAGER)
operation='change_passwords', datastore=self.manager)
def update_attributes(self, context, username, hostname, user_attrs):
LOG.debug("Updating database attributes.")
raise exception.DatastoreOperationNotSupported(
operation='update_attributes', datastore=MANAGER)
operation='update_attributes', datastore=self.manager)
def enable_root(self, context):
LOG.debug("Enabling root.")
raise exception.DatastoreOperationNotSupported(
operation='enable_root', datastore=MANAGER)
operation='enable_root', datastore=self.manager)
def enable_root_with_password(self, context, root_password=None):
LOG.debug("Enabling root with password.")
raise exception.DatastoreOperationNotSupported(
operation='enable_root_with_password', datastore=MANAGER)
operation='enable_root_with_password', datastore=self.manager)
def is_root_enabled(self, context):
LOG.debug("Checking if root is enabled.")
raise exception.DatastoreOperationNotSupported(
operation='is_root_enabled', datastore=MANAGER)
operation='is_root_enabled', datastore=self.manager)
def _perform_restore(self, backup_info, context, restore_location, app):
raise exception.DatastoreOperationNotSupported(
operation='_perform_restore', datastore=MANAGER)
operation='_perform_restore', datastore=self.manager)
def create_backup(self, context, backup_info):
LOG.debug("Creating backup.")
raise exception.DatastoreOperationNotSupported(
operation='create_backup', datastore=MANAGER)
operation='create_backup', datastore=self.manager)
def get_config_changes(self, cluster_config, mount_point=None):
LOG.debug("Get configuration changes")
raise exception.DatastoreOperationNotSupported(
operation='get_configuration_changes', datastore=MANAGER)
operation='get_configuration_changes', datastore=self.manager)

View File

@ -44,9 +44,6 @@ class DB2App(object):
LOG.debug("state_change_wait_time = %s." % self.state_change_wait_time)
self.status = status
def complete_install_or_restart(self):
self.status.end_install_or_restart()
def change_ownership(self, mount_point):
"""
When DB2 server instance is installed, it does not have the
@ -104,7 +101,7 @@ class DB2App(object):
rd_instance.ServiceStatuses.RUNNING,
self.state_change_wait_time, update_db):
LOG.error(_("Start of DB2 server instance failed."))
self.status.end_install_or_restart()
self.status.end_restart()
raise RuntimeError(_("Could not start DB2."))
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
@ -120,7 +117,7 @@ class DB2App(object):
rd_instance.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db)):
LOG.error(_("Could not stop DB2."))
self.status.end_install_or_restart()
self.status.end_restart()
raise RuntimeError(_("Could not stop DB2."))
def restart(self):
@ -130,7 +127,7 @@ class DB2App(object):
self.stop_db()
self.start_db()
finally:
self.status.end_install_or_restart()
self.status.end_restart()
class DB2AppStatus(service.BaseDbStatus):

View File

@ -16,11 +16,12 @@
from oslo_utils import importutils
from trove.common import cfg
from trove.guestagent.datastore.mysql import manager_base
from trove.guestagent.datastore.mysql_common import manager
from trove.guestagent.strategies.replication import get_replication_strategy
CONF = cfg.CONF
MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'mysql'
MANAGER = CONF.datastore_manager or 'mysql'
REPLICATION_STRATEGY = CONF.get(MANAGER).replication_strategy
REPLICATION_NAMESPACE = CONF.get(MANAGER).replication_namespace
REPLICATION_STRATEGY_CLASS = get_replication_strategy(REPLICATION_STRATEGY,
@ -34,7 +35,7 @@ MYSQL_ADMIN = "trove.guestagent.datastore.experimental.mariadb.service." \
"MySqlAdmin"
class Manager(manager_base.BaseMySqlManager):
class Manager(manager.MySqlManager):
def __init__(self):
mysql_app = importutils.import_class(MYSQL_APP)

View File

@ -15,36 +15,36 @@
#
from oslo_log import log as logging
from trove.guestagent.datastore.mysql import service_base
from trove.guestagent.datastore.mysql_common import service
LOG = logging.getLogger(__name__)
class KeepAliveConnection(service_base.BaseKeepAliveConnection):
class KeepAliveConnection(service.BaseKeepAliveConnection):
pass
class MySqlAppStatus(service_base.BaseMySqlAppStatus):
class MySqlAppStatus(service.BaseMySqlAppStatus):
pass
class LocalSqlClient(service_base.BaseLocalSqlClient):
class LocalSqlClient(service.BaseLocalSqlClient):
pass
class MySqlApp(service_base.BaseMySqlApp):
class MySqlApp(service.BaseMySqlApp):
def __init__(self, status):
super(MySqlApp, self).__init__(status, LocalSqlClient,
KeepAliveConnection)
class MySqlRootAccess(service_base.BaseMySqlRootAccess):
class MySqlRootAccess(service.BaseMySqlRootAccess):
def __init__(self):
super(MySqlRootAccess, self).__init__(LocalSqlClient,
MySqlApp(MySqlAppStatus.get()))
class MySqlAdmin(service_base.BaseMySqlAdmin):
class MySqlAdmin(service.BaseMySqlAdmin):
def __init__(self):
super(MySqlAdmin, self).__init__(LocalSqlClient, MySqlRootAccess(),
MySqlApp)

View File

@ -16,9 +16,7 @@
import os
from oslo_log import log as logging
from oslo_service import periodic_task
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as ds_instance
@ -26,39 +24,29 @@ from trove.guestagent import backup
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.mongodb import service
from trove.guestagent.datastore.experimental.mongodb import system
from trove.guestagent.datastore import manager
from trove.guestagent import dbaas
from trove.guestagent import volume
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
MANAGER = CONF.datastore_manager
class Manager(periodic_task.PeriodicTasks):
class Manager(manager.Manager):
def __init__(self):
self.app = service.MongoDBApp()
super(Manager, self).__init__(CONF)
super(Manager, self).__init__('mongodb')
@periodic_task.periodic_task
def update_status(self, context):
"""Update the status of the MongoDB service."""
self.app.status.update()
@property
def status(self):
return self.app.status
def rpc_ping(self, context):
LOG.debug("Responding to RPC ping.")
return True
def prepare(self, context, packages, databases, memory_mb, users,
device_path=None, mount_point=None, backup_info=None,
config_contents=None, root_password=None, overrides=None,
cluster_config=None, snapshot=None):
"""Makes ready DBAAS on a Guest container."""
LOG.debug("Preparing MongoDB instance.")
self.app.status.begin_install()
def do_prepare(self, context, packages, databases, memory_mb, users,
device_path, mount_point, backup_info,
config_contents, root_password, overrides,
cluster_config, snapshot):
"""This is called from prepare in the base class."""
self.app.install_if_needed(packages)
self.app.wait_for_start()
self.app.stop_db()
@ -105,20 +93,6 @@ class Manager(periodic_task.PeriodicTasks):
LOG.debug('Root password provided. Enabling root.')
service.MongoDBAdmin().enable_root(root_password)
if not cluster_config:
if databases:
self.create_database(context, databases)
if users:
self.create_user(context, users)
if cluster_config:
self.app.status.set_status(
ds_instance.ServiceStatuses.BUILD_PENDING)
else:
self.app.complete_install_or_restart()
LOG.info(_('Completed setup of MongoDB database instance.'))
def restart(self, context):
LOG.debug("Restarting MongoDB.")
self.app.restart()
@ -138,17 +112,18 @@ class Manager(periodic_task.PeriodicTasks):
def get_filesystem_stats(self, context, fs_path):
"""Gets the filesystem stats for the path given."""
LOG.debug("Getting file system status.")
# TODO(peterstac) - why is this hard-coded?
return dbaas.get_filesystem_volume_stats(system.MONGODB_MOUNT_POINT)
def change_passwords(self, context, users):
LOG.debug("Changing password.")
raise exception.DatastoreOperationNotSupported(
operation='change_passwords', datastore=MANAGER)
operation='change_passwords', datastore=self.manager)
def update_attributes(self, context, username, hostname, user_attrs):
LOG.debug("Updating database attributes.")
raise exception.DatastoreOperationNotSupported(
operation='update_attributes', datastore=MANAGER)
operation='update_attributes', datastore=self.manager)
def create_database(self, context, databases):
LOG.debug("Creating database(s).")
@ -200,7 +175,7 @@ class Manager(periodic_task.PeriodicTasks):
def enable_root_with_password(self, context, root_password=None):
LOG.debug("Enabling root with password.")
raise exception.DatastoreOperationNotSupported(
operation='enable_root_with_password', datastore=MANAGER)
operation='enable_root_with_password', datastore=self.manager)
def is_root_enabled(self, context):
LOG.debug("Checking if root is enabled.")
@ -253,46 +228,46 @@ class Manager(periodic_task.PeriodicTasks):
replica_source_config=None):
LOG.debug("Getting replication snapshot.")
raise exception.DatastoreOperationNotSupported(
operation='get_replication_snapshot', datastore=MANAGER)
operation='get_replication_snapshot', datastore=self.manager)
def attach_replication_slave(self, context, snapshot, slave_config):
LOG.debug("Attaching replica.")
raise exception.DatastoreOperationNotSupported(
operation='attach_replication_slave', datastore=MANAGER)
operation='attach_replication_slave', datastore=self.manager)
def detach_replica(self, context, for_failover=False):
LOG.debug("Detaching replica.")
raise exception.DatastoreOperationNotSupported(
operation='detach_replica', datastore=MANAGER)
operation='detach_replica', datastore=self.manager)
def get_replica_context(self, context):
raise exception.DatastoreOperationNotSupported(
operation='get_replica_context', datastore=MANAGER)
operation='get_replica_context', datastore=self.manager)
def make_read_only(self, context, read_only):
raise exception.DatastoreOperationNotSupported(
operation='make_read_only', datastore=MANAGER)
operation='make_read_only', datastore=self.manager)
def enable_as_master(self, context, replica_source_config):
raise exception.DatastoreOperationNotSupported(
operation='enable_as_master', datastore=MANAGER)
operation='enable_as_master', datastore=self.manager)
def get_txn_count(self):
raise exception.DatastoreOperationNotSupported(
operation='get_txn_count', datastore=MANAGER)
operation='get_txn_count', datastore=self.manager)
def get_latest_txn_id(self):
raise exception.DatastoreOperationNotSupported(
operation='get_latest_txn_id', datastore=MANAGER)
operation='get_latest_txn_id', datastore=self.manager)
def wait_for_txn(self, txn):
raise exception.DatastoreOperationNotSupported(
operation='wait_for_txn', datastore=MANAGER)
operation='wait_for_txn', datastore=self.manager)
def demote_replication_master(self, context):
LOG.debug("Demoting replica source.")
raise exception.DatastoreOperationNotSupported(
operation='demote_replication_master', datastore=MANAGER)
operation='demote_replication_master', datastore=self.manager)
def add_members(self, context, members):
try:
@ -325,12 +300,6 @@ class Manager(periodic_task.PeriodicTasks):
self.app.status.set_status(ds_instance.ServiceStatuses.FAILED)
raise
def cluster_complete(self, context):
# Now that cluster creation is complete, start status checks
LOG.debug("Cluster creation complete, starting status checks.")
status = self.app.status._get_actual_db_status()
self.app.status.set_status(status)
def get_key(self, context):
# Return the cluster key
LOG.debug("Getting the cluster key.")

View File

@ -133,7 +133,7 @@ class MongoDBApp(object):
ds_instance.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db):
LOG.error(_("Could not stop MongoDB."))
self.status.end_install_or_restart()
self.status.end_restart()
raise RuntimeError(_("Could not stop MongoDB"))
def restart(self):
@ -143,7 +143,7 @@ class MongoDBApp(object):
self.stop_db()
self.start_db()
finally:
self.status.end_install_or_restart()
self.status.end_restart()
def start_db(self, update_db=False):
LOG.info(_("Starting MongoDB."))
@ -177,13 +177,10 @@ class MongoDBApp(object):
except exception.ProcessExecutionError:
LOG.exception(_("Error killing MongoDB start command."))
# There's nothing more we can do...
self.status.end_install_or_restart()
self.status.end_restart()
raise RuntimeError("Could not start MongoDB.")
LOG.debug('MongoDB started successfully.')
def complete_install_or_restart(self):
self.status.end_install_or_restart()
def update_overrides(self, context, overrides, remove=False):
if overrides:
self.configuration_manager.apply_user_override(overrides)

View File

@ -16,11 +16,12 @@
from oslo_utils import importutils
from trove.common import cfg
from trove.guestagent.datastore.mysql import manager_base
from trove.guestagent.datastore.mysql_common import manager
from trove.guestagent.strategies.replication import get_replication_strategy
CONF = cfg.CONF
MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'mysql'
MANAGER = CONF.datastore_manager or 'mysql'
REPLICATION_STRATEGY = CONF.get(MANAGER).replication_strategy
REPLICATION_NAMESPACE = CONF.get(MANAGER).replication_namespace
REPLICATION_STRATEGY_CLASS = get_replication_strategy(REPLICATION_STRATEGY,
@ -34,7 +35,7 @@ MYSQL_ADMIN = "trove.guestagent.datastore.experimental.percona." \
"service.MySqlAdmin"
class Manager(manager_base.BaseMySqlManager):
class Manager(manager.MySqlManager):
def __init__(self):
mysql_app = importutils.import_class(MYSQL_APP)

View File

@ -15,36 +15,36 @@
#
from oslo_log import log as logging
from trove.guestagent.datastore.mysql import service_base
from trove.guestagent.datastore.mysql_common import service
LOG = logging.getLogger(__name__)
class KeepAliveConnection(service_base.BaseKeepAliveConnection):
class KeepAliveConnection(service.BaseKeepAliveConnection):
pass
class MySqlAppStatus(service_base.BaseMySqlAppStatus):
class MySqlAppStatus(service.BaseMySqlAppStatus):
pass
class LocalSqlClient(service_base.BaseLocalSqlClient):
class LocalSqlClient(service.BaseLocalSqlClient):
pass
class MySqlApp(service_base.BaseMySqlApp):
class MySqlApp(service.BaseMySqlApp):
def __init__(self, status):
super(MySqlApp, self).__init__(status, LocalSqlClient,
KeepAliveConnection)
class MySqlRootAccess(service_base.BaseMySqlRootAccess):
class MySqlRootAccess(service.BaseMySqlRootAccess):
def __init__(self):
super(MySqlRootAccess, self).__init__(LocalSqlClient,
MySqlApp(MySqlAppStatus.get()))
class MySqlAdmin(service_base.BaseMySqlAdmin):
class MySqlAdmin(service.BaseMySqlAdmin):
def __init__(self):
super(MySqlAdmin, self).__init__(LocalSqlClient, MySqlRootAccess(),
MySqlApp)

View File

@ -16,9 +16,7 @@
import os
from oslo_config import cfg as os_cfg
from oslo_log import log as logging
from oslo_service import periodic_task
from .service.config import PgSqlConfig
from .service.database import PgSqlDatabase
@ -26,18 +24,16 @@ from .service.install import PgSqlInstall
from .service.root import PgSqlRoot
from .service.status import PgSqlAppStatus
from .service.users import PgSqlUsers
from trove.common import cfg
from trove.guestagent import backup
from trove.guestagent import dbaas
from trove.guestagent.datastore import manager
from trove.guestagent import volume
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Manager(
periodic_task.PeriodicTasks,
manager.Manager,
PgSqlUsers,
PgSqlDatabase,
PgSqlRoot,
@ -45,42 +41,17 @@ class Manager(
PgSqlInstall,
):
def __init__(self, *args, **kwargs):
if len(args) and isinstance(args[0], os_cfg.ConfigOpts):
conf = args[0]
elif 'conf' in kwargs:
conf = kwargs['conf']
else:
conf = CONF
def __init__(self):
super(Manager, self).__init__()
super(Manager, self).__init__(conf)
@property
def status(self):
return PgSqlAppStatus.get()
@periodic_task.periodic_task
def update_status(self, context):
PgSqlAppStatus.get().update()
def rpc_ping(self, context):
LOG.debug("Responding to RPC ping.")
return True
def prepare(
self,
context,
packages,
databases,
memory_mb,
users,
device_path=None,
mount_point=None,
backup_info=None,
config_contents=None,
root_password=None,
overrides=None,
cluster_config=None,
snapshot=None
):
def do_prepare(self, context, packages, databases, memory_mb, users,
device_path, mount_point, backup_info, config_contents,
root_password, overrides, cluster_config, snapshot):
self.install(context, packages)
PgSqlAppStatus.get().begin_restart()
self.stop_db(context)
if device_path:
device = volume.VolumeDevice(device_path)
@ -98,18 +69,6 @@ class Manager(
if root_password and not backup_info:
self.enable_root(context, root_password)
PgSqlAppStatus.get().end_install_or_restart()
if databases:
self.create_database(context, databases)
if users:
self.create_user(context, users)
def get_filesystem_stats(self, context, fs_path):
mount_point = CONF.get(CONF.datastore_manager).mount_point
return dbaas.get_filesystem_volume_stats(mount_point)
def create_backup(self, context, backup_info):
backup.backup(context, backup_info)

View File

@ -119,4 +119,4 @@ class PgSqlConfig(PgSqlProcess):
guest_id=CONF.guest_id,
)
)
PgSqlAppStatus.get().end_install_or_restart()
PgSqlAppStatus.get().end_restart()

View File

@ -17,11 +17,8 @@ from oslo_log import log as logging
from trove.common import cfg
from trove.common.i18n import _
from trove.common import instance
from trove.guestagent.datastore.experimental.postgresql.service.process import(
PgSqlProcess)
from trove.guestagent.datastore.experimental.postgresql.service.status import(
PgSqlAppStatus)
from trove.guestagent import pkg
LOG = logging.getLogger(__name__)
@ -46,7 +43,6 @@ class PgSqlInstall(PgSqlProcess):
guest_id=CONF.guest_id
)
)
PgSqlAppStatus.get().begin_install()
packager = pkg.Package()
if not packager.pkg_is_installed(packages):
try:
@ -69,10 +65,7 @@ class PgSqlInstall(PgSqlProcess):
packages=packages,
)
)
PgSqlAppStatus.get().end_install_or_restart()
PgSqlAppStatus.get().set_status(
instance.ServiceStatuses.FAILED
)
raise
except Exception:
LOG.exception(
"{guest_id}: The package manager encountered an unknown "
@ -81,13 +74,9 @@ class PgSqlInstall(PgSqlProcess):
packages=packages,
)
)
PgSqlAppStatus.get().end_install_or_restart()
PgSqlAppStatus.get().set_status(
instance.ServiceStatuses.FAILED
)
raise
else:
self.start_db(context)
PgSqlAppStatus.get().end_install_or_restart()
LOG.debug(
"{guest_id}: Completed package installation.".format(
guest_id=CONF.guest_id,

View File

@ -14,23 +14,19 @@
# under the License.
#
import os
from oslo_log import log as logging
from oslo_utils import importutils
from trove.common import cfg
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.mysql import manager_base
from trove.guestagent.datastore.mysql import service_base
from trove.guestagent.datastore.mysql_common import manager
from trove.guestagent.strategies.replication import get_replication_strategy
from trove.guestagent import volume
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'mysql'
MANAGER = CONF.datastore_manager or 'mysql'
REPLICATION_STRATEGY = CONF.get(MANAGER).replication_strategy
REPLICATION_NAMESPACE = CONF.get(MANAGER).replication_namespace
REPLICATION_STRATEGY_CLASS = get_replication_strategy(REPLICATION_STRATEGY,
@ -44,7 +40,7 @@ MYSQL_ADMIN = "trove.guestagent.datastore.experimental.pxc." \
"service.PXCAdmin"
class Manager(manager_base.BaseMySqlManager):
class Manager(manager.MySqlManager):
def __init__(self):
mysql_app = importutils.import_class(MYSQL_APP)
@ -56,72 +52,16 @@ class Manager(manager_base.BaseMySqlManager):
REPLICATION_NAMESPACE,
REPLICATION_STRATEGY_CLASS, MANAGER)
def prepare(self, context, packages, databases, memory_mb, users,
device_path=None, mount_point=None, backup_info=None,
config_contents=None, root_password=None, overrides=None,
cluster_config=None, snapshot=None):
"""Makes ready DBAAS on a Guest container."""
self.mysql_app_status.get().begin_install()
# status end_mysql_install set with secure()
app = self.mysql_app(self.mysql_app_status.get())
app.install_if_needed(packages)
if device_path:
# stop and do not update database
app.stop_db(do_not_start_on_reboot=True)
device = volume.VolumeDevice(device_path)
# unmount if device is already mounted
device.unmount_device(device_path)
device.format()
if os.path.exists(mount_point):
# rsync existing data to a "data" sub-directory
# on the new volume
device.migrate_data(mount_point, target_subdir="data")
# mount the volume
device.mount(mount_point)
operating_system.chown(
mount_point, service_base.MYSQL_OWNER,
service_base.MYSQL_OWNER,
recursive=False, as_root=True)
LOG.debug("Mounted the volume at %s." % mount_point)
# We need to temporarily update the default my.cnf so that
# mysql will start after the volume is mounted. Later on it
# will be changed based on the config template
# (see MySqlApp.secure()) and restart.
app.set_data_dir(mount_point + '/data')
app.start_mysql()
if backup_info:
self._perform_restore(backup_info, context,
mount_point + "/data", app)
LOG.debug("Securing MySQL now.")
app.secure(config_contents, overrides)
enable_root_on_restore = (backup_info and
MySqlAdmin().is_root_enabled())
if root_password and not backup_info:
app.secure_root(secure_remote_root=True)
MySqlAdmin().enable_root(root_password)
elif enable_root_on_restore:
app.secure_root(secure_remote_root=False)
app.get().report_root(context, 'root')
else:
app.secure_root(secure_remote_root=True)
if cluster_config is None:
app.complete_install_or_restart()
else:
app.status.set_status(
rd_instance.ServiceStatuses.BUILD_PENDING)
if databases:
self.create_database(context, databases)
if users:
self.create_user(context, users)
if snapshot:
self.attach_replica(context, snapshot, snapshot['config'])
LOG.info(_('Completed setup of MySQL database instance.'))
def do_prepare(self, context, packages, databases, memory_mb, users,
device_path, mount_point, backup_info,
config_contents, root_password, overrides,
cluster_config, snapshot):
self.volume_do_not_start_on_reboot = True
super(Manager, self).do_prepare(
context, packages, databases, memory_mb, users,
device_path, mount_point, backup_info,
config_contents, root_password, overrides,
cluster_config, snapshot)
def install_cluster(self, context, replication_user, cluster_configuration,
bootstrap):
@ -140,9 +80,3 @@ class Manager(manager_base.BaseMySqlManager):
LOG.debug("Storing the admin password on the instance.")
app = self.mysql_app(self.mysql_app_status.get())
app.reset_admin_password(admin_password)
def cluster_complete(self, context):
LOG.debug("Cluster creation complete, starting status checks.")
app = self.mysql_app(self.mysql_app_status.get())
status = app.status._get_actual_db_status()
app.status.set_status(status)

View File

@ -22,28 +22,28 @@ from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import sql_query
from trove.guestagent.datastore.experimental.pxc import system
from trove.guestagent.datastore.mysql import service_base
from trove.guestagent.datastore.mysql_common import service
LOG = logging.getLogger(__name__)
CONF = service_base.CONF
CONF = service.CONF
CNF_CLUSTER = "cluster"
class KeepAliveConnection(service_base.BaseKeepAliveConnection):
class KeepAliveConnection(service.BaseKeepAliveConnection):
pass
class PXCAppStatus(service_base.BaseMySqlAppStatus):
class PXCAppStatus(service.BaseMySqlAppStatus):
pass
class LocalSqlClient(service_base.BaseLocalSqlClient):
class LocalSqlClient(service.BaseLocalSqlClient):
pass
class PXCApp(service_base.BaseMySqlApp):
class PXCApp(service.BaseMySqlApp):
def __init__(self, status):
super(PXCApp, self).__init__(status, LocalSqlClient,
KeepAliveConnection)
@ -66,7 +66,7 @@ class PXCApp(service_base.BaseMySqlApp):
def secure(self, config_contents, overrides):
LOG.info(_("Generating admin password."))
admin_password = utils.generate_random_password()
service_base.clear_expired_password()
service.clear_expired_password()
engine = sqlalchemy.create_engine("mysql://root:@localhost:3306",
echo=True)
with LocalSqlClient(engine) as client:
@ -113,7 +113,7 @@ class PXCApp(service_base.BaseMySqlApp):
LOG.info(_("Bootstraping cluster."))
try:
mysql_service = system.service_discovery(
service_base.MYSQL_SERVICE_CANDIDATES)
service.MYSQL_SERVICE_CANDIDATES)
utils.execute_with_timeout(
mysql_service['cmd_bootstrap_pxc_cluster'],
shell=True, timeout=timeout)
@ -137,13 +137,13 @@ class PXCApp(service_base.BaseMySqlApp):
self.start_mysql(timeout=CONF.restore_usage_timeout)
class PXCRootAccess(service_base.BaseMySqlRootAccess):
class PXCRootAccess(service.BaseMySqlRootAccess):
def __init__(self):
super(PXCRootAccess, self).__init__(LocalSqlClient,
PXCApp(PXCAppStatus.get()))
class PXCAdmin(service_base.BaseMySqlAdmin):
class PXCAdmin(service.BaseMySqlAdmin):
def __init__(self):
super(PXCAdmin, self).__init__(LocalSqlClient, PXCRootAccess(),
PXCApp)

View File

@ -14,7 +14,6 @@
# under the License.
from oslo_log import log as logging
from oslo_service import periodic_task
from trove.common import cfg
from trove.common import exception
@ -24,7 +23,7 @@ from trove.common import utils
from trove.guestagent import backup
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.redis import service
from trove.guestagent import dbaas
from trove.guestagent.datastore import manager
from trove.guestagent.strategies.replication import get_replication_strategy
from trove.guestagent import volume
@ -38,28 +37,19 @@ REPLICATION_STRATEGY_CLASS = get_replication_strategy(REPLICATION_STRATEGY,
REPLICATION_NAMESPACE)
class Manager(periodic_task.PeriodicTasks):
class Manager(manager.Manager):
"""
This is the Redis manager class. It is dynamically loaded
based off of the service_type of the trove instance
"""
def __init__(self):
super(Manager, self).__init__(CONF)
super(Manager, self).__init__(MANAGER)
self._app = service.RedisApp()
@periodic_task.periodic_task
def update_status(self, context):
"""
Updates the redis trove instance. It is decorated with
perodic task so it is automatically called every 3 ticks.
"""
LOG.debug("Update status called.")
self._app.status.update()
def rpc_ping(self, context):
LOG.debug("Responding to RPC ping.")
return True
@property
def status(self):
return self._app.status
def change_passwords(self, context, users):
"""
@ -90,52 +80,38 @@ class Manager(periodic_task.PeriodicTasks):
raise
LOG.info(_("Restored database successfully."))
def prepare(self, context, packages, databases, memory_mb, users,
device_path=None, mount_point=None, backup_info=None,
config_contents=None, root_password=None, overrides=None,
cluster_config=None, snapshot=None):
"""
This is called when the trove instance first comes online.
It is the first rpc message passed from the task manager.
prepare handles all the base configuration of the redis instance.
"""
try:
self._app.status.begin_install()
if device_path:
device = volume.VolumeDevice(device_path)
# unmount if device is already mounted
device.unmount_device(device_path)
device.format()
device.mount(mount_point)
operating_system.chown(mount_point, 'redis', 'redis',
as_root=True)
LOG.debug('Mounted the volume.')
self._app.install_if_needed(packages)
LOG.info(_('Writing redis configuration.'))
if cluster_config:
config_contents = (config_contents + "\n"
+ "cluster-enabled yes\n"
+ "cluster-config-file cluster.conf\n")
self._app.configuration_manager.save_configuration(config_contents)
self._app.apply_initial_guestagent_configuration()
if backup_info:
persistence_dir = self._app.get_working_dir()
self._perform_restore(backup_info, context, persistence_dir,
self._app)
else:
self._app.restart()
if snapshot:
self.attach_replica(context, snapshot, snapshot['config'])
if cluster_config:
self._app.status.set_status(
rd_instance.ServiceStatuses.BUILD_PENDING)
else:
self._app.complete_install_or_restart()
LOG.info(_('Redis instance has been setup and configured.'))
except Exception:
LOG.exception(_("Error setting up Redis instance."))
self._app.status.set_status(rd_instance.ServiceStatuses.FAILED)
raise
def do_prepare(self, context, packages, databases, memory_mb, users,
device_path, mount_point, backup_info,
config_contents, root_password, overrides,
cluster_config, snapshot):
"""This is called from prepare in the base class."""
if device_path:
device = volume.VolumeDevice(device_path)
# unmount if device is already mounted
device.unmount_device(device_path)
device.format()
device.mount(mount_point)
operating_system.chown(mount_point, 'redis', 'redis',
as_root=True)
LOG.debug('Mounted the volume.')
self._app.install_if_needed(packages)
LOG.info(_('Writing redis configuration.'))
if cluster_config:
config_contents = (config_contents + "\n"
+ "cluster-enabled yes\n"
+ "cluster-config-file cluster.conf\n")
self._app.configuration_manager.save_configuration(config_contents)
self._app.apply_initial_guestagent_configuration()
if backup_info:
persistence_dir = self._app.get_working_dir()
self._perform_restore(backup_info, context, persistence_dir,
self._app)
else:
# If we're not restoring, we have to force a restart of the
# server manually so that the configuration stuff takes effect
self._app.restart()
if snapshot:
self.attach_replica(context, snapshot, snapshot['config'])
def restart(self, context):
"""
@ -162,13 +138,6 @@ class Manager(periodic_task.PeriodicTasks):
LOG.debug("Stop DB called.")
self._app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)
def get_filesystem_stats(self, context, fs_path):
"""Gets the filesystem stats for the path given."""
LOG.debug("Get Filesystem Stats.")
mount_point = CONF.get(
'mysql' if not MANAGER else MANAGER).mount_point
return dbaas.get_filesystem_volume_stats(mount_point)
def create_backup(self, context, backup_info):
"""Create a backup of the database."""
LOG.debug("Creating backup.")
@ -288,8 +257,7 @@ class Manager(periodic_task.PeriodicTasks):
replication.snapshot_for_replication(context, self._app, None,
snapshot_info))
mount_point = CONF.get(MANAGER).mount_point
volume_stats = dbaas.get_filesystem_volume_stats(mount_point)
volume_stats = self.get_filesystem_stats(context, None)
replication_snapshot = {
'dataset': {
@ -414,7 +382,3 @@ class Manager(periodic_task.PeriodicTasks):
LOG.debug("Executing cluster_addslots to assign hash slots %s-%s.",
first_slot, last_slot)
self._app.cluster_addslots(first_slot, last_slot)
def cluster_complete(self, context):
LOG.debug("Cluster creation complete, starting status checks.")
self._app.complete_install_or_restart()

View File

@ -128,13 +128,6 @@ class RedisApp(object):
self._install_redis(packages)
LOG.info(_('Redis installed completely.'))
def complete_install_or_restart(self):
"""
finalize status updates for install or restart.
"""
LOG.debug("Complete install or restart called.")
self.status.end_install_or_restart()
def _install_redis(self, packages):
"""
Install the redis server.
@ -175,7 +168,7 @@ class RedisApp(object):
rd_instance.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db):
LOG.error(_('Could not stop Redis.'))
self.status.end_install_or_restart()
self.status.end_restart()
def restart(self):
"""
@ -187,7 +180,7 @@ class RedisApp(object):
self.stop_db()
self.start_redis()
finally:
self.status.end_install_or_restart()
self.status.end_restart()
def update_overrides(self, context, overrides, remove=False):
if overrides:
@ -271,7 +264,7 @@ class RedisApp(object):
root_helper='sudo')
except exception.ProcessExecutionError:
LOG.exception(_('Error killing stalled redis start command.'))
self.status.end_install_or_restart()
self.status.end_restart()
def apply_initial_guestagent_configuration(self):
"""Update guestagent-controlled configuration properties.

View File

@ -14,87 +14,62 @@
import os
from oslo_log import log as logging
from oslo_service import periodic_task
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as rd_ins
from trove.guestagent.datastore.experimental.vertica.service import (
VerticaAppStatus)
from trove.guestagent.datastore.experimental.vertica.service import VerticaApp
from trove.guestagent import dbaas
from trove.guestagent.datastore import manager
from trove.guestagent import volume
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
MANAGER = 'vertica' if not CONF.datastore_manager else CONF.datastore_manager
class Manager(periodic_task.PeriodicTasks):
class Manager(manager.Manager):
def __init__(self):
self.appStatus = VerticaAppStatus()
self.app = VerticaApp(self.appStatus)
super(Manager, self).__init__(CONF)
super(Manager, self).__init__('vertica')
@periodic_task.periodic_task
def update_status(self, context):
"""Update the status of the Vertica service."""
self.appStatus.update()
@property
def status(self):
return self.appStatus
def rpc_ping(self, context):
LOG.debug("Responding to RPC ping.")
return True
def prepare(self, context, packages, databases, memory_mb, users,
device_path=None, mount_point=None, backup_info=None,
config_contents=None, root_password=None, overrides=None,
cluster_config=None,
snapshot=None, path_exists_function=os.path.exists):
"""Makes ready DBAAS on a Guest container."""
try:
LOG.info(_("Setting instance status to BUILDING."))
self.appStatus.begin_install()
if device_path:
device = volume.VolumeDevice(device_path)
# unmount if device is already mounted
device.unmount_device(device_path)
device.format()
if path_exists_function(mount_point):
# rsync any existing data
device.migrate_data(mount_point)
# mount the volume
device.mount(mount_point)
LOG.debug("Mounted the volume.")
self.app.install_if_needed(packages)
self.app.prepare_for_install_vertica()
if cluster_config is None:
self.app.install_vertica()
self.app.create_db()
self.app.complete_install_or_restart()
elif cluster_config['instance_type'] == "member":
self.appStatus.set_status(rd_ins.ServiceStatuses.BUILD_PENDING)
else:
LOG.error(_("Bad cluster configuration; instance type "
"given as %s.") % cluster_config['instance_type'])
raise RuntimeError("Bad cluster configuration.")
LOG.info(_('Completed setup of Vertica database instance.'))
except Exception:
LOG.exception(_('Cannot prepare Vertica database instance.'))
self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED)
def do_prepare(self, context, packages, databases, memory_mb, users,
device_path, mount_point, backup_info,
config_contents, root_password, overrides,
cluster_config, snapshot):
"""This is called from prepare in the base class."""
if device_path:
device = volume.VolumeDevice(device_path)
# unmount if device is already mounted
device.unmount_device(device_path)
device.format()
if os.path.exists(mount_point):
# rsync any existing data
device.migrate_data(mount_point)
# mount the volume
device.mount(mount_point)
LOG.debug("Mounted the volume.")
self.app.install_if_needed(packages)
self.app.prepare_for_install_vertica()
if cluster_config is None:
self.app.install_vertica()
self.app.create_db()
elif cluster_config['instance_type'] != "member":
raise RuntimeError(_("Bad cluster configuration: instance type "
"given as %s.") %
cluster_config['instance_type'])
def restart(self, context):
LOG.debug("Restarting the database.")
self.app.restart()
LOG.debug("Restarted the database.")
def get_filesystem_stats(self, context, fs_path):
"""Gets the filesystem stats for the path given."""
LOG.debug("Finding the file-systems stats.")
mount_point = CONF.get(MANAGER).mount_point
return dbaas.get_filesystem_volume_stats(mount_point)
def stop_db(self, context, do_not_start_on_reboot=False):
LOG.debug("Stopping the database.")
self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)
@ -129,64 +104,64 @@ class Manager(periodic_task.PeriodicTasks):
def change_passwords(self, context, users):
LOG.debug("Changing password.")
raise exception.DatastoreOperationNotSupported(
operation='change_passwords', datastore=MANAGER)
operation='change_passwords', datastore=self.manager)
def update_attributes(self, context, username, hostname, user_attrs):
LOG.debug("Updating database attributes.")
raise exception.DatastoreOperationNotSupported(
operation='update_attributes', datastore=MANAGER)
operation='update_attributes', datastore=self.manager)
def create_database(self, context, databases):
LOG.debug("Creating database.")
raise exception.DatastoreOperationNotSupported(
operation='create_database', datastore=MANAGER)
operation='create_database', datastore=self.manager)
def create_user(self, context, users):
LOG.debug("Creating user.")
raise exception.DatastoreOperationNotSupported(
operation='create_user', datastore=MANAGER)
operation='create_user', datastore=self.manager)
def delete_database(self, context, database):
LOG.debug("Deleting database.")
raise exception.DatastoreOperationNotSupported(
operation='delete_database', datastore=MANAGER)
operation='delete_database', datastore=self.manager)
def delete_user(self, context, user):
LOG.debug("Deleting user.")
raise exception.DatastoreOperationNotSupported(
operation='delete_user', datastore=MANAGER)
operation='delete_user', datastore=self.manager)
def get_user(self, context, username, hostname):
LOG.debug("Getting user.")
raise exception.DatastoreOperationNotSupported(
operation='get_user', datastore=MANAGER)
operation='get_user', datastore=self.manager)
def grant_access(self, context, username, hostname, databases):
LOG.debug("Granting acccess.")
raise exception.DatastoreOperationNotSupported(
operation='grant_access', datastore=MANAGER)
operation='grant_access', datastore=self.manager)
def revoke_access(self, context, username, hostname, database):
LOG.debug("Revoking access.")
raise exception.DatastoreOperationNotSupported(
operation='revoke_access', datastore=MANAGER)
operation='revoke_access', datastore=self.manager)
def list_access(self, context, username, hostname):
LOG.debug("Listing access.")
raise exception.DatastoreOperationNotSupported(
operation='list_access', datastore=MANAGER)
operation='list_access', datastore=self.manager)
def list_databases(self, context, limit=None, marker=None,
include_marker=False):
LOG.debug("Listing databases.")
raise exception.DatastoreOperationNotSupported(
operation='list_databases', datastore=MANAGER)
operation='list_databases', datastore=self.manager)
def list_users(self, context, limit=None, marker=None,
include_marker=False):
LOG.debug("Listing users.")
raise exception.DatastoreOperationNotSupported(
operation='list_users', datastore=MANAGER)
operation='list_users', datastore=self.manager)
def enable_root(self, context):
LOG.debug("Enabling root.")
@ -203,7 +178,7 @@ class Manager(periodic_task.PeriodicTasks):
def create_backup(self, context, backup_info):
LOG.debug("Creating backup.")
raise exception.DatastoreOperationNotSupported(
operation='create_backup', datastore=MANAGER)
operation='create_backup', datastore=self.manager)
def start_db_with_conf_changes(self, context, config_contents):
LOG.debug("Starting with configuration changes.")
@ -226,8 +201,3 @@ class Manager(periodic_task.PeriodicTasks):
LOG.exception(_('Cluster installation failed.'))
self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED)
raise
def cluster_complete(self, context):
LOG.debug("Cluster creation complete, starting status checks.")
status = self.appStatus._get_actual_db_status()
self.appStatus.set_status(status)

View File

@ -107,7 +107,7 @@ class VerticaApp(object):
rd_instance.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db):
LOG.error(_("Could not stop Vertica."))
self.status.end_install_or_restart()
self.status.end_restart()
raise RuntimeError("Could not stop Vertica!")
LOG.debug("Database stopped.")
else:
@ -131,7 +131,7 @@ class VerticaApp(object):
(system.START_DB % (DB_NAME, db_password))]
subprocess.Popen(start_db_command)
if not self.status._is_restarting:
self.status.end_install_or_restart()
self.status.end_restart()
LOG.debug("Database started.")
except Exception:
raise RuntimeError("Could not start Vertica!")
@ -151,7 +151,7 @@ class VerticaApp(object):
self.stop_db()
self.start_db()
finally:
self.status.end_install_or_restart()
self.status.end_restart()
def create_db(self, members=netutils.get_my_ipv4()):
"""Prepare the guest machine with a Vertica db creation."""
@ -182,9 +182,6 @@ class VerticaApp(object):
self._generate_database_password()
LOG.info(_("install_vertica completed."))
def complete_install_or_restart(self):
self.status.end_install_or_restart()
def _generate_database_password(self):
"""Generate and write the password to vertica.cnf file."""
config = ConfigParser.ConfigParser()

View File

@ -0,0 +1,196 @@
# Copyright 2014 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
from oslo_log import log as logging
from oslo_service import periodic_task
from trove.common import cfg
from trove.common.i18n import _
from trove.guestagent import dbaas
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Manager(periodic_task.PeriodicTasks):
    """This is the base class for all datastore managers. Over time, common
    functionality should be pulled back here from the existing managers.
    """

    def __init__(self, manager_name):
        super(Manager, self).__init__(CONF)

        # Manager properties
        self.__manager_name = manager_name
        self.__manager = None
        self.__prepare_error = False

    @property
    def manager_name(self):
        """This returns the passed-in name of the manager."""
        return self.__manager_name

    @property
    def manager(self):
        """This returns the name of the manager, preferring the configured
        'datastore_manager' over the name passed in by the subclass.
        """
        if not self.__manager:
            self.__manager = CONF.datastore_manager or self.__manager_name
        return self.__manager

    @property
    def prepare_error(self):
        """Whether an error occurred during the 'do_prepare' phase."""
        return self.__prepare_error

    @prepare_error.setter
    def prepare_error(self, prepare_error):
        self.__prepare_error = prepare_error

    @property
    def configuration_manager(self):
        """If the datastore supports the new-style configuration manager,
        it should override this to return it.
        """
        return None

    @abc.abstractproperty
    def status(self):
        """This should return an instance of a status class that has been
        inherited from datastore.service.BaseDbStatus. Each datastore
        must implement this property.
        """
        return None

    ################
    # Status related
    ################
    @periodic_task.periodic_task
    def update_status(self, context):
        """Update the status of the trove instance. It is decorated with
        perodic_task so it is called automatically.
        """
        LOG.debug("Update status called.")
        self.status.update()

    def rpc_ping(self, context):
        """Simple liveness check; answering at all proves the agent is up."""
        LOG.debug("Responding to RPC ping.")
        return True

    #################
    # Prepare related
    #################
    def prepare(self, context, packages, databases, memory_mb, users,
                device_path=None, mount_point=None, backup_info=None,
                config_contents=None, root_password=None, overrides=None,
                cluster_config=None, snapshot=None):
        """Set up datastore on a Guest Instance.

        This is the template method: it brackets the datastore-specific
        'do_prepare' with status bookkeeping, then performs the common
        post-prepare steps (database/user creation, 'post_prepare' hook).
        """
        LOG.info(_("Starting datastore prepare for '%s'.") % self.manager)
        self.status.begin_install()
        # Cluster instances need additional processing before going ACTIVE,
        # so end_install must leave them in the INSTANCE_READY state.
        post_processing = bool(cluster_config)
        try:
            self.do_prepare(context, packages, databases, memory_mb,
                            users, device_path, mount_point, backup_info,
                            config_contents, root_password, overrides,
                            cluster_config, snapshot)
        except Exception as ex:
            self.prepare_error = True
            # Note: don't use ex.message here; it was deprecated in
            # Python 2.6 and is gone in Python 3.
            LOG.exception(_("An error occurred preparing datastore: %s") %
                          ex)
            raise
        finally:
            LOG.info(_("Ending datastore prepare for '%s'.") % self.manager)
            self.status.end_install(error_occurred=self.prepare_error,
                                    post_processing=post_processing)
        # At this point critical 'prepare' work is done and the instance
        # is now in the correct 'ACTIVE' 'INSTANCE_READY' or 'ERROR' state.
        # Of course if an error has occurred, none of the code that follows
        # will run.
        LOG.info(_('Completed setup of datastore successfully.'))

        # We only create databases and users automatically for non-cluster
        # instances.
        if not cluster_config:
            try:
                if databases:
                    LOG.debug('Calling add databases.')
                    self.create_database(context, databases)
                if users:
                    LOG.debug('Calling add users.')
                    self.create_user(context, users)
            except Exception as ex:
                LOG.exception(_("An error occurred creating databases/users: "
                                "%s") % ex)
                raise

        try:
            LOG.debug('Calling post_prepare.')
            self.post_prepare(context, packages, databases, memory_mb,
                              users, device_path, mount_point, backup_info,
                              config_contents, root_password, overrides,
                              cluster_config, snapshot)
        except Exception as ex:
            LOG.exception(_("An error occurred in post prepare: %s") %
                          ex)
            raise

    @abc.abstractmethod
    def do_prepare(self, context, packages, databases, memory_mb, users,
                   device_path, mount_point, backup_info, config_contents,
                   root_password, overrides, cluster_config, snapshot):
        """This is called from prepare when the Trove instance first comes
        online. 'Prepare' is the first rpc message passed from the
        task manager. do_prepare handles all the base configuration of
        the instance and is where the actual work is done. Once this method
        completes, the datastore is considered either 'ready' for use (or
        for final connections to other datastores) or in an 'error' state,
        and the status is changed accordingly. Each datastore must
        implement this method.
        """
        pass

    def post_prepare(self, context, packages, databases, memory_mb, users,
                     device_path, mount_point, backup_info, config_contents,
                     root_password, overrides, cluster_config, snapshot):
        """This is called after prepare has completed successfully.
        Processing done here should be limited to things that will not
        affect the actual 'running' status of the datastore (for example,
        creating databases and users, although these are now handled
        automatically). Any exceptions are caught, logged and rethrown,
        however no status changes are made and the end-user will not be
        informed of the error.
        """
        LOG.debug('No post_prepare work has been defined.')

    #####################
    # File System related
    #####################
    def get_filesystem_stats(self, context, fs_path):
        """Gets the filesystem stats for the path given."""
        # TODO(peterstac) - note that fs_path is not used in this method.
        mount_point = CONF.get(self.manager).mount_point
        LOG.debug("Getting file system stats for '%s'" % mount_point)
        return dbaas.get_filesystem_volume_stats(mount_point)

    #################
    # Cluster related
    #################
    def cluster_complete(self, context):
        """Callback for when cluster installation is finished; lets the
        status checks resume normal reporting.
        """
        LOG.debug("Cluster creation complete, starting status checks.")
        self.status.end_install()

View File

@ -18,9 +18,10 @@
from oslo_utils import importutils
from trove.common import cfg
from trove.guestagent.datastore.mysql import manager_base
from trove.guestagent.datastore.mysql_common import manager
from trove.guestagent.strategies.replication import get_replication_strategy
CONF = cfg.CONF
MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'mysql'
REPLICATION_STRATEGY = CONF.get(MANAGER).replication_strategy
@ -33,7 +34,7 @@ MYSQL_APP_STATUS = "trove.guestagent.datastore.mysql.service.MySqlAppStatus"
MYSQL_ADMIN = "trove.guestagent.datastore.mysql.service.MySqlAdmin"
class Manager(manager_base.BaseMySqlManager):
class Manager(manager.MySqlManager):
def __init__(self):
mysql_app = importutils.import_class(MYSQL_APP)

View File

@ -17,37 +17,37 @@
#
from oslo_log import log as logging
from trove.guestagent.datastore.mysql import service_base
from trove.guestagent.datastore.mysql_common import service
LOG = logging.getLogger(__name__)
CONF = service_base.CONF
CONF = service.CONF
class KeepAliveConnection(service_base.BaseKeepAliveConnection):
class KeepAliveConnection(service.BaseKeepAliveConnection):
pass
class MySqlAppStatus(service_base.BaseMySqlAppStatus):
class MySqlAppStatus(service.BaseMySqlAppStatus):
pass
class LocalSqlClient(service_base.BaseLocalSqlClient):
class LocalSqlClient(service.BaseLocalSqlClient):
pass
class MySqlApp(service_base.BaseMySqlApp):
class MySqlApp(service.BaseMySqlApp):
def __init__(self, status):
super(MySqlApp, self).__init__(status, LocalSqlClient,
KeepAliveConnection)
class MySqlRootAccess(service_base.BaseMySqlRootAccess):
class MySqlRootAccess(service.BaseMySqlRootAccess):
def __init__(self):
super(MySqlRootAccess, self).__init__(LocalSqlClient,
MySqlApp(MySqlAppStatus.get()))
class MySqlAdmin(service_base.BaseMySqlAdmin):
class MySqlAdmin(service.BaseMySqlAdmin):
def __init__(self):
super(MySqlAdmin, self).__init__(LocalSqlClient, MySqlRootAccess(),
MySqlApp)

View File

@ -19,38 +19,36 @@
import os
from oslo_log import log as logging
from oslo_service import periodic_task
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.guestagent import backup
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.mysql import service_base
from trove.guestagent import dbaas
from trove.guestagent.datastore import manager
from trove.guestagent.datastore.mysql_common import service
from trove.guestagent.strategies.replication import get_replication_strategy
from trove.guestagent import volume
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class BaseMySqlManager(periodic_task.PeriodicTasks):
class MySqlManager(manager.Manager):
def __init__(self, mysql_app, mysql_app_status, mysql_admin,
replication_strategy, replication_namespace,
replication_strategy_class, manager):
replication_strategy_class, manager_name):
super(BaseMySqlManager, self).__init__(CONF)
super(MySqlManager, self).__init__(manager_name)
self._mysql_app = mysql_app
self._mysql_app_status = mysql_app_status
self._mysql_admin = mysql_admin
self._replication_strategy = replication_strategy
self._replication_namespace = replication_namespace
self._replication_strategy_class = replication_strategy_class
self._manager = manager
self.volume_do_not_start_on_reboot = False
@property
def mysql_app(self):
@ -78,17 +76,8 @@ class BaseMySqlManager(periodic_task.PeriodicTasks):
self._replication_namespace)
@property
def manager(self):
return self._manager
@periodic_task.periodic_task
def update_status(self, context):
"""Update the status of the MySQL service."""
self.mysql_app_status.get().update()
def rpc_ping(self, context):
LOG.debug("Responding to RPC ping.")
return True
def status(self):
return self.mysql_app_status.get()
def change_passwords(self, context, users):
return self.mysql_admin().change_passwords(users)
@ -156,18 +145,17 @@ class BaseMySqlManager(periodic_task.PeriodicTasks):
raise
LOG.info(_("Restored database successfully."))
def prepare(self, context, packages, databases, memory_mb, users,
device_path=None, mount_point=None, backup_info=None,
config_contents=None, root_password=None, overrides=None,
cluster_config=None, snapshot=None):
"""Makes ready DBAAS on a Guest container."""
self.mysql_app_status.get().begin_install()
# status end_mysql_install set with secure()
def do_prepare(self, context, packages, databases, memory_mb, users,
device_path, mount_point, backup_info,
config_contents, root_password, overrides,
cluster_config, snapshot):
"""This is called from prepare in the base class."""
app = self.mysql_app(self.mysql_app_status.get())
app.install_if_needed(packages)
if device_path:
# stop and do not update database
app.stop_db()
app.stop_db(
do_not_start_on_reboot=self.volume_do_not_start_on_reboot)
device = volume.VolumeDevice(device_path)
# unmount if device is already mounted
device.unmount_device(device_path)
@ -178,8 +166,8 @@ class BaseMySqlManager(periodic_task.PeriodicTasks):
device.migrate_data(mount_point, target_subdir="data")
# mount the volume
device.mount(mount_point)
operating_system.chown(mount_point, service_base.MYSQL_OWNER,
service_base.MYSQL_OWNER,
operating_system.chown(mount_point, service.MYSQL_OWNER,
service.MYSQL_OWNER,
recursive=False, as_root=True)
LOG.debug("Mounted the volume at %s." % mount_point)
@ -205,19 +193,9 @@ class BaseMySqlManager(periodic_task.PeriodicTasks):
else:
app.secure_root(secure_remote_root=True)
app.complete_install_or_restart()
if databases:
self.create_database(context, databases)
if users:
self.create_user(context, users)
if snapshot:
self.attach_replica(context, snapshot, snapshot['config'])
LOG.info(_('Completed setup of MySQL database instance.'))
def restart(self, context):
app = self.mysql_app(self.mysql_app_status.get())
app.restart()
@ -230,11 +208,6 @@ class BaseMySqlManager(periodic_task.PeriodicTasks):
app = self.mysql_app(self.mysql_app_status.get())
app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)
def get_filesystem_stats(self, context, fs_path):
"""Gets the filesystem stats for the path given."""
mount_point = CONF.get(self.manager).mount_point
return dbaas.get_filesystem_volume_stats(mount_point)
def create_backup(self, context, backup_info):
"""
Entry point for initiating a backup for this guest agents db instance.
@ -291,8 +264,7 @@ class BaseMySqlManager(periodic_task.PeriodicTasks):
replication.snapshot_for_replication(context, app, None,
snapshot_info))
mount_point = CONF.get(self.manager).mount_point
volume_stats = dbaas.get_filesystem_volume_stats(mount_point)
volume_stats = self.get_filesystem_stats(context, None)
replication_snapshot = {
'dataset': {
@ -352,8 +324,7 @@ class BaseMySqlManager(periodic_task.PeriodicTasks):
'guest_strategy': self.replication_strategy
}))
mount_point = CONF.get(self.manager).mount_point
volume_stats = dbaas.get_filesystem_volume_stats(mount_point)
volume_stats = self.get_filesystem_stats(context, None)
if (volume_stats.get('total', 0.0) <
replica_info['dataset']['dataset_size']):
raise exception.InsufficientSpaceForReplica(

View File

@ -657,9 +657,6 @@ class BaseMySqlApp(object):
LOG.info(_("Finished installing MySQL server."))
self.start_mysql()
def complete_install_or_restart(self):
self.status.end_install_or_restart()
def secure(self, config_contents, overrides):
LOG.info(_("Generating admin password."))
admin_password = utils.generate_random_password()
@ -753,7 +750,7 @@ class BaseMySqlApp(object):
rd_instance.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db):
LOG.error(_("Could not stop MySQL."))
self.status.end_install_or_restart()
self.status.end_restart()
raise RuntimeError("Could not stop MySQL!")
def _remove_anonymous_user(self, client):
@ -770,7 +767,7 @@ class BaseMySqlApp(object):
self.stop_db()
self.start_mysql()
finally:
self.status.end_install_or_restart()
self.status.end_restart()
def update_overrides(self, overrides):
self._apply_user_overrides(overrides)
@ -959,7 +956,7 @@ class BaseMySqlApp(object):
except exception.ProcessExecutionError:
LOG.exception(_("Error killing stalled MySQL start command."))
# There's nothing more we can do...
self.status.end_install_or_restart()
self.status.end_restart()
raise RuntimeError("Could not start MySQL!")
def start_db_with_conf_changes(self, config_contents):

View File

@ -14,6 +14,7 @@
# under the License.
import os
import time
from oslo_log import log as logging
@ -23,6 +24,7 @@ from trove.common import context as trove_context
from trove.common.i18n import _
from trove.common import instance
from trove.conductor import api as conductor_api
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.common import timeutils
@ -46,7 +48,7 @@ class BaseDbStatus(object):
not (so if there is a Python Pete crash update() will set the status to
show a failure).
These modes are exited and functionality to update() returns when
end_install_or_restart() is called, at which point the status again
end_install or end_restart() is called, at which point the status again
reflects the actual status of the DB app.
This is a base class, subclasses must implement real logic for
@ -55,30 +57,82 @@ class BaseDbStatus(object):
_instance = None
GUESTAGENT_DIR = '~'
PREPARE_START_FILENAME = '.guestagent.prepare.start'
PREPARE_END_FILENAME = '.guestagent.prepare.end'
def __init__(self):
if self._instance is not None:
raise RuntimeError("Cannot instantiate twice.")
self.status = None
self.restart_mode = False
self._prepare_completed = None
@property
def prepare_completed(self):
if self._prepare_completed is None:
# Force the file check
self.prepare_completed = None
return self._prepare_completed
@prepare_completed.setter
def prepare_completed(self, value):
# Set the value based on the existence of the file; 'value' is ignored
# This is required as the value of prepare_completed is cached, so
# this must be referenced any time the existence of the file changes
self._prepare_completed = os.path.isfile(
guestagent_utils.build_file_path(
self.GUESTAGENT_DIR, self.PREPARE_END_FILENAME))
def begin_install(self):
"""Called right before DB is prepared."""
self.set_status(instance.ServiceStatuses.BUILDING)
"""First call of the DB prepare."""
prepare_start_file = guestagent_utils.build_file_path(
self.GUESTAGENT_DIR, self.PREPARE_START_FILENAME)
operating_system.write_file(prepare_start_file, '')
self.prepare_completed = False
self.set_status(instance.ServiceStatuses.BUILDING, True)
def begin_restart(self):
"""Called before restarting DB server."""
self.restart_mode = True
def end_install_or_restart(self):
"""Called after DB is installed or restarted.
def end_install(self, error_occurred=False, post_processing=False):
"""Called after prepare has ended."""
# Set the "we're done" flag if there's no error and
# no post_processing is necessary
if not (error_occurred or post_processing):
prepare_end_file = guestagent_utils.build_file_path(
self.GUESTAGENT_DIR, self.PREPARE_END_FILENAME)
operating_system.write_file(prepare_end_file, '')
self.prepare_completed = True
final_status = None
if error_occurred:
final_status = instance.ServiceStatuses.FAILED
elif post_processing:
final_status = instance.ServiceStatuses.INSTANCE_READY
if final_status:
LOG.info(_("Set final status to %s.") % final_status)
self.set_status(final_status, force=True)
else:
self._end_install_or_restart(True)
def end_restart(self):
self.restart_mode = False
LOG.info(_("Ending restart."))
self._end_install_or_restart(False)
def _end_install_or_restart(self, force):
"""Called after DB is installed or restarted.
Updates the database with the actual DB server status.
"""
LOG.debug("Ending install_if_needed or restart.")
self.restart_mode = False
real_status = self._get_actual_db_status()
LOG.info(_("Updating database status to %s.") % real_status)
self.set_status(real_status, force=True)
LOG.info(_("Current database status is '%s'.") % real_status)
self.set_status(real_status, force=force)
def _get_actual_db_status(self):
raise NotImplementedError()
@ -89,10 +143,7 @@ class BaseDbStatus(object):
True if DB app should be installed and attempts to ascertain
its status won't result in nonsense.
"""
return (self.status != instance.ServiceStatuses.NEW and
self.status != instance.ServiceStatuses.BUILDING and
self.status != instance.ServiceStatuses.BUILD_PENDING and
self.status != instance.ServiceStatuses.FAILED)
return self.prepare_completed
@property
def _is_restarting(self):
@ -106,28 +157,19 @@ class BaseDbStatus(object):
def set_status(self, status, force=False):
"""Use conductor to update the DB app status."""
force_heartbeat_status = (
status == instance.ServiceStatuses.FAILED or
status == instance.ServiceStatuses.BUILD_PENDING)
if (not force_heartbeat_status and not force and
(self.status == instance.ServiceStatuses.NEW or
self.status == instance.ServiceStatuses.BUILDING)):
LOG.debug("Prepare has not run yet, skipping heartbeat.")
return
if force or self.is_installed:
LOG.debug("Casting set_status message to conductor "
"(status is '%s')." % status.description)
context = trove_context.TroveContext()
LOG.debug("Casting set_status message to conductor (status is '%s')." %
status.description)
context = trove_context.TroveContext()
heartbeat = {
'service_status': status.description,
}
conductor_api.API(context).heartbeat(CONF.guest_id,
heartbeat,
sent=timeutils.float_utcnow())
LOG.debug("Successfully cast set_status.")
self.status = status
heartbeat = {'service_status': status.description}
conductor_api.API(context).heartbeat(
CONF.guest_id, heartbeat, sent=timeutils.float_utcnow())
LOG.debug("Successfully cast set_status.")
self.status = status
else:
LOG.debug("Prepare has not completed yet, skipping heartbeat.")
def update(self):
"""Find and report status of DB on this machine.
@ -170,7 +212,7 @@ class BaseDbStatus(object):
LOG.exception(e)
raise RuntimeError(_("Database restart failed."))
finally:
self.end_install_or_restart()
self.end_restart()
def start_db_service(self, service_candidates, timeout,
enable_on_boot=True, update_db=False):

View File

@ -21,7 +21,7 @@ from oslo_log import log as logging
from trove.common.i18n import _
from trove.guestagent.datastore.mysql.service import MySqlApp
from trove.guestagent.datastore.mysql.service_base import ADMIN_USER_NAME
from trove.guestagent.datastore.mysql_common.service import ADMIN_USER_NAME
from trove.guestagent.strategies.backup import base
LOG = logging.getLogger(__name__)

View File

@ -214,7 +214,7 @@ class ClusterTasks(Cluster):
LOG.debug("Instance %s in %s, exiting polling." % (
instance_id, status))
return True
if status != ServiceStatuses.BUILD_PENDING:
if status != ServiceStatuses.INSTANCE_READY:
# if one is not in a cluster-ready state,
# continue polling
LOG.debug("Instance %s in %s, continue polling." % (
@ -574,7 +574,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
service = InstanceServiceStatus.find_by(instance_id=self.id)
status = service.get_status()
if (status == rd_instance.ServiceStatuses.RUNNING or
status == rd_instance.ServiceStatuses.BUILD_PENDING):
status == rd_instance.ServiceStatuses.INSTANCE_READY):
return True
elif status not in [rd_instance.ServiceStatuses.NEW,
rd_instance.ServiceStatuses.BUILDING]:

View File

@ -16,7 +16,7 @@ import hashlib
import mock
import os
from mock import Mock, MagicMock, patch, ANY
from mock import Mock, MagicMock, patch, ANY, DEFAULT
from oslo_utils import netutils
from webob.exc import HTTPNotFound
@ -168,6 +168,15 @@ class BackupAgentTest(trove_testtools.TestCase):
def setUp(self):
super(BackupAgentTest, self).setUp()
self.patch_ope = patch.multiple('os.path',
exists=DEFAULT)
self.mock_ope = self.patch_ope.start()
self.addCleanup(self.patch_ope.stop)
self.patch_pc = patch('trove.guestagent.datastore.service.'
'BaseDbStatus.prepare_completed')
self.mock_pc = self.patch_pc.start()
self.mock_pc.__get__ = Mock(return_value=True)
self.addCleanup(self.patch_pc.stop)
self.get_auth_pwd_patch = patch.object(
MySqlApp, 'get_auth_password', MagicMock(return_value='123'))
self.get_auth_pwd_mock = self.get_auth_pwd_patch.start()
@ -242,7 +251,7 @@ class BackupAgentTest(trove_testtools.TestCase):
self.assertIsNotNone(cbbackup.manifest)
self.assertIn('gz.enc', cbbackup.manifest)
@mock.patch.object(MongoDBApp, '_init_overrides_dir')
@mock.patch.object(MongoDBApp, '_init_overrides_dir', return_value='')
def test_backup_impl_MongoDump(self, _):
netutils.get_my_ipv4 = Mock(return_value="1.1.1.1")
utils.execute_with_timeout = Mock(return_value=None)

View File

@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mock import ANY, DEFAULT, patch
from mock import ANY, DEFAULT, Mock, patch
from testtools.testcase import ExpectedException
from trove.common import exception
from trove.common import utils
@ -96,6 +96,11 @@ class GuestAgentBackupTest(trove_testtools.TestCase):
def setUp(self):
super(GuestAgentBackupTest, self).setUp()
self.patch_pc = patch('trove.guestagent.datastore.service.'
'BaseDbStatus.prepare_completed')
self.mock_pc = self.patch_pc.start()
self.mock_pc.__get__ = Mock(return_value=True)
self.addCleanup(self.patch_pc.stop)
self.get_auth_pwd_patch = patch.object(
MySqlApp, 'get_auth_password', mock.Mock(return_value='password'))
self.get_auth_pwd_mock = self.get_auth_pwd_patch.start()
@ -333,7 +338,8 @@ class GuestAgentBackupTest(trove_testtools.TestCase):
# (see bug/1423759).
remove.assert_called_once_with(ANY, force=True, as_root=True)
@mock.patch.object(MongoDBApp, '_init_overrides_dir')
@mock.patch.object(MongoDBApp, '_init_overrides_dir',
return_value='')
def test_backup_encrypted_mongodump_command(self, _):
backupBase.BackupRunner.is_encrypted = True
backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
@ -345,7 +351,8 @@ class GuestAgentBackupTest(trove_testtools.TestCase):
MONGODUMP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command)
self.assertIn("gz.enc", bkp.manifest)
@mock.patch.object(MongoDBApp, '_init_overrides_dir')
@mock.patch.object(MongoDBApp, '_init_overrides_dir',
return_value='')
def test_backup_not_encrypted_mongodump_command(self, _):
backupBase.BackupRunner.is_encrypted = False
backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
@ -356,7 +363,8 @@ class GuestAgentBackupTest(trove_testtools.TestCase):
self.assertEqual(MONGODUMP_CMD + PIPE + ZIP, bkp.command)
self.assertIn("gz", bkp.manifest)
@mock.patch.object(MongoDBApp, '_init_overrides_dir')
@mock.patch.object(MongoDBApp, '_init_overrides_dir',
return_value='')
def test_restore_decrypted_mongodump_command(self, _):
restoreBase.RestoreRunner.is_zipped = True
restoreBase.RestoreRunner.is_encrypted = False
@ -365,7 +373,8 @@ class GuestAgentBackupTest(trove_testtools.TestCase):
location="filename", checksum="md5")
self.assertEqual(restr.restore_cmd, UNZIP + PIPE + MONGODUMP_RESTORE)
@mock.patch.object(MongoDBApp, '_init_overrides_dir')
@mock.patch.object(MongoDBApp, '_init_overrides_dir',
return_value='')
def test_restore_encrypted_mongodump_command(self, _):
restoreBase.RestoreRunner.is_zipped = True
restoreBase.RestoreRunner.is_encrypted = True
@ -520,7 +529,8 @@ class MongodbBackupTests(trove_testtools.TestCase):
self.exec_timeout_patch.start()
self.mongodb_init_overrides_dir_patch = patch.object(
MongoDBApp,
'_init_overrides_dir')
'_init_overrides_dir',
return_value='')
self.mongodb_init_overrides_dir_patch.start()
self.backup_runner = utils.import_class(
BACKUP_MONGODUMP_CLS)
@ -558,10 +568,14 @@ class MongodbBackupTests(trove_testtools.TestCase):
class MongodbRestoreTests(trove_testtools.TestCase):
@patch.object(MongoDBApp, '_init_overrides_dir')
@patch.object(MongoDBApp, '_init_overrides_dir',
return_value='')
def setUp(self, _):
super(MongodbRestoreTests, self).setUp()
self.patch_ope = patch('os.path.expanduser')
self.mock_ope = self.patch_ope.start()
self.addCleanup(self.patch_ope.stop)
self.restore_runner = utils.import_class(
RESTORE_MONGODUMP_CLS)('swift', location='http://some.where',
checksum='True_checksum',

View File

@ -47,8 +47,6 @@ class GuestAgentCouchbaseManagerTest(testtools.TestCase):
self.origin_start_db = couch_service.CouchbaseApp.start_db
self.origin_restart = couch_service.CouchbaseApp.restart
self.origin_install_if = couch_service.CouchbaseApp.install_if_needed
self.origin_complete_install = \
couch_service.CouchbaseApp.complete_install_or_restart
netutils.get_my_ipv4 = MagicMock()
def tearDown(self):
@ -61,8 +59,6 @@ class GuestAgentCouchbaseManagerTest(testtools.TestCase):
couch_service.CouchbaseApp.start_db = self.origin_start_db
couch_service.CouchbaseApp.restart = self.origin_restart
couch_service.CouchbaseApp.install_if_needed = self.origin_install_if
couch_service.CouchbaseApp.complete_install_or_restart = \
self.origin_complete_install
def test_update_status(self):
mock_status = MagicMock()
@ -97,8 +93,6 @@ class GuestAgentCouchbaseManagerTest(testtools.TestCase):
return_value=None)
couch_service.CouchbaseApp.initial_setup = MagicMock(
return_value=None)
couch_service.CouchbaseApp.complete_install_or_restart = MagicMock(
return_value=None)
backup.restore = MagicMock(return_value=None)
# invocation
@ -112,8 +106,6 @@ class GuestAgentCouchbaseManagerTest(testtools.TestCase):
mock_status.begin_install.assert_any_call()
couch_service.CouchbaseApp.install_if_needed.assert_any_call(
self.packages)
couch_service.CouchbaseApp.complete_install_or_restart.\
assert_any_call()
if backup_info:
backup.restore.assert_any_call(self.context,
backup_info,

View File

@ -76,7 +76,7 @@ from trove.guestagent.datastore.mysql.service import MySqlAdmin
from trove.guestagent.datastore.mysql.service import MySqlApp
from trove.guestagent.datastore.mysql.service import MySqlAppStatus
from trove.guestagent.datastore.mysql.service import MySqlRootAccess
import trove.guestagent.datastore.mysql.service_base as dbaas_base
import trove.guestagent.datastore.mysql_common.service as dbaas_base
import trove.guestagent.datastore.service as base_datastore_service
from trove.guestagent.datastore.service import BaseDbStatus
from trove.guestagent.db import models
@ -112,6 +112,7 @@ class FakeAppStatus(BaseDbStatus):
self.id = id
self.status = status
self.next_fake_status = status
self._prepare_completed = None
def _get_actual_db_status(self):
return self.next_fake_status
@ -1822,46 +1823,32 @@ class BaseDbStatusTest(testtools.TestCase):
self.assertTrue(base_db_status.restart_mode)
def test_end_install_or_restart(self):
def test_end_restart(self):
base_db_status = BaseDbStatus()
base_db_status._get_actual_db_status = Mock(
return_value=rd_instance.ServiceStatuses.SHUTDOWN)
base_db_status.end_install_or_restart()
with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
patch_pc.__get__ = Mock(return_value=True)
base_db_status.end_restart()
self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN,
base_db_status.status)
self.assertFalse(base_db_status.restart_mode)
self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN,
base_db_status.status)
self.assertFalse(base_db_status.restart_mode)
def test_is_installed(self):
base_db_status = BaseDbStatus()
base_db_status.status = rd_instance.ServiceStatuses.RUNNING
self.assertTrue(base_db_status.is_installed)
def test_is_installed_none(self):
base_db_status = BaseDbStatus()
base_db_status.status = None
self.assertTrue(base_db_status.is_installed)
def test_is_installed_building(self):
base_db_status = BaseDbStatus()
base_db_status.status = rd_instance.ServiceStatuses.BUILDING
self.assertFalse(base_db_status.is_installed)
def test_is_installed_new(self):
base_db_status = BaseDbStatus()
base_db_status.status = rd_instance.ServiceStatuses.NEW
self.assertFalse(base_db_status.is_installed)
with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
patch_pc.__get__ = Mock(return_value=True)
self.assertTrue(base_db_status.is_installed)
def test_is_installed_failed(self):
base_db_status = BaseDbStatus()
base_db_status.status = rd_instance.ServiceStatuses.FAILED
self.assertFalse(base_db_status.is_installed)
with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
patch_pc.__get__ = Mock(return_value=False)
self.assertFalse(base_db_status.is_installed)
def test_is_restarting(self):
base_db_status = BaseDbStatus()
@ -1902,13 +1889,15 @@ class BaseDbStatusTest(testtools.TestCase):
(rd_instance.ServiceStatuses.SHUTDOWN, 10))
def _test_set_status(self, initial_status, new_status,
expected_status, force=False):
expected_status, install_done=False, force=False):
base_db_status = BaseDbStatus()
base_db_status.status = initial_status
base_db_status.set_status(new_status, force=force)
with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
patch_pc.__get__ = Mock(return_value=install_done)
base_db_status.set_status(new_status, force=force)
self.assertEqual(expected_status,
base_db_status.status)
self.assertEqual(expected_status,
base_db_status.status)
def test_set_status_force_heartbeat(self):
self._test_set_status(rd_instance.ServiceStatuses.BUILDING,
@ -1929,12 +1918,20 @@ class BaseDbStatusTest(testtools.TestCase):
def test_set_status_to_failed(self):
self._test_set_status(rd_instance.ServiceStatuses.BUILDING,
rd_instance.ServiceStatuses.FAILED,
rd_instance.ServiceStatuses.FAILED)
rd_instance.ServiceStatuses.FAILED,
force=True)
def test_set_status_to_build_pending(self):
self._test_set_status(rd_instance.ServiceStatuses.BUILDING,
rd_instance.ServiceStatuses.BUILD_PENDING,
rd_instance.ServiceStatuses.BUILD_PENDING)
rd_instance.ServiceStatuses.INSTANCE_READY,
rd_instance.ServiceStatuses.INSTANCE_READY,
force=True)
def test_set_status_to_shutdown(self):
self._test_set_status(rd_instance.ServiceStatuses.RUNNING,
rd_instance.ServiceStatuses.SHUTDOWN,
rd_instance.ServiceStatuses.SHUTDOWN,
install_done=True)
def test_wait_for_database_service_status(self):
status = BaseDbStatus()
@ -2087,14 +2084,14 @@ class BaseDbStatusTest(testtools.TestCase):
# Trove instance status updates are suppressed during restart.
with patch.multiple(
status, start_db_service=DEFAULT, stop_db_service=DEFAULT,
begin_restart=DEFAULT, end_install_or_restart=DEFAULT):
begin_restart=DEFAULT, end_restart=DEFAULT):
status.restart_db_service(service_candidates, 10)
status.begin_restart.assert_called_once_with()
status.stop_db_service.assert_called_once_with(
service_candidates, 10, disable_on_boot=False, update_db=False)
status.start_db_service.assert_called_once_with(
service_candidates, 10, enable_on_boot=False, update_db=False)
status.end_install_or_restart.assert_called_once_with()
status.end_restart.assert_called_once_with()
# Test a failing call.
# Assert the status heartbeat gets re-enabled.
@ -2102,12 +2099,12 @@ class BaseDbStatusTest(testtools.TestCase):
status, start_db_service=Mock(
side_effect=Exception("Error in database start.")),
stop_db_service=DEFAULT, begin_restart=DEFAULT,
end_install_or_restart=DEFAULT):
end_restart=DEFAULT):
self.assertRaisesRegexp(
RuntimeError, "Database restart failed.",
status.restart_db_service, service_candidates, 10)
status.begin_restart.assert_called_once_with()
status.end_install_or_restart.assert_called_once_with()
status.end_restart.assert_called_once_with()
class MySqlAppStatusTest(testtools.TestCase):
@ -2187,6 +2184,9 @@ class TestRedisApp(testtools.TestCase):
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.orig_os_path_eu = os.path.expanduser
os.path.expanduser = Mock(return_value='/tmp/.file')
with patch.multiple(RedisApp, _build_admin_client=DEFAULT,
_init_overrides_dir=DEFAULT):
self.app = RedisApp(state_change_wait_time=0)
@ -2200,6 +2200,7 @@ class TestRedisApp(testtools.TestCase):
super(TestRedisApp, self).tearDown()
self.app = None
os.path.isfile = self.orig_os_path_isfile
os.path.expanduser = self.orig_os_path_eu
utils.execute_with_timeout = self.orig_utils_execute_with_timeout
rservice.utils.execute_with_timeout = \
self.orig_utils_execute_with_timeout
@ -2310,7 +2311,7 @@ class TestRedisApp(testtools.TestCase):
stop_srv_mock.assert_called_once_with(
RedisSystem.SERVICE_CANDIDATES)
self.assertTrue(RedisApp._disable_redis_on_boot.called)
self.assertTrue(mock_status.end_install_or_restart.called)
self.assertTrue(mock_status.end_restart.called)
self.assertTrue(
mock_status.wait_for_real_status_to_change_to.called)
@ -2320,13 +2321,13 @@ class TestRedisApp(testtools.TestCase):
mock_status.begin_restart = MagicMock(return_value=None)
with patch.object(RedisApp, 'stop_db', return_value=None):
with patch.object(RedisApp, 'start_redis', return_value=None):
mock_status.end_install_or_restart = MagicMock(
mock_status.end_restart = MagicMock(
return_value=None)
self.app.restart()
mock_status.begin_restart.assert_any_call()
RedisApp.stop_db.assert_any_call()
RedisApp.start_redis.assert_any_call()
mock_status.end_install_or_restart.assert_any_call()
mock_status.end_restart.assert_any_call()
def test_start_redis(self):
mock_status = MagicMock()
@ -2340,14 +2341,14 @@ class TestRedisApp(testtools.TestCase):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=False)
mock_status.end_install_or_restart = MagicMock()
mock_status.end_restart = MagicMock()
self._assert_start_redis(mock_status)
exec_mock.assert_called_once_with('pkill', '-9', 'redis-server',
run_as_root=True, root_helper='sudo')
mock_status.end_install_or_restart.assert_called_once_with()
mock_status.end_restart.assert_called_once_with()
@patch.multiple(operating_system, start_service=DEFAULT,
enable_service_on_boot=DEFAULT)
@ -2633,11 +2634,13 @@ class CouchbaseAppTest(testtools.TestCase):
self.couchbaseApp.stop_db = Mock()
self.couchbaseApp.start_db = Mock()
self.couchbaseApp.restart()
with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
patch_pc.__get__ = Mock(return_value=True)
self.couchbaseApp.restart()
self.assertTrue(self.couchbaseApp.stop_db.called)
self.assertTrue(self.couchbaseApp.start_db.called)
self.assertTrue(conductor_api.API.heartbeat.called)
self.assertTrue(self.couchbaseApp.stop_db.called)
self.assertTrue(self.couchbaseApp.start_db.called)
self.assertTrue(conductor_api.API.heartbeat.called)
def test_start_db(self):
couchservice.utils.execute_with_timeout = Mock()
@ -2660,8 +2663,10 @@ class CouchbaseAppTest(testtools.TestCase):
self.couchbaseApp.state_change_wait_time = 1
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.assertRaises(RuntimeError, self.couchbaseApp.start_db)
self.assertTrue(conductor_api.API.heartbeat.called)
with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
patch_pc.__get__ = Mock(return_value=True)
self.assertRaises(RuntimeError, self.couchbaseApp.start_db)
self.assertTrue(conductor_api.API.heartbeat.called)
def test_install_when_couchbase_installed(self):
couchservice.packager.pkg_is_installed = Mock(return_value=True)
@ -2736,11 +2741,13 @@ class CouchDBAppTest(testtools.TestCase):
self.couchdbApp.stop_db = Mock()
self.couchdbApp.start_db = Mock()
self.couchdbApp.restart()
with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
patch_pc.__get__ = Mock(return_value=True)
self.couchdbApp.restart()
self.assertTrue(self.couchdbApp.stop_db.called)
self.assertTrue(self.couchdbApp.start_db.called)
self.assertTrue(conductor_api.API.heartbeat.called)
self.assertTrue(self.couchdbApp.stop_db.called)
self.assertTrue(self.couchdbApp.start_db.called)
self.assertTrue(conductor_api.API.heartbeat.called)
def test_start_db(self):
couchdb_service.utils.execute_with_timeout = Mock()
@ -2785,6 +2792,8 @@ class MongoDBAppTest(testtools.TestCase):
self.orig_packager = mongo_system.PACKAGER
self.orig_service_discovery = operating_system.service_discovery
self.orig_os_unlink = os.unlink
self.orig_os_path_eu = os.path.expanduser
os.path.expanduser = Mock(return_value='/tmp/.file')
operating_system.service_discovery = (
self.fake_mongodb_service_discovery)
@ -2807,6 +2816,7 @@ class MongoDBAppTest(testtools.TestCase):
mongo_system.PACKAGER = self.orig_packager
operating_system.service_discovery = self.orig_service_discovery
os.unlink = self.orig_os_unlink
os.path.expanduser = self.orig_os_path_eu
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
def assert_reported_status(self, expected_status):
@ -3194,7 +3204,7 @@ class VerticaAppTest(testtools.TestCase):
mock_status.begin_restart = MagicMock(return_value=None)
with patch.object(VerticaApp, 'stop_db', return_value=None):
with patch.object(VerticaApp, 'start_db', return_value=None):
mock_status.end_install_or_restart = MagicMock(
mock_status.end_restart = MagicMock(
return_value=None)
app.restart()
mock_status.begin_restart.assert_any_call()
@ -3208,7 +3218,7 @@ class VerticaAppTest(testtools.TestCase):
with patch.object(app, '_enable_db_on_boot', return_value=None):
with patch.object(app, 'read_config',
return_value=self.test_config):
mock_status.end_install_or_restart = MagicMock(
mock_status.end_restart = MagicMock(
return_value=None)
app.start_db()
agent_start, db_start = subprocess.Popen.call_args_list
@ -3218,7 +3228,7 @@ class VerticaAppTest(testtools.TestCase):
db_expected_cmd = [
'sudo', 'su', '-', 'dbadmin', '-c',
(vertica_system.START_DB % ('db_srvr', 'some_password'))]
self.assertTrue(mock_status.end_install_or_restart.called)
self.assertTrue(mock_status.end_restart.called)
agent_start.assert_called_with(agent_expected_command)
db_start.assert_called_with(db_expected_cmd)
@ -3244,7 +3254,7 @@ class VerticaAppTest(testtools.TestCase):
['', '']])):
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
mock_status.end_install_or_restart = MagicMock(
mock_status.end_restart = MagicMock(
return_value=None)
app.stop_db()
@ -3304,7 +3314,7 @@ class VerticaAppTest(testtools.TestCase):
['', '']])):
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=None)
mock_status.end_install_or_restart = MagicMock(
mock_status.end_restart = MagicMock(
return_value=None)
self.assertRaises(RuntimeError, app.stop_db)
@ -3458,11 +3468,6 @@ class VerticaAppTest(testtools.TestCase):
side_effect=ConfigParser.Error()):
self.assertRaises(RuntimeError, self.app.read_config)
def test_complete_install_or_restart(self):
app = VerticaApp(MagicMock())
app.complete_install_or_restart()
app.status.end_install_or_restart.assert_any_call()
def test_start_db_with_conf_changes(self):
mock_status = MagicMock()
type(mock_status)._is_restarting = PropertyMock(return_value=False)
@ -3470,7 +3475,7 @@ class VerticaAppTest(testtools.TestCase):
with patch.object(app, 'read_config',
return_value=self.test_config):
app.start_db_with_conf_changes('test_config_contents')
app.status.end_install_or_restart.assert_any_call()
app.status.end_restart.assert_any_call()
class DB2AppTest(testtools.TestCase):

View File

@ -30,13 +30,15 @@ import trove.tests.unittests.trove_testtools as trove_testtools
class GuestAgentMongoDBClusterManagerTest(trove_testtools.TestCase):
@mock.patch.object(service.MongoDBApp, '_init_overrides_dir')
@mock.patch.object(service.MongoDBApp, '_init_overrides_dir',
return_value='')
def setUp(self, _):
super(GuestAgentMongoDBClusterManagerTest, self).setUp()
self.context = context.TroveContext()
self.manager = manager.Manager()
self.manager.app.configuration_manager = mock.MagicMock()
self.manager.app.status = mock.MagicMock()
self.manager.app.status.set_status = mock.MagicMock()
self.manager.app.status.set_host = mock.MagicMock()
self.conf_mgr = self.manager.app.configuration_manager
self.pymongo_patch = mock.patch.object(
@ -115,7 +117,7 @@ class GuestAgentMongoDBClusterManagerTest(trove_testtools.TestCase):
mock_config.assert_called_once_with()
mock_secure.assert_called_once_with(None)
self.manager.app.status.set_status.assert_called_with(
ds_instance.ServiceStatuses.BUILD_PENDING)
ds_instance.ServiceStatuses.INSTANCE_READY, force=True)
@mock.patch.object(service.MongoDBApp, '_initialize_writable_run_dir')
@mock.patch.object(service.MongoDBApp, '_configure_as_config_server')
@ -127,7 +129,7 @@ class GuestAgentMongoDBClusterManagerTest(trove_testtools.TestCase):
mock_config.assert_called_once_with()
mock_secure.assert_called_once_with(None)
self.manager.app.status.set_status.assert_called_with(
ds_instance.ServiceStatuses.BUILD_PENDING)
ds_instance.ServiceStatuses.INSTANCE_READY, force=True)
@mock.patch.object(service.MongoDBApp, '_initialize_writable_run_dir')
@mock.patch.object(service.MongoDBApp, '_configure_as_cluster_member')
@ -138,7 +140,7 @@ class GuestAgentMongoDBClusterManagerTest(trove_testtools.TestCase):
mock_config.assert_called_once_with('rs1')
mock_secure.assert_called_once_with(None)
self.manager.app.status.set_status.assert_called_with(
ds_instance.ServiceStatuses.BUILD_PENDING)
ds_instance.ServiceStatuses.INSTANCE_READY, force=True)
@mock.patch.object(operating_system, 'write_file')
@mock.patch.object(service.MongoDBApp, '_configure_network')

View File

@ -27,7 +27,8 @@ import trove.tests.unittests.trove_testtools as trove_testtools
class GuestAgentMongoDBManagerTest(trove_testtools.TestCase):
@mock.patch.object(service.MongoDBApp, '_init_overrides_dir')
@mock.patch.object(service.MongoDBApp, '_init_overrides_dir',
return_value='')
def setUp(self, _):
super(GuestAgentMongoDBManagerTest, self).setUp()
self.context = context.TroveContext()

View File

@ -32,6 +32,9 @@ class RedisGuestAgentManagerTest(trove_testtools.TestCase):
_build_admin_client=DEFAULT, _init_overrides_dir=DEFAULT)
def setUp(self, *args, **kwargs):
super(RedisGuestAgentManagerTest, self).setUp()
self.patch_ope = patch('os.path.expanduser')
self.mock_ope = self.patch_ope.start()
self.addCleanup(self.patch_ope.stop)
self.context = TroveContext()
self.manager = RedisManager()
self.packages = 'redis-server'
@ -41,8 +44,6 @@ class RedisGuestAgentManagerTest(trove_testtools.TestCase):
self.origin_install_redis = redis_service.RedisApp._install_redis
self.origin_install_if_needed = \
redis_service.RedisApp.install_if_needed
self.origin_complete_install_or_restart = \
redis_service.RedisApp.complete_install_or_restart
self.origin_format = VolumeDevice.format
self.origin_mount = VolumeDevice.mount
self.origin_mount_points = VolumeDevice.mount_points
@ -68,8 +69,6 @@ class RedisGuestAgentManagerTest(trove_testtools.TestCase):
redis_service.RedisApp._install_redis = self.origin_install_redis
redis_service.RedisApp.install_if_needed = \
self.origin_install_if_needed
redis_service.RedisApp.complete_install_or_restart = \
self.origin_complete_install_or_restart
VolumeDevice.format = self.origin_format
VolumeDevice.mount = self.origin_mount
VolumeDevice.mount_points = self.origin_mount_points
@ -169,6 +168,8 @@ class RedisGuestAgentManagerTest(trove_testtools.TestCase):
self.manager.stop_db(self.context)
redis_mock.assert_any_call(do_not_start_on_reboot=False)
@patch.object(redis_service.RedisApp, '_init_overrides_dir',
return_value='')
@patch.object(backup, 'backup')
@patch.object(configuration.ConfigurationManager, 'parse_configuration',
MagicMock(return_value={'dir': '/var/lib/redis',

View File

@ -13,8 +13,8 @@
from mock import MagicMock
from mock import patch
from os import path
from testtools.matchers import Is
import trove_testtools
from trove.common.context import TroveContext
from trove.common.exception import DatastoreOperationNotSupported
@ -26,6 +26,7 @@ from trove.guestagent.datastore.experimental.vertica.service import VerticaApp
from trove.guestagent import dbaas
from trove.guestagent import volume
from trove.guestagent.volume import VolumeDevice
from trove.tests.unittests import trove_testtools
class GuestAgentManagerTest(trove_testtools.TestCase):
@ -45,7 +46,6 @@ class GuestAgentManagerTest(trove_testtools.TestCase):
self.origin_start_db = VerticaApp.start_db
self.origin_restart = VerticaApp.restart
self.origin_install_if = VerticaApp.install_if_needed
self.origin_complete_install = VerticaApp.complete_install_or_restart
self.origin_enable_root = VerticaApp.enable_root
self.origin_is_root_enabled = VerticaApp.is_root_enabled
self.origin_prepare_for_install_vertica = (
@ -65,7 +65,6 @@ class GuestAgentManagerTest(trove_testtools.TestCase):
VerticaApp.start_db = self.origin_start_db
VerticaApp.restart = self.origin_restart
VerticaApp.install_if_needed = self.origin_install_if
VerticaApp.complete_install_or_restart = self.origin_complete_install
VerticaApp.enable_root = self.origin_enable_root
VerticaApp.is_root_enabled = self.origin_is_root_enabled
VerticaApp.prepare_for_install_vertica = (
@ -77,6 +76,7 @@ class GuestAgentManagerTest(trove_testtools.TestCase):
self.manager.update_status(self.context)
mock_status.update.assert_any_call()
@patch.object(path, 'exists', MagicMock())
def _prepare_dynamic(self, packages,
config_content='MockContent', device_path='/dev/vdb',
backup_id=None,
@ -94,7 +94,6 @@ class GuestAgentManagerTest(trove_testtools.TestCase):
self.manager.appStatus = mock_status
mock_status.begin_install = MagicMock(return_value=None)
path_exists_function = MagicMock(return_value=True)
volume.VolumeDevice.format = MagicMock(return_value=None)
volume.VolumeDevice.migrate_data = MagicMock(return_value=None)
volume.VolumeDevice.mount = MagicMock(return_value=None)
@ -108,7 +107,6 @@ class GuestAgentManagerTest(trove_testtools.TestCase):
VerticaApp.install_vertica = MagicMock(return_value=None)
VerticaApp.create_db = MagicMock(return_value=None)
VerticaApp.prepare_for_install_vertica = MagicMock(return_value=None)
VerticaApp.complete_install_or_restart = MagicMock(return_value=None)
# invocation
self.manager.prepare(context=self.context, packages=packages,
config_contents=config_content,
@ -118,8 +116,7 @@ class GuestAgentManagerTest(trove_testtools.TestCase):
mount_point="/var/lib/vertica",
backup_info=backup_info,
overrides=None,
cluster_config=None,
path_exists_function=path_exists_function)
cluster_config=None)
self.assertEqual(expected_vol_count, VolumeDevice.format.call_count)
self.assertEqual(expected_vol_count,
@ -135,7 +132,6 @@ class GuestAgentManagerTest(trove_testtools.TestCase):
VerticaApp.prepare_for_install_vertica.assert_any_call()
VerticaApp.install_vertica.assert_any_call()
VerticaApp.create_db.assert_any_call()
VerticaApp.complete_install_or_restart.assert_any_call()
def test_prepare_pkg(self):
self._prepare_dynamic(['vertica'])
@ -204,7 +200,7 @@ class GuestAgentManagerTest(trove_testtools.TestCase):
def test_prepare_member(self, mock_set_status):
self._prepare_method("test-instance-3", "member")
mock_set_status.assert_called_with(
rd_instance.ServiceStatuses.BUILD_PENDING)
rd_instance.ServiceStatuses.INSTANCE_READY, force=True)
def test_reset_configuration(self):
try:
@ -219,9 +215,11 @@ class GuestAgentManagerTest(trove_testtools.TestCase):
@patch.object(VerticaAppStatus, 'set_status')
def test_prepare_invalid_cluster_config(self, mock_set_status):
self._prepare_method("test-instance-3", "query_router")
self.assertRaises(RuntimeError,
self._prepare_method,
"test-instance-3", "query_router")
mock_set_status.assert_called_with(
rd_instance.ServiceStatuses.FAILED)
rd_instance.ServiceStatuses.FAILED, force=True)
def test_get_filesystem_stats(self):
with patch.object(dbaas, 'get_filesystem_volume_stats'):
@ -250,14 +248,13 @@ class GuestAgentManagerTest(trove_testtools.TestCase):
test_resize_fs.assert_called_with('/var/lib/vertica')
def test_cluster_complete(self):
mock_status = MagicMock()
mock_status.set_status = MagicMock()
self.manager.appStatus = mock_status
mock_status._get_actual_db_status = MagicMock(
mock_set_status = MagicMock()
self.manager.appStatus.set_status = mock_set_status
self.manager.appStatus._get_actual_db_status = MagicMock(
return_value=rd_instance.ServiceStatuses.RUNNING)
self.manager.cluster_complete(self.context)
mock_status.set_status.assert_called_with(
rd_instance.ServiceStatuses.RUNNING)
mock_set_status.assert_called_with(
rd_instance.ServiceStatuses.RUNNING, force=True)
def test_get_public_keys(self):
with patch.object(VerticaApp, 'get_public_keys',

View File

@ -108,7 +108,7 @@ class MongoDbClusterTasksTest(trove_testtools.TestCase):
@patch.object(InstanceServiceStatus, 'find_by')
def test_all_instances_ready(self, mock_find):
(mock_find.return_value.
get_status.return_value) = ServiceStatuses.BUILD_PENDING
get_status.return_value) = ServiceStatuses.INSTANCE_READY
ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"],
self.cluster_id)
self.assertEqual(True, ret_val)

View File

@ -97,7 +97,7 @@ class PXCClusterTasksTest(trove_testtools.TestCase):
@patch.object(InstanceServiceStatus, 'find_by')
def test_all_instances_ready(self, mock_find):
(mock_find.return_value.
get_status.return_value) = ServiceStatuses.BUILD_PENDING
get_status.return_value) = ServiceStatuses.INSTANCE_READY
ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"],
self.cluster_id)
self.assertTrue(ret_val)

View File

@ -99,7 +99,7 @@ class VerticaClusterTasksTest(trove_testtools.TestCase):
@patch.object(InstanceServiceStatus, 'find_by')
def test_all_instances_ready(self, mock_find):
(mock_find.return_value.
get_status.return_value) = ServiceStatuses.BUILD_PENDING
get_status.return_value) = ServiceStatuses.INSTANCE_READY
ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"],
self.cluster_id)
self.assertTrue(ret_val)

View File

@ -15,6 +15,7 @@
import inspect
import mock
import os
import sys
import testtools
@ -31,12 +32,21 @@ class TestCase(testtools.TestCase):
# Number of nested levels to examine when searching for mocks.
# Higher setting will potentially uncover more dangling objects,
# at the cost of increased scanning time.
cls._max_recursion_depth = 2
cls._fail_fast = False # Skip remaining tests after the first failure.
cls._only_unique = True # Report only unique dangling mock references.
cls._max_recursion_depth = int(os.getenv(
'TROVE_TESTS_UNMOCK_RECURSION_DEPTH', 2))
# Should we skip the remaining tests after the first failure.
cls._fail_fast = cls.is_bool(os.getenv(
'TROVE_TESTS_UNMOCK_FAIL_FAST', False))
# Should we report only unique dangling mock references.
cls._only_unique = cls.is_bool(os.getenv(
'TROVE_TESTS_UNMOCK_ONLY_UNIQUE', True))
cls._dangling_mocks = set()
@classmethod
def is_bool(cls, val):
return str(val).lower() in ['true', '1', 't', 'y', 'yes', 'on', 'set']
def setUp(self):
if self.__class__._fail_fast and self.__class__._dangling_mocks:
self.skipTest("This test suite already has dangling mock "