Redux amulet tests
Refactor to support direct use of the BasicDeployment test class in all gate tests. As hacluster now defaults to using unicast transport, the configuration for the multicast device address is no longer required and can be dropped, removing the need to specialize tests on a per-series basis. Use min-cluster-size in tests to ensure that PXC clusters build out correctly. Refactor specific test cases into the BasicDeployment test class so they get executed against all series, including the kill-mysqld and pause/resume tests. Closes-Bug: 1546577 Change-Id: I239946808f68a0225b49c0327da2b4d35715b837
This commit is contained in:
parent
bda27479a4
commit
fd6097fcb2
|
@ -0,0 +1,122 @@
|
|||
# Copyright 2014-2017 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Charm helpers snap for classic charms.
|
||||
|
||||
If writing reactive charms, use the snap layer:
|
||||
https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html
|
||||
"""
|
||||
import subprocess
|
||||
from os import environ
|
||||
from time import sleep
|
||||
from charmhelpers.core.hookenv import log
|
||||
|
||||
__author__ = 'Joseph Borg <joseph.borg@canonical.com>'
|
||||
|
||||
SNAP_NO_LOCK = 1 # The return code for "couldn't acquire lock" in Snap (hopefully this will be improved).
|
||||
SNAP_NO_LOCK_RETRY_DELAY = 10 # Wait X seconds between Snap lock checks.
|
||||
SNAP_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
|
||||
|
||||
|
||||
class CouldNotAcquireLockException(Exception):
    """Raised when the snap lock is still held after all retry attempts."""
|
||||
|
||||
|
||||
def _snap_exec(commands):
    """
    Execute snap commands.

    Retries while snap exits with SNAP_NO_LOCK ("couldn't acquire lock"),
    sleeping SNAP_NO_LOCK_RETRY_DELAY seconds between attempts, up to
    SNAP_NO_LOCK_RETRY_COUNT attempts.

    :param commands: List commands
    :return: Integer exit code
    :raises CouldNotAcquireLockException: if the lock is still held after
        SNAP_NO_LOCK_RETRY_COUNT attempts
    """
    assert isinstance(commands, list)

    retry_count = 0
    return_code = None

    # return_code is None on the first pass so the command always runs once.
    while return_code is None or return_code == SNAP_NO_LOCK:
        try:
            return_code = subprocess.check_call(['snap'] + commands,
                                                env=environ)
        except subprocess.CalledProcessError as e:
            retry_count += 1
            if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
                raise CouldNotAcquireLockException(
                    'Could not acquire lock after %s attempts'
                    % SNAP_NO_LOCK_RETRY_COUNT)
            return_code = e.returncode
            log('Snap failed to acquire lock, trying again in %s seconds.'
                % SNAP_NO_LOCK_RETRY_DELAY, level='WARN')
            sleep(SNAP_NO_LOCK_RETRY_DELAY)

    return return_code
|
||||
|
||||
|
||||
def snap_install(packages, *flags):
    """
    Install a snap package.

    :param packages: String or List String package name
    :param flags: List String flags to pass to install command
    :return: Integer return code from snap
    """
    # Accept a single package name as well as a list of names.
    if not isinstance(packages, list):
        packages = [packages]

    flags = list(flags)

    message = 'Installing snap(s) "%s"' % ', '.join(packages)
    if flags:
        message += ' with option(s) "%s"' % ', '.join(flags)

    log(message, level='INFO')
    return _snap_exec(['install'] + flags + packages)
|
||||
|
||||
|
||||
def snap_remove(packages, *flags):
    """
    Remove a snap package.

    :param packages: String or List String package name
    :param flags: List String flags to pass to remove command
    :return: Integer return code from snap
    """
    # Accept a single package name as well as a list of names.
    if not isinstance(packages, list):
        packages = [packages]

    flags = list(flags)

    message = 'Removing snap(s) "%s"' % ', '.join(packages)
    if flags:
        message += ' with options "%s"' % ', '.join(flags)

    log(message, level='INFO')
    return _snap_exec(['remove'] + flags + packages)
|
||||
|
||||
|
||||
def snap_refresh(packages, *flags):
    """
    Refresh / Update snap package.

    :param packages: String or List String package name
    :param flags: List String flags to pass to refresh command
    :return: Integer return code from snap
    """
    # Accept a single package name as well as a list of names.
    if not isinstance(packages, list):
        packages = [packages]

    flags = list(flags)

    message = 'Refreshing snap(s) "%s"' % ', '.join(packages)
    if flags:
        message += ' with options "%s"' % ', '.join(flags)

    log(message, level='INFO')
    return _snap_exec(['refresh'] + flags + packages)
|
|
@ -116,8 +116,8 @@ CLOUD_ARCHIVE_POCKETS = {
|
|||
}
|
||||
|
||||
APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
|
||||
APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
|
||||
APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
|
||||
CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries.
|
||||
CMD_RETRY_COUNT = 30 # Retry a failing fatal command X times.
|
||||
|
||||
|
||||
def filter_installed_packages(packages):
|
||||
|
@ -249,7 +249,8 @@ def add_source(source, key=None):
|
|||
source.startswith('http') or
|
||||
source.startswith('deb ') or
|
||||
source.startswith('cloud-archive:')):
|
||||
subprocess.check_call(['add-apt-repository', '--yes', source])
|
||||
cmd = ['add-apt-repository', '--yes', source]
|
||||
_run_with_retries(cmd)
|
||||
elif source.startswith('cloud:'):
|
||||
install(filter_installed_packages(['ubuntu-cloud-keyring']),
|
||||
fatal=True)
|
||||
|
@ -286,41 +287,60 @@ def add_source(source, key=None):
|
|||
key])
|
||||
|
||||
|
||||
def _run_apt_command(cmd, fatal=False):
|
||||
"""Run an APT command.
|
||||
|
||||
Checks the output and retries if the fatal flag is set
|
||||
to True.
|
||||
def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
                      retry_message="", cmd_env=None):
    """Run a command and retry until success or max_retries is reached.

    Sleeps CMD_RETRY_DELAY seconds between attempts; raises the final
    CalledProcessError once max_retries is exceeded.

    :param: cmd: str: The apt command to run.
    :param: max_retries: int: The number of retries to attempt on a fatal
        command. Defaults to CMD_RETRY_COUNT.
    :param: retry_exitcodes: tuple: Optional additional exit codes to retry.
        Defaults to retry on exit code 1.
    :param: retry_message: str: Optional log prefix emitted during retries.
    :param: cmd_env: dict: Environment variables to add to the command run.
    """

    # Run with the caller's environment plus any supplied overrides.
    env = os.environ.copy()
    if cmd_env:
        env.update(cmd_env)

    if not retry_message:
        retry_message = "Failed executing '{}'".format(" ".join(cmd))
    retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY)

    retry_count = 0
    result = None

    # None is included so the command always executes at least once.
    retry_results = (None,) + retry_exitcodes
    while result in retry_results:
        try:
            result = subprocess.check_call(cmd, env=env)
        except subprocess.CalledProcessError as e:
            retry_count = retry_count + 1
            if retry_count > max_retries:
                # Out of retries: propagate the original failure.
                raise
            result = e.returncode
            log(retry_message)
            time.sleep(CMD_RETRY_DELAY)
|
||||
|
||||
|
||||
def _run_apt_command(cmd, fatal=False):
|
||||
"""Run an apt command with optional retries.
|
||||
|
||||
:param: fatal: bool: Whether the command's output should be checked and
|
||||
retried.
|
||||
"""
|
||||
env = os.environ.copy()
|
||||
|
||||
if 'DEBIAN_FRONTEND' not in env:
|
||||
env['DEBIAN_FRONTEND'] = 'noninteractive'
|
||||
# Provide DEBIAN_FRONTEND=noninteractive if not present in the environment.
|
||||
cmd_env = {
|
||||
'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')}
|
||||
|
||||
if fatal:
|
||||
retry_count = 0
|
||||
result = None
|
||||
|
||||
# If the command is considered "fatal", we need to retry if the apt
|
||||
# lock was not acquired.
|
||||
|
||||
while result is None or result == APT_NO_LOCK:
|
||||
try:
|
||||
result = subprocess.check_call(cmd, env=env)
|
||||
except subprocess.CalledProcessError as e:
|
||||
retry_count = retry_count + 1
|
||||
if retry_count > APT_NO_LOCK_RETRY_COUNT:
|
||||
raise
|
||||
result = e.returncode
|
||||
log("Couldn't acquire DPKG lock. Will retry in {} seconds."
|
||||
"".format(APT_NO_LOCK_RETRY_DELAY))
|
||||
time.sleep(APT_NO_LOCK_RETRY_DELAY)
|
||||
|
||||
_run_with_retries(
|
||||
cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,),
|
||||
retry_message="Couldn't acquire DPKG lock")
|
||||
else:
|
||||
env = os.environ.copy()
|
||||
env.update(cmd_env)
|
||||
subprocess.call(cmd, env=env)
|
||||
|
||||
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
# basic deployment test class for percona-xtradb-cluster
|
||||
|
||||
import amulet
|
||||
import re
|
||||
import os
|
||||
|
@ -8,9 +10,13 @@ import yaml
|
|||
from charmhelpers.contrib.openstack.amulet.deployment import (
|
||||
OpenStackAmuletDeployment
|
||||
)
|
||||
from charmhelpers.contrib.amulet.utils import AmuletUtils
|
||||
|
||||
|
||||
class BasicDeployment(OpenStackAmuletDeployment):
|
||||
|
||||
utils = AmuletUtils()
|
||||
|
||||
def __init__(self, vip=None, units=1, series="trusty", openstack=None,
|
||||
source=None, stable=False):
|
||||
super(BasicDeployment, self).__init__(series, openstack, source,
|
||||
|
@ -33,6 +39,7 @@ class BasicDeployment(OpenStackAmuletDeployment):
|
|||
("Please set the vip in local.yaml or "
|
||||
"env var AMULET_OS_VIP to run this test "
|
||||
"suite"))
|
||||
self.log = self.utils.get_logger()
|
||||
|
||||
def _add_services(self):
|
||||
"""Add services
|
||||
|
@ -60,20 +67,21 @@ class BasicDeployment(OpenStackAmuletDeployment):
|
|||
"""Configure all of the services."""
|
||||
cfg_percona = {'sst-password': 'ubuntu',
|
||||
'root-password': 't00r',
|
||||
'min-cluster-size': self.units,
|
||||
'vip': self.vip}
|
||||
|
||||
cfg_ha = {'debug': True,
|
||||
'corosync_mcastaddr': '226.94.1.4',
|
||||
'corosync_key': ('xZP7GDWV0e8Qs0GxWThXirNNYlScgi3sRTdZk/IXKD'
|
||||
'qkNFcwdCWfRQnqrHU/6mb6sz6OIoZzX2MtfMQIDcXu'
|
||||
'PqQyvKuv7YbRyGHmQwAWDUA4ed759VWAO39kHkfWp9'
|
||||
'y5RRk/wcHakTcWYMwm70upDGJEP00YT3xem3NQy27A'
|
||||
'C1w=')}
|
||||
|
||||
configs = {'percona-cluster': cfg_percona}
|
||||
configs = {}
|
||||
if self.units > 1:
|
||||
cfg_ha['cluster_count'] = str(self.units)
|
||||
configs['hacluster'] = cfg_ha
|
||||
configs['percona-cluster'] = cfg_percona
|
||||
|
||||
return configs
|
||||
|
||||
|
@ -86,7 +94,23 @@ class BasicDeployment(OpenStackAmuletDeployment):
|
|||
self._configure_services()
|
||||
self._deploy()
|
||||
self.d.sentry.wait()
|
||||
self.test_deployment()
|
||||
|
||||
def test_deployment(self):
    '''Top level test function executor.

    Runs each functional check in order. test_kill_master relies on
    self.master_unit, which test_pacemaker sets as a side effect, so
    the ordering here matters.
    '''
    self.test_pacemaker()
    self.test_pxc_running()
    self.test_bootstrapped_and_clustered()
    self.test_pause_resume()
    self.test_kill_master()
|
||||
|
||||
def test_pacemaker(self):
|
||||
'''
|
||||
Ensure that pacemaker and corosync are correctly configured in
|
||||
clustered deployments.
|
||||
|
||||
side effect: self.master_unit should be set after execution
|
||||
'''
|
||||
if self.units > 1:
|
||||
i = 0
|
||||
while i < 30 and not self.master_unit:
|
||||
|
@ -108,9 +132,87 @@ class BasicDeployment(OpenStackAmuletDeployment):
|
|||
else:
|
||||
self.master_unit = self.find_master(ha=False)
|
||||
|
||||
def test_pxc_running(self):
    '''
    Ensure PXC is running on all units.

    Asserts that mysqld is running (per is_mysqld_running) on every
    deployed percona-cluster unit.
    '''
    for unit in self.d.sentry['percona-cluster']:
        assert self.is_mysqld_running(unit), 'mysql not running: %s' % unit
|
||||
|
||||
def test_bootstrapped_and_clustered(self):
    '''
    Ensure PXC is bootstrapped and that peer units are clustered.

    Verifies bootstrap via is_pxc_bootstrapped(), then checks that the
    reported cluster size matches the number of deployed units.
    '''
    self.log.info('Ensuring PXC is bootstrapped')
    msg = "Percona cluster failed to bootstrap"
    assert self.is_pxc_bootstrapped(), msg

    self.log.info('Checking PXC cluster size == {}'.format(self.units))
    # get_cluster_size() appears to return a string — normalise to int
    # before comparing with self.units.
    got = int(self.get_cluster_size())
    msg = ("Percona cluster unexpected size"
           " (wanted=%s, got=%s)" % (self.units, got))
    assert got == self.units, msg
|
||||
|
||||
def test_pause_resume(self):
    '''
    Ensure pause/resume actions stop/start mysqld on units.

    Pauses the first PXC unit and verifies mysqld stops and the unit
    reaches "maintenance" status, then resumes it and verifies mysqld
    restarts and the unit returns to "active".
    '''
    self.log.info('Testing pause/resume actions')
    self.log.info('Pausing service on first PXC unit')
    unit = self.d.sentry['percona-cluster'][0]
    # Preconditions: mysqld up and unit workload status "active".
    assert self.is_mysqld_running(unit), 'mysql not running'
    assert self.utils.status_get(unit)[0] == "active"

    action_id = self.utils.run_action(unit, "pause")
    assert self.utils.wait_on_action(action_id), "Pause action failed."

    # Note that is_mysqld_running will print an error message when
    # mysqld is not running. This is by design but it looks odd
    # in the output.
    assert not self.is_mysqld_running(unit=unit), \
        "mysqld is still running!"

    self.log.info('Resuming service on first PXC unit')
    assert self.utils.status_get(unit)[0] == "maintenance"
    action_id = self.utils.run_action(unit, "resume")
    assert self.utils.wait_on_action(action_id), "Resume action failed"
    assert self.utils.status_get(unit)[0] == "active"
    assert self.is_mysqld_running(unit=unit), \
        "mysqld not running after resume."
|
||||
|
||||
def test_kill_master(self):
    '''
    Ensure that killing the mysqld on the master unit results
    in a VIP failover.

    Relies on self.master_unit having been set (test_pacemaker does
    this). Kills mysqld with SIGKILL on the master, then polls up to
    10 times at 5-second intervals for pacemaker to elect a different
    master, and finally verifies the VIP still accepts connections.
    '''
    self.log.info('Testing failover of master unit on mysqld failure')
    # we are going to kill the master
    old_master = self.master_unit
    self.log.info(
        'kill -9 mysqld on {}'.format(self.master_unit.info['unit_name'])
    )
    self.master_unit.run('sudo killall -9 mysqld')

    self.log.info('looking for the new master')
    i = 0
    changed = False
    while i < 10 and not changed:
        i += 1
        time.sleep(5)  # give some time to pacemaker to react
        new_master = self.find_master()

        # Failover counts only if a master exists and it is a
        # different unit from the one we killed.
        if (new_master and new_master.info['unit_name'] !=
                old_master.info['unit_name']):
            self.log.info(
                'New master unit detected'
                ' on {}'.format(new_master.info['unit_name'])
            )
            changed = True

    assert changed, "The master didn't change"

    # The VIP must still be reachable after failover.
    assert self.is_port_open(address=self.vip), 'cannot connect to vip'
|
||||
|
||||
def find_master(self, ha=True):
|
||||
for unit in self.d.sentry['percona-cluster']:
|
||||
if not ha:
|
||||
|
@ -118,11 +220,13 @@ class BasicDeployment(OpenStackAmuletDeployment):
|
|||
|
||||
# is the vip running here?
|
||||
output, code = unit.run('sudo ip a | grep "inet %s/"' % self.vip)
|
||||
print('---')
|
||||
print(unit)
|
||||
print(output)
|
||||
self.log.info("Checking {}".format(unit.info['unit_name']))
|
||||
self.log.debug(output)
|
||||
if code == 0:
|
||||
print('vip(%s) running in %s' % (self.vip, unit))
|
||||
self.log.info('vip ({}) running in {}'.format(
|
||||
self.vip,
|
||||
unit.info['unit_name'])
|
||||
)
|
||||
return unit
|
||||
|
||||
def get_pcmkr_resources(self, unit=None):
|
||||
|
@ -145,7 +249,7 @@ class BasicDeployment(OpenStackAmuletDeployment):
|
|||
|
||||
_, code = u.run('pidof mysqld')
|
||||
if code != 0:
|
||||
print("ERROR: command returned non-zero '%s'" % (code))
|
||||
self.log.debug("command returned non-zero '%s'" % (code))
|
||||
return False
|
||||
|
||||
return True
|
||||
|
@ -160,11 +264,11 @@ class BasicDeployment(OpenStackAmuletDeployment):
|
|||
"grep %s" % (attr, attr))
|
||||
output, code = u.run(cmd)
|
||||
if code != 0:
|
||||
print("ERROR: command returned non-zero '%s'" % (code))
|
||||
self.log.debug("command returned non-zero '%s'" % (code))
|
||||
return ""
|
||||
|
||||
value = re.search(r"^.+?\s+(.+)", output).group(1)
|
||||
print("%s = %s" % (attr, value))
|
||||
self.log.info("%s = %s" % (attr, value))
|
||||
return value
|
||||
|
||||
def is_pxc_bootstrapped(self, unit=None):
|
||||
|
@ -187,8 +291,9 @@ class BasicDeployment(OpenStackAmuletDeployment):
|
|||
return True
|
||||
except socket.error as e:
|
||||
if e.errno == 113:
|
||||
print("ERROR: could not connect to %s:%s" % (addr, port))
|
||||
self.log.error("could not connect to %s:%s" % (addr, port))
|
||||
if e.errno == 111:
|
||||
print("ERROR: connection refused connecting to %s:%s" % (addr,
|
||||
port))
|
||||
self.log.error("connection refused connecting"
|
||||
" to %s:%s" % (addr,
|
||||
port))
|
||||
return False
|
||||
|
|
|
@ -785,37 +785,30 @@ class AmuletUtils(object):
|
|||
generating test messages which need to be unique-ish."""
|
||||
return '[{}-{}]'.format(uuid.uuid4(), time.time())
|
||||
|
||||
# amulet juju action helpers:
|
||||
# amulet juju action helpers:
|
||||
def run_action(self, unit_sentry, action,
|
||||
_check_output=subprocess.check_output,
|
||||
params=None):
|
||||
"""Run the named action on a given unit sentry.
|
||||
"""Translate to amulet's built in run_action(). Deprecated.
|
||||
|
||||
Run the named action on a given unit sentry.
|
||||
|
||||
params a dict of parameters to use
|
||||
_check_output parameter is used for dependency injection.
|
||||
_check_output parameter is no longer used
|
||||
|
||||
@return action_id.
|
||||
"""
|
||||
unit_id = unit_sentry.info["unit_name"]
|
||||
command = ["juju", "action", "do", "--format=json", unit_id, action]
|
||||
if params is not None:
|
||||
for key, value in params.iteritems():
|
||||
command.append("{}={}".format(key, value))
|
||||
self.log.info("Running command: %s\n" % " ".join(command))
|
||||
output = _check_output(command, universal_newlines=True)
|
||||
data = json.loads(output)
|
||||
action_id = data[u'Action queued with id']
|
||||
return action_id
|
||||
self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been '
|
||||
'deprecated for amulet.run_action')
|
||||
return unit_sentry.run_action(action, action_args=params)
|
||||
|
||||
def wait_on_action(self, action_id, _check_output=subprocess.check_output):
|
||||
"""Wait for a given action, returning if it completed or not.
|
||||
|
||||
_check_output parameter is used for dependency injection.
|
||||
action_id a string action uuid
|
||||
_check_output parameter is no longer used
|
||||
"""
|
||||
command = ["juju", "action", "fetch", "--format=json", "--wait=0",
|
||||
action_id]
|
||||
output = _check_output(command, universal_newlines=True)
|
||||
data = json.loads(output)
|
||||
data = amulet.actions.get_action_output(action_id, full_output=True)
|
||||
return data.get(u"status") == "completed"
|
||||
|
||||
def status_get(self, unit):
|
||||
|
|
|
@ -1,38 +0,0 @@
|
|||
#!/usr/bin/env python
# test percona-cluster (3 nodes)
# Legacy standalone amulet test: deploys a 3-unit PXC cluster, stops
# mysql on the master and verifies pacemaker fails the VIP over to
# another unit.

import basic_deployment
import time


class ThreeNode(basic_deployment.BasicDeployment):
    def __init__(self):
        # Three units so there is somewhere for the master to fail over to.
        super(ThreeNode, self).__init__(units=3)

    def run(self):
        """Run the base deployment, then force a master failover."""
        super(ThreeNode, self).run()
        # we are going to kill the master
        old_master = self.master_unit
        print('stopping mysql in %s' % str(self.master_unit.info))
        self.master_unit.run('sudo service mysql stop')

        print('looking for the new master')
        i = 0
        changed = False
        # Poll up to 10 times, 5 seconds apart, for a new master.
        while i < 10 and not changed:
            i += 1
            time.sleep(5)  # give some time to pacemaker to react
            new_master = self.find_master()

            if (new_master and new_master.info['unit_name'] !=
                    old_master.info['unit_name']):
                changed = True

        assert changed, "The master didn't change"

        # The VIP must remain reachable after failover.
        assert self.is_port_open(address=self.vip), 'cannot connect to vip'


if __name__ == "__main__":
    t = ThreeNode()
    t.run()
|
|
@ -1,38 +0,0 @@
|
|||
#!/usr/bin/env python
# test percona-cluster (3 nodes)
# Legacy standalone amulet test: like the "stop mysql" variant, but
# kills mysqld with SIGKILL to simulate an abrupt crash, then verifies
# VIP failover.

import basic_deployment
import time


class ThreeNode(basic_deployment.BasicDeployment):
    def __init__(self):
        # Three units so there is somewhere for the master to fail over to.
        super(ThreeNode, self).__init__(units=3)

    def run(self):
        """Run the base deployment, then crash mysqld on the master."""
        super(ThreeNode, self).run()
        # we are going to kill the master
        old_master = self.master_unit
        print('kill-9 mysqld in %s' % str(self.master_unit.info))
        self.master_unit.run('sudo killall -9 mysqld')

        print('looking for the new master')
        i = 0
        changed = False
        # Poll up to 10 times, 5 seconds apart, for a new master.
        while i < 10 and not changed:
            i += 1
            time.sleep(5)  # give some time to pacemaker to react
            new_master = self.find_master()

            if (new_master and new_master.info['unit_name'] !=
                    old_master.info['unit_name']):
                changed = True

        assert changed, "The master didn't change"

        # The VIP must remain reachable after failover.
        assert self.is_port_open(address=self.vip), 'cannot connect to vip'


if __name__ == "__main__":
    t = ThreeNode()
    t.run()
|
|
@ -1,43 +0,0 @@
|
|||
#!/usr/bin/env python
# test percona-cluster (3 nodes)
# Legacy standalone amulet test: 3-unit cluster; verifies bootstrap and
# that the reported cluster size reaches 3.
import basic_deployment


class MultiNode(basic_deployment.BasicDeployment):
    def __init__(self):
        super(MultiNode, self).__init__(units=3)

    def _get_configs(self):
        """Configure all of the services."""
        cfg_percona = {'sst-password': 'ubuntu',
                       'root-password': 't00r',
                       'dataset-size': '512M',
                       'vip': self.vip,
                       'min-cluster-size': 3}

        # Static corosync key so the hacluster units can form a cluster.
        cfg_ha = {'debug': True,
                  'corosync_mcastaddr': '226.94.1.4',
                  'corosync_key': ('xZP7GDWV0e8Qs0GxWThXirNNYlScgi3sRTdZk/IXKD'
                                   'qkNFcwdCWfRQnqrHU/6mb6sz6OIoZzX2MtfMQIDcXu'
                                   'PqQyvKuv7YbRyGHmQwAWDUA4ed759VWAO39kHkfWp9'
                                   'y5RRk/wcHakTcWYMwm70upDGJEP00YT3xem3NQy27A'
                                   'C1w=')}

        configs = {'percona-cluster': cfg_percona}
        # hacluster is only relevant for multi-unit deployments.
        if self.units > 1:
            configs['hacluster'] = cfg_ha

        return configs

    def run(self):
        """Run the base deployment, then check bootstrap and cluster size."""
        super(MultiNode, self).run()
        msg = "Percona cluster failed to bootstrap"
        assert self.is_pxc_bootstrapped(), msg
        # get_cluster_size() returns a string here, hence '3'.
        got = self.get_cluster_size()
        msg = "Percona cluster unexpected size (wanted=%s, got=%s)" % (3, got)
        assert got == '3', msg


if __name__ == "__main__":
    t = MultiNode()
    t.run()
|
|
@ -1,41 +0,0 @@
|
|||
#!/usr/bin/env python
# test percona-cluster (multi node)
# Legacy standalone amulet test with 2 units.
# NOTE(review): this file looks internally inconsistent — it deploys 2
# units but sets 'min-cluster-size': 3 and then asserts a cluster size
# of 0 or 1 ("wanted=1"). Presumably it exercised the partial-cluster
# (below min-cluster-size) behavior; confirm before reusing.
import basic_deployment


class MultiNode(basic_deployment.BasicDeployment):
    def __init__(self):
        super(MultiNode, self).__init__(units=2)

    def _get_configs(self):
        """Configure all of the services."""
        cfg_percona = {'sst-password': 'ubuntu',
                       'root-password': 't00r',
                       'dataset-size': '512M',
                       'vip': self.vip,
                       'min-cluster-size': 3}

        # Static corosync key so the hacluster units can form a cluster.
        cfg_ha = {'debug': True,
                  'corosync_mcastaddr': '226.94.1.4',
                  'corosync_key': ('xZP7GDWV0e8Qs0GxWThXirNNYlScgi3sRTdZk/IXKD'
                                   'qkNFcwdCWfRQnqrHU/6mb6sz6OIoZzX2MtfMQIDcXu'
                                   'PqQyvKuv7YbRyGHmQwAWDUA4ed759VWAO39kHkfWp9'
                                   'y5RRk/wcHakTcWYMwm70upDGJEP00YT3xem3NQy27A'
                                   'C1w=')}

        configs = {'percona-cluster': cfg_percona}
        # hacluster is only relevant for multi-unit deployments.
        if self.units > 1:
            configs['hacluster'] = cfg_ha

        return configs

    def run(self):
        """Run the base deployment, then check the reported cluster size."""
        super(MultiNode, self).run()
        got = self.get_cluster_size()
        msg = "Percona cluster unexpected size (wanted=%s, got=%s)" % (1, got)
        assert got in ('0', '1'), msg


if __name__ == "__main__":
    t = MultiNode()
    t.run()
|
|
@ -1,37 +0,0 @@
|
|||
#!/usr/bin/env python
# test percona-cluster pause and resume
# Legacy standalone amulet test: exercises the charm's pause/resume
# actions on a single unit of the deployed cluster.

import basic_deployment
from charmhelpers.contrib.amulet.utils import AmuletUtils

utils = AmuletUtils()


class PauseResume(basic_deployment.BasicDeployment):

    def run(self):
        """Run the base deployment, then pause and resume the first unit."""
        super(PauseResume, self).run()
        unit = self.d.sentry['percona-cluster'][0]
        # Preconditions: mysqld up and workload status "active".
        assert self.is_mysqld_running(unit), 'mysql not running'
        assert utils.status_get(unit)[0] == "active"

        action_id = utils.run_action(unit, "pause")
        assert utils.wait_on_action(action_id), "Pause action failed."

        # Note that is_mysqld_running will print an error message when
        # mysqld is not running. This is by design but it looks odd
        # in the output.
        assert not self.is_mysqld_running(unit=unit), \
            "mysqld is still running!"

        # Pause should leave the unit in "maintenance"; resume restores
        # both mysqld and the "active" status.
        assert utils.status_get(unit)[0] == "maintenance"
        action_id = utils.run_action(unit, "resume")
        assert utils.wait_on_action(action_id), "Resume action failed"
        assert utils.status_get(unit)[0] == "active"
        assert self.is_mysqld_running(unit=unit), \
            "mysqld not running after resume."


if __name__ == "__main__":
    p = PauseResume()
    p.run()
|
|
@ -1,39 +0,0 @@
|
|||
#!/usr/bin/env python
# test percona-cluster (1 node) with pause and resume.
# Legacy standalone amulet test: single-unit deployment; verifies
# bootstrap and the pause/resume action round-trip.

from charmhelpers.contrib.openstack.amulet.utils import (  # noqa
    OpenStackAmuletUtils,
    DEBUG,
    # ERROR
)

import basic_deployment


u = OpenStackAmuletUtils(DEBUG)


class SingleNode(basic_deployment.BasicDeployment):
    def __init__(self):
        super(SingleNode, self).__init__(units=1)

    def run(self):
        """Run the base deployment, then pause/resume the single unit."""
        super(SingleNode, self).run()
        assert self.is_pxc_bootstrapped(), "Cluster not bootstrapped"
        sentry_unit = self.d.sentry['percona-cluster'][0]

        assert u.status_get(sentry_unit)[0] == "active"

        # Pause should move the unit to "maintenance"...
        action_id = u.run_action(sentry_unit, "pause")
        assert u.wait_on_action(action_id), "Pause action failed."
        assert u.status_get(sentry_unit)[0] == "maintenance"

        # ...and resume should bring it back to "active".
        action_id = u.run_action(sentry_unit, "resume")
        assert u.wait_on_action(action_id), "Resume action failed."
        assert u.status_get(sentry_unit)[0] == "active"
        u.log.debug('OK')


if __name__ == "__main__":
    t = SingleNode()
    t.run()
|
|
@ -1,29 +0,0 @@
|
|||
#!/usr/bin/env python
# test percona-cluster (3 nodes)
# Legacy standalone amulet test: powers off the master machine entirely
# and verifies the VIP moves to a different unit.

import basic_deployment
import time


class ThreeNode(basic_deployment.BasicDeployment):
    def __init__(self):
        # Three units so there is somewhere for the master to fail over to.
        super(ThreeNode, self).__init__(units=3)

    def run(self):
        """Run the base deployment, then power off the master machine."""
        super(ThreeNode, self).run()
        # we are going to kill the master
        old_master = self.master_unit
        self.master_unit.run('sudo poweroff')

        time.sleep(10)  # give some time to pacemaker to react
        # A new master must exist and be on a different machine.
        new_master = self.find_master()
        assert new_master is not None, "master unit not found"
        assert (new_master.info['public-address'] !=
                old_master.info['public-address'])

        # The VIP must remain reachable after failover.
        assert self.is_port_open(address=self.vip), 'cannot connect to vip'


if __name__ == "__main__":
    t = ThreeNode()
    t.run()
|
|
@ -0,0 +1,8 @@
|
|||
#!/usr/bin/env python
# Amulet gate test: run the full BasicDeployment suite against a
# 3-unit percona-cluster deployment on the trusty series.

import basic_deployment


if __name__ == "__main__":
    t = basic_deployment.BasicDeployment(units=3, series='trusty')
    t.run()
|
|
@ -1,44 +1,8 @@
|
|||
#!/usr/bin/env python
|
||||
# test percona-cluster (multi node)
|
||||
|
||||
import basic_deployment
|
||||
|
||||
|
||||
class MultiNode(basic_deployment.BasicDeployment):
|
||||
def __init__(self):
|
||||
super(MultiNode, self).__init__(units=3, series='xenial')
|
||||
|
||||
def _get_configs(self):
|
||||
"""Configure all of the services."""
|
||||
cfg_percona = {'sst-password': 'ubuntu',
|
||||
'root-password': 't00r',
|
||||
'dataset-size': '512M',
|
||||
'vip': self.vip,
|
||||
'min-cluster-size': 3,
|
||||
'ha-bindiface': 'ens2'}
|
||||
|
||||
cfg_ha = {'debug': True,
|
||||
'corosync_mcastaddr': '226.94.1.4',
|
||||
'corosync_key': ('xZP7GDWV0e8Qs0GxWThXirNNYlScgi3sRTdZk/IXKD'
|
||||
'qkNFcwdCWfRQnqrHU/6mb6sz6OIoZzX2MtfMQIDcXu'
|
||||
'PqQyvKuv7YbRyGHmQwAWDUA4ed759VWAO39kHkfWp9'
|
||||
'y5RRk/wcHakTcWYMwm70upDGJEP00YT3xem3NQy27A'
|
||||
'C1w=')}
|
||||
|
||||
configs = {'percona-cluster': cfg_percona}
|
||||
if self.units > 1:
|
||||
configs['hacluster'] = cfg_ha
|
||||
|
||||
return configs
|
||||
|
||||
def run(self):
|
||||
super(MultiNode, self).run()
|
||||
msg = "Percona cluster failed to bootstrap"
|
||||
assert self.is_pxc_bootstrapped(), msg
|
||||
got = self.get_cluster_size()
|
||||
msg = "Percona cluster unexpected size (wanted=%s, got=%s)" % (3, got)
|
||||
assert got == '3', msg
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
t = MultiNode()
|
||||
t = basic_deployment.BasicDeployment(units=3, series='xenial')
|
||||
t.run()
|
||||
|
|
|
@ -1,44 +1,8 @@
|
|||
#!/usr/bin/env python
|
||||
# test percona-cluster (multi node)
|
||||
|
||||
import basic_deployment
|
||||
|
||||
|
||||
class MultiNode(basic_deployment.BasicDeployment):
|
||||
def __init__(self):
|
||||
super(MultiNode, self).__init__(units=3, series='yakkety')
|
||||
|
||||
def _get_configs(self):
|
||||
"""Configure all of the services."""
|
||||
cfg_percona = {'sst-password': 'ubuntu',
|
||||
'root-password': 't00r',
|
||||
'dataset-size': '512M',
|
||||
'vip': self.vip,
|
||||
'min-cluster-size': 3,
|
||||
'ha-bindiface': 'ens2'}
|
||||
|
||||
cfg_ha = {'debug': True,
|
||||
'corosync_mcastaddr': '226.94.1.4',
|
||||
'corosync_key': ('xZP7GDWV0e8Qs0GxWThXirNNYlScgi3sRTdZk/IXKD'
|
||||
'qkNFcwdCWfRQnqrHU/6mb6sz6OIoZzX2MtfMQIDcXu'
|
||||
'PqQyvKuv7YbRyGHmQwAWDUA4ed759VWAO39kHkfWp9'
|
||||
'y5RRk/wcHakTcWYMwm70upDGJEP00YT3xem3NQy27A'
|
||||
'C1w=')}
|
||||
|
||||
configs = {'percona-cluster': cfg_percona}
|
||||
if self.units > 1:
|
||||
configs['hacluster'] = cfg_ha
|
||||
|
||||
return configs
|
||||
|
||||
def run(self):
|
||||
super(MultiNode, self).run()
|
||||
msg = "Percona cluster failed to bootstrap"
|
||||
assert self.is_pxc_bootstrapped(), msg
|
||||
got = self.get_cluster_size()
|
||||
msg = "Percona cluster unexpected size (wanted=%s, got=%s)" % (3, got)
|
||||
assert got == '3', msg
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
t = MultiNode()
|
||||
t = basic_deployment.BasicDeployment(units=3, series='yakkety')
|
||||
t.run()
|
||||
|
|
|
@ -1,44 +1,8 @@
|
|||
#!/usr/bin/env python
|
||||
# test percona-cluster (multi node)
|
||||
|
||||
import basic_deployment
|
||||
|
||||
|
||||
class MultiNode(basic_deployment.BasicDeployment):
|
||||
def __init__(self):
|
||||
super(MultiNode, self).__init__(units=3, series='zesty')
|
||||
|
||||
def _get_configs(self):
|
||||
"""Configure all of the services."""
|
||||
cfg_percona = {'sst-password': 'ubuntu',
|
||||
'root-password': 't00r',
|
||||
'dataset-size': '512M',
|
||||
'vip': self.vip,
|
||||
'min-cluster-size': 3,
|
||||
'ha-bindiface': 'ens2'}
|
||||
|
||||
cfg_ha = {'debug': True,
|
||||
'corosync_mcastaddr': '226.94.1.4',
|
||||
'corosync_key': ('xZP7GDWV0e8Qs0GxWThXirNNYlScgi3sRTdZk/IXKD'
|
||||
'qkNFcwdCWfRQnqrHU/6mb6sz6OIoZzX2MtfMQIDcXu'
|
||||
'PqQyvKuv7YbRyGHmQwAWDUA4ed759VWAO39kHkfWp9'
|
||||
'y5RRk/wcHakTcWYMwm70upDGJEP00YT3xem3NQy27A'
|
||||
'C1w=')}
|
||||
|
||||
configs = {'percona-cluster': cfg_percona}
|
||||
if self.units > 1:
|
||||
configs['hacluster'] = cfg_ha
|
||||
|
||||
return configs
|
||||
|
||||
def run(self):
|
||||
super(MultiNode, self).run()
|
||||
msg = "Percona cluster failed to bootstrap"
|
||||
assert self.is_pxc_bootstrapped(), msg
|
||||
got = self.get_cluster_size()
|
||||
msg = "Percona cluster unexpected size (wanted=%s, got=%s)" % (3, got)
|
||||
assert got == '3', msg
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
t = MultiNode()
|
||||
t = basic_deployment.BasicDeployment(units=3, series='zesty')
|
||||
t.run()
|
||||
|
|
|
@ -1,17 +0,0 @@
|
|||
#!/usr/bin/env python
# test percona-cluster (1 node)
# Legacy standalone amulet test: single unit on the default series;
# only verifies that PXC bootstraps.
import basic_deployment


class SingleNode(basic_deployment.BasicDeployment):
    def __init__(self):
        super(SingleNode, self).__init__(units=1)

    def run(self):
        """Run the base deployment, then check bootstrap."""
        super(SingleNode, self).run()
        assert self.is_pxc_bootstrapped(), "Cluster not bootstrapped"


if __name__ == "__main__":
    t = SingleNode()
    t.run()
|
|
@ -1,17 +0,0 @@
|
|||
#!/usr/bin/env python
# test percona-cluster (1 node)
# Legacy standalone amulet test: single unit on xenial; only verifies
# that PXC bootstraps.
import basic_deployment


class SingleNode(basic_deployment.BasicDeployment):
    def __init__(self):
        super(SingleNode, self).__init__(units=1, series='xenial')

    def run(self):
        """Run the base deployment, then check bootstrap."""
        super(SingleNode, self).run()
        assert self.is_pxc_bootstrapped(), "Cluster not bootstrapped"


if __name__ == "__main__":
    t = SingleNode()
    t.run()
|
|
@ -1,17 +0,0 @@
|
|||
#!/usr/bin/env python
# test percona-cluster (1 node)
# Legacy standalone amulet test: single unit on yakkety; only verifies
# that PXC bootstraps.
import basic_deployment


class SingleNode(basic_deployment.BasicDeployment):
    def __init__(self):
        super(SingleNode, self).__init__(units=1, series='yakkety')

    def run(self):
        """Run the base deployment, then check bootstrap."""
        super(SingleNode, self).run()
        assert self.is_pxc_bootstrapped(), "Cluster not bootstrapped"


if __name__ == "__main__":
    t = SingleNode()
    t.run()
|
Loading…
Reference in New Issue