Merge "Add support for PXC 5.7 and xtrabackup 2.4"

Zuul 2018-03-19 12:46:40 +00:00 committed by Gerrit Code Review
commit 9cac8b8521
8 changed files with 133 additions and 43 deletions

View File

@ -15,6 +15,11 @@ from charmhelpers.core.hookenv import (
config,
)
from charmhelpers.core.host import (
CompareHostReleases,
lsb_release,
)
from percona_utils import (
pause_unit_helper,
resume_unit_helper,
@ -61,11 +66,14 @@ def backup(args):
if incremental:
optionlist.append("--incremental")
# xtrabackup 2.4 (introduced in Bionic) doesn't support compact backups
if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) < 'bionic':
optionlist.append("--compact")
try:
subprocess.check_call(
['innobackupex', '--compact', '--galera-info', '--rsync',
basedir, '--user=sstuser',
'--password={}'.format(sstpw)] + optionlist)
['innobackupex', '--galera-info', '--rsync', basedir,
'--user=sstuser', '--password={}'.format(sstpw)] + optionlist)
action_set({
'time-completed': (strftime("%Y-%m-%d %H:%M:%S", gmtime())),
'outcome': 'Success'}
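
For reference, the release gate used in this action can be exercised on its own. A minimal sketch, assuming charmhelpers is available on the unit; build_backup_cmd() is a hypothetical helper, not part of the charm:

import subprocess

from charmhelpers.core.host import CompareHostReleases, lsb_release


def build_backup_cmd(basedir, sstpw, incremental=False):
    optionlist = []
    if incremental:
        optionlist.append("--incremental")
    # xtrabackup 2.4 (Bionic and later) no longer supports compact backups.
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) < 'bionic':
        optionlist.append("--compact")
    return (['innobackupex', '--galera-info', '--rsync', basedir,
             '--user=sstuser', '--password={}'.format(sstpw)] + optionlist)


# e.g. subprocess.check_call(build_backup_cmd('/some/backup/dir', 'sst-pass'))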

View File

@ -22,6 +22,8 @@ import six
# from string import upper
from charmhelpers.core.host import (
CompareHostReleases,
lsb_release,
mkdir,
pwgen,
write_file
@ -57,15 +59,6 @@ except ImportError:
import MySQLdb
# NOTE(freyes): Due to skip-name-resolve root@$HOSTNAME account fails when
# using SET PASSWORD so using UPDATE against the mysql.user table is needed,
# but changes to this table are not replicated across the cluster, so this
# update needs to run in all the nodes.
# More info at http://galeracluster.com/documentation-webpages/userchanges.html
SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET password = PASSWORD( %s ) "
"WHERE user = %s;")
class MySQLSetPasswordError(Exception):
pass
@ -311,6 +304,21 @@ class MySQLHelper(object):
'leader settings (%s)') % ex, ex)
try:
# NOTE(freyes): Due to skip-name-resolve root@$HOSTNAME account
# fails when using SET PASSWORD so using UPDATE against the
# mysql.user table is needed, but changes to this table are not
# replicated across the cluster, so this update needs to run in
# all the nodes. More info at
# http://galeracluster.com/documentation-webpages/userchanges.html
release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME'])
if release < 'bionic':
SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET password = "
"PASSWORD( %s ) WHERE user = %s;")
else:
# PXC 5.7 (introduced in Bionic) uses authentication_string
SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET "
"authentication_string = "
"PASSWORD( %s ) WHERE user = %s;")
cursor.execute(SQL_UPDATE_PASSWD, (new_passwd, username))
cursor.execute('FLUSH PRIVILEGES;')
self.connection.commit()
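
The column switch above isolates to a small helper. A minimal sketch, assuming a MySQLdb-style DB-API connection; password_update_sql() and set_cluster_password() are hypothetical names:

from charmhelpers.core.host import CompareHostReleases, lsb_release


def password_update_sql():
    release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME'])
    if release < 'bionic':
        # PXC 5.6 keeps the hash in mysql.user.password
        return ("UPDATE mysql.user SET password = PASSWORD( %s ) "
                "WHERE user = %s;")
    # PXC 5.7 moved the hash to mysql.user.authentication_string
    return ("UPDATE mysql.user SET authentication_string = PASSWORD( %s ) "
            "WHERE user = %s;")


def set_cluster_password(connection, username, new_passwd):
    cursor = connection.cursor()
    try:
        # UPDATEs against mysql.user are not replicated by Galera, so this
        # has to run on every node, followed by an explicit reload of the
        # grant tables.
        cursor.execute(password_update_sql(), (new_passwd, username))
        cursor.execute('FLUSH PRIVILEGES;')
        connection.commit()
    finally:
        cursor.close()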

View File

@ -2,7 +2,7 @@
# Wrapper to deal with newer Ubuntu versions that don't have py2 installed
# by default.
declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml' 'dnspython')
declare -a DEPS=('apt' 'netaddr' 'netifaces' 'yaml' 'dnspython')
check_and_install() {
pkg="${1}-${2}"

View File

@ -33,6 +33,7 @@ from charmhelpers.core.hookenv import (
from charmhelpers.core.host import (
service_restart,
service_start,
service_running,
file_hash,
lsb_release,
CompareHostReleases,
@ -206,9 +207,20 @@ def render_config(clustered=False, hosts=None):
if wsrep_provider_options:
context['wsrep_provider_options'] = wsrep_provider_options
if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) < 'bionic':
# myisam_recover is not valid for PXC 5.7 (introduced in Bionic) so we
# only set it for PXC 5.6.
context['myisam_recover'] = 'BACKUP'
context['wsrep_provider'] = '/usr/lib/libgalera_smm.so'
elif CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'bionic':
context['wsrep_provider'] = '/usr/lib/galera3/libgalera_smm.so'
context['default_storage_engine'] = 'InnoDB'
context['wsrep_log_conflicts'] = True
context['innodb_autoinc_lock_mode'] = '2'
context['pxc_strict_mode'] = 'ENFORCING'
context.update(PerconaClusterHelper().parse_config())
render(os.path.basename(config_file),
config_file, context, perms=0o444)
render(os.path.basename(config_file), config_file, context, perms=0o444)
def render_config_restart_on_changed(clustered, hosts, bootstrap=False):
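
A minimal standalone sketch of the release-dependent context assembled in render_config() above; wsrep_context() is a hypothetical name and the remainder of render_config() is omitted:

from charmhelpers.core.host import CompareHostReleases, lsb_release


def wsrep_context():
    context = {}
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) < 'bionic':
        # myisam_recover is only valid for PXC 5.6
        context['myisam_recover'] = 'BACKUP'
        context['wsrep_provider'] = '/usr/lib/libgalera_smm.so'
    else:
        # PXC 5.7 ships galera3 and needs the additional settings below
        context['wsrep_provider'] = '/usr/lib/galera3/libgalera_smm.so'
        context['default_storage_engine'] = 'InnoDB'
        context['wsrep_log_conflicts'] = True
        context['innodb_autoinc_lock_mode'] = '2'
        context['pxc_strict_mode'] = 'ENFORCING'
    return context
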
@ -239,7 +251,14 @@ def render_config_restart_on_changed(clustered, hosts, bootstrap=False):
# relation id exists yet.
notify_bootstrapped()
update_db_rels = True
else:
elif not service_running('mysql@bootstrap'):
# NOTE(jamespage):
# if mysql@bootstrap is running, then the native
# bootstrap systemd service was used to start this
# instance, and it was the initial seed unit
# so don't try to start the mysql.service unit;
# this also deals with seed units after they have been
# rebooted and mysqld was started by mysql.service.
delay = 1
attempts = 0
max_retries = 5
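
A minimal sketch of the new guard, assuming charmhelpers' service helpers; maybe_start_mysql() is a hypothetical stand-in for the retry loop that follows in the hunk:

from charmhelpers.core.host import service_running, service_start


def maybe_start_mysql():
    if service_running('mysql@bootstrap'):
        # The native bootstrap unit already started mysqld on this seed
        # unit, so leave it alone rather than racing it with mysql.service.
        return
    service_start('mysql')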

View File

@ -77,8 +77,6 @@ SEEDED_MARKER = "{data_dir}/seeded"
HOSTS_FILE = '/etc/hosts'
DEFAULT_MYSQL_PORT = 3306
WSREP_FILE = "/etc/mysql/percona-xtradb-cluster.conf.d/wsrep.cnf"
# NOTE(ajkavanagh) - this is 'required' for the pause/resume code for
# maintenance mode, but is currently not populated as the
# charm_check_function() checks whether the unit is working properly.
@ -122,8 +120,10 @@ def determine_packages():
# NOTE(beisner): Use recommended mysql-client package
# https://launchpad.net/bugs/1476845
# https://launchpad.net/bugs/1571789
# NOTE(coreycb): This will install percona-xtradb-cluster-server-5.6
# for >= wily and percona-xtradb-cluster-server-5.7 for >= bionic.
return [
'percona-xtradb-cluster-server-5.6',
'percona-xtradb-cluster-server',
]
else:
return [
@ -257,12 +257,12 @@ def get_cluster_hosts():
return hosts
SQL_SST_USER_SETUP = ("GRANT RELOAD, LOCK TABLES, REPLICATION CLIENT ON *.* "
"TO 'sstuser'@'localhost' IDENTIFIED BY '{}'")
SQL_SST_USER_SETUP = ("GRANT {permissions} ON *.* "
"TO 'sstuser'@'localhost' IDENTIFIED BY '{password}'")
SQL_SST_USER_SETUP_IPV6 = ("GRANT RELOAD, LOCK TABLES, REPLICATION CLIENT "
SQL_SST_USER_SETUP_IPV6 = ("GRANT {permissions} "
"ON *.* TO 'sstuser'@'ip6-localhost' IDENTIFIED "
"BY '{}'")
"BY '{password}'")
def get_db_helper():
@ -273,10 +273,25 @@ def get_db_helper():
def configure_sstuser(sst_password):
# xtrabackup 2.4 (introduced in Bionic) needs PROCESS privilege for backups
permissions = [
"RELOAD",
"LOCK TABLES",
"REPLICATION CLIENT"
]
if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'bionic':
permissions.append('PROCESS')
m_helper = get_db_helper()
m_helper.connect(password=m_helper.get_mysql_root_password())
m_helper.execute(SQL_SST_USER_SETUP.format(sst_password))
m_helper.execute(SQL_SST_USER_SETUP_IPV6.format(sst_password))
m_helper.execute(SQL_SST_USER_SETUP.format(
permissions=','.join(permissions),
password=sst_password)
)
m_helper.execute(SQL_SST_USER_SETUP_IPV6.format(
permissions=','.join(permissions),
password=sst_password)
)
# TODO: mysql charmhelper
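
A minimal sketch of the grant assembly above, reusing this file's SQL template; sst_grant() is a hypothetical helper and execution through the db helper is omitted:

from charmhelpers.core.host import CompareHostReleases, lsb_release

SQL_SST_USER_SETUP = ("GRANT {permissions} ON *.* "
                      "TO 'sstuser'@'localhost' IDENTIFIED BY '{password}'")


def sst_grant(sst_password):
    permissions = ["RELOAD", "LOCK TABLES", "REPLICATION CLIENT"]
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'bionic':
        # xtrabackup 2.4 additionally needs PROCESS for backups
        permissions.append('PROCESS')
    return SQL_SST_USER_SETUP.format(permissions=','.join(permissions),
                                     password=sst_password)
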
@ -460,21 +475,28 @@ def bootstrap_pxc():
bootstrapped = service('bootstrap-pxc', 'mysql')
if not bootstrapped:
try:
# NOTE(jamespage): execute under systemd-run to ensure
# that the bootstrap-pxc mysqld does
# not end up in the juju unit daemons
# cgroup scope.
cmd = ['systemd-run', '--service-type=forking',
'service', 'mysql', 'bootstrap-pxc']
subprocess.check_call(cmd)
cmp_os = CompareHostReleases(
lsb_release()['DISTRIB_CODENAME']
)
if cmp_os < 'bionic':
# NOTE(jamespage): execute under systemd-run to ensure
# that the bootstrap-pxc mysqld does
# not end up in the juju unit daemons
# cgroup scope.
cmd = ['systemd-run', '--service-type=forking',
'service', 'mysql', 'bootstrap-pxc']
subprocess.check_call(cmd)
else:
service('start', 'mysql@bootstrap')
except subprocess.CalledProcessError as e:
msg = 'Bootstrap PXC failed'
error_msg = '{}: {}'.format(msg, e)
status_set('blocked', msg)
log(error_msg, ERROR)
raise Exception(error_msg)
# To make systemd aware mysql is running after a bootstrap
service('start', 'mysql')
if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) < 'bionic':
# To make systemd aware mysql is running after a bootstrap
service('start', 'mysql')
log("Bootstrap PXC Succeeded", DEBUG)
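
A minimal sketch of the bootstrap split above; bootstrap_pxc_sketch() is a hypothetical name, service() is charmhelpers.core.host.service, and the error handling shown in the hunk is dropped:

import subprocess

from charmhelpers.core.host import CompareHostReleases, lsb_release, service


def bootstrap_pxc_sketch():
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) < 'bionic':
        # Run under systemd-run so the bootstrap mysqld does not end up in
        # the juju unit daemon's cgroup scope.
        subprocess.check_call(['systemd-run', '--service-type=forking',
                               'service', 'mysql', 'bootstrap-pxc'])
        # Make systemd aware that mysql is running after the bootstrap.
        service('start', 'mysql')
    else:
        # PXC 5.7 ships a native systemd bootstrap unit.
        service('start', 'mysql@bootstrap')
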
@ -627,6 +649,12 @@ def services():
@returns [services] - list of strings that are service names.
"""
# NOTE(jamespage): Native systemd variants of the packaging
# use mysql@bootstrap to seed the cluster
# however this is cleared after a reboot,
# so dynamically check to see if this is active
if service('is-active', 'mysql@bootstrap'):
return ['mysql@bootstrap']
return ['mysql']
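
The same function again with the reasoning behind the is-active check spelled out in comments; a sketch, assuming charmhelpers.core.host.service returns True when the underlying systemctl action exits zero:

from charmhelpers.core.host import service


def services():
    # 'systemctl is-active mysql@bootstrap' only succeeds on a seed unit
    # that was started via the native bootstrap unit; after a reboot mysqld
    # comes back under mysql.service, so the plain unit is returned.
    if service('is-active', 'mysql@bootstrap'):
        return ['mysql@bootstrap']
    return ['mysql']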

View File

@ -34,9 +34,11 @@ max_allowed_packet = 16M
thread_stack = 192K
thread_cache_size = 8
{% if myisam_recover -%}
# This replaces the startup script and checks MyISAM tables if needed
# the first time they are touched
myisam-recover = BACKUP
myisam-recover = {{ myisam_recover }}
{% endif %}
{% if max_connections != -1 -%}
max_connections = {{ max_connections }}
@ -47,6 +49,11 @@ max_connections = {{ max_connections }}
wait_timeout = {{ wait_timeout }}
{% endif %}
{% if pxc_strict_mode -%}
# Avoid use of experimental and unsupported features in PXC
pxc_strict_mode = {{ pxc_strict_mode }}
{% endif %}
#
# * Query Cache Configuration
#
@ -75,6 +82,14 @@ max_binlog_size = {{ binlogs_max_size }}
# Required to allow trigger creation for openstack services
log_bin_trust_function_creators = 1
# In order for Galera to work correctly binlog format should be ROW
binlog_format=ROW
{% if default_storage_engine -%}
# Default storage engine
default_storage_engine = {{ default_storage_engine }}
{% endif %}
#
# * InnoDB
#
@ -104,10 +119,16 @@ innodb_change_buffering = {{ innodb_change_buffering }}
innodb_io_capacity = {{ innodb_io_capacity }}
{% endif %}
{% if innodb_autoinc_lock_mode -%}
# InnoDB AUTO_INCREMENT Lock Mode
innodb_autoinc_lock_mode = {{ innodb_autoinc_lock_mode }}
{% endif %}
#
# * Galera
#
wsrep_provider=/usr/lib/libgalera_smm.so
wsrep_provider={{ wsrep_provider }}
# Add address of other cluster nodes here
{% if not clustered and is_leader -%}
@ -131,6 +152,11 @@ wsrep_cluster_name={{ cluster_name }}
# Authentication for SST method
wsrep_sst_auth="sstuser:{{ sst_password }}"
{% if wsrep_log_conflicts -%}
# Log additional information about conflicts
wsrep_log_conflicts
{% endif %}
{% if wsrep_provider_options -%}
wsrep_provider_options = {{ wsrep_provider_options }}
{% endif %}
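
A minimal sketch of how the new template variables above expand, feeding a hand-built context straight into jinja2 rather than going through the charm's render() helper; the snippet only reproduces the conditional blocks from this hunk:

from jinja2 import Template

SNIPPET = """\
{% if pxc_strict_mode -%}
pxc_strict_mode = {{ pxc_strict_mode }}
{% endif %}
wsrep_provider={{ wsrep_provider }}
{% if wsrep_log_conflicts -%}
# Log additional information about conflicts
wsrep_log_conflicts
{% endif %}
"""

print(Template(SNIPPET).render(
    pxc_strict_mode='ENFORCING',
    wsrep_provider='/usr/lib/galera3/libgalera_smm.so',
    wsrep_log_conflicts=True))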

View File

@ -206,13 +206,13 @@ class UtilsTests(unittest.TestCase):
def test_packages_eq_wily(self, mock_lsb_release):
mock_lsb_release.return_value = {'DISTRIB_CODENAME': 'wily'}
self.assertEqual(percona_utils.determine_packages(),
['percona-xtradb-cluster-server-5.6'])
['percona-xtradb-cluster-server'])
@mock.patch.object(percona_utils, 'lsb_release')
def test_packages_gt_wily(self, mock_lsb_release):
mock_lsb_release.return_value = {'DISTRIB_CODENAME': 'xenial'}
self.assertEqual(percona_utils.determine_packages(),
['percona-xtradb-cluster-server-5.6'])
['percona-xtradb-cluster-server'])
@mock.patch.object(percona_utils, 'lsb_release')
def test_packages_lt_wily(self, mock_lsb_release):
@ -363,19 +363,20 @@ class UtilsTestsCTC(CharmTestCase):
)
application_version_set.assert_called_with('5.6.17')
@mock.patch.object(percona_utils, 'REQUIRED_INTERFACES')
@mock.patch.object(percona_utils, 'services')
@mock.patch.object(percona_utils, 'REQUIRED_INTERFACES')
@mock.patch.object(percona_utils, 'make_assess_status_func')
def test_assess_status_func(self,
make_assess_status_func,
services,
REQUIRED_INTERFACES):
services.return_value = 's1'
REQUIRED_INTERFACES,
services):
services.return_value = ['mysql']
percona_utils.assess_status_func('test-config')
# ports=None whilst port checks are disabled.
make_assess_status_func.assert_called_once_with(
'test-config', REQUIRED_INTERFACES, charm_func=mock.ANY,
services='s1', ports=None)
services=['mysql'], ports=None)
services.assert_called_once()
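
The decorator swap above requires the argument order to follow, since mock applies patch decorators bottom-up and injects the mocks in that order. A minimal standalone illustration, patching stand-in attributes rather than the charm's:

import os
import unittest
from unittest import mock


class DecoratorOrderTest(unittest.TestCase):

    @mock.patch.object(os.path, 'isdir')   # outermost decorator -> last arg
    @mock.patch.object(os.path, 'isfile')  # innermost decorator -> first arg
    def test_order(self, isfile, isdir):
        isfile.return_value = True
        isdir.return_value = False
        self.assertTrue(os.path.isfile('anything'))
        self.assertFalse(os.path.isdir('anything'))


if __name__ == '__main__':
    unittest.main()
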
def test_pause_unit_helper(self):
with mock.patch.object(percona_utils, '_pause_resume_helper') as prh: