[jamespage, r=thedac] Fix Bug:1481362 Make datadir dynamic depending on Ubuntu version

This commit is contained in:
David Ames 2016-01-14 16:04:57 -08:00
commit b61bc2e674
3 changed files with 145 additions and 11 deletions

View File

@ -45,7 +45,6 @@ from charmhelpers.contrib.peerstorage import (
)
from percona_utils import (
determine_packages,
MY_CNF,
setup_percona_repo,
get_host_ip,
get_cluster_hosts,
@ -62,6 +61,7 @@ from percona_utils import (
is_bootstrapped,
get_wsrep_value,
assess_status,
resolve_cnf_file,
)
from charmhelpers.contrib.database.mysql import (
PerconaClusterHelper,
@ -113,8 +113,8 @@ def install():
def render_config(clustered=False, hosts=[]):
if not os.path.exists(os.path.dirname(MY_CNF)):
os.makedirs(os.path.dirname(MY_CNF))
if not os.path.exists(os.path.dirname(resolve_cnf_file())):
os.makedirs(os.path.dirname(resolve_cnf_file()))
context = {
'cluster_name': 'juju_cluster',
@ -125,7 +125,7 @@ def render_config(clustered=False, hosts=[]):
'sst_password': config('sst-password'),
'innodb_file_per_table': config('innodb-file-per-table'),
'table_open_cache': config('table-open-cache'),
'lp1366997_workaround': config('lp1366997-workaround')
'lp1366997_workaround': config('lp1366997-workaround'),
}
if config('prefer-ipv6'):
@ -139,7 +139,8 @@ def render_config(clustered=False, hosts=[]):
context['ipv6'] = False
context.update(PerconaClusterHelper().parse_config())
render(os.path.basename(MY_CNF), MY_CNF, context, perms=0o444)
render(os.path.basename(resolve_cnf_file()),
resolve_cnf_file(), context, perms=0o444)
def render_config_restart_on_changed(clustered, hosts, bootstrap=False):
@ -154,10 +155,10 @@ def render_config_restart_on_changed(clustered, hosts, bootstrap=False):
it is started so long as the new node to be added is guaranteed to have
been restarted so as to apply the new config.
"""
pre_hash = file_hash(MY_CNF)
pre_hash = file_hash(resolve_cnf_file())
render_config(clustered, hosts)
update_db_rels = False
if file_hash(MY_CNF) != pre_hash or bootstrap:
if file_hash(resolve_cnf_file()) != pre_hash or bootstrap:
if bootstrap:
service('bootstrap-pxc', 'mysql')
# NOTE(dosaboy): this will not actually do anything if no cluster

View File

@ -26,6 +26,7 @@ from charmhelpers.core.hookenv import (
WARNING,
ERROR,
status_set,
cached,
)
from charmhelpers.fetch import (
apt_install,
@ -47,8 +48,7 @@ from MySQLdb import (
KEY = "keys/repo.percona.com"
REPO = """deb http://repo.percona.com/apt {release} main
deb-src http://repo.percona.com/apt {release} main"""
MY_CNF = "/etc/mysql/my.cnf"
SEEDED_MARKER = "/var/lib/mysql/seeded"
SEEDED_MARKER = "{data_dir}/seeded"
HOSTS_FILE = '/etc/hosts'
@ -70,12 +70,13 @@ def determine_packages():
def seeded():
''' Check whether service unit is already seeded '''
return os.path.exists(SEEDED_MARKER)
return os.path.exists(SEEDED_MARKER.format(data_dir=resolve_data_dir()))
def mark_seeded():
''' Mark service unit as seeded '''
with open(SEEDED_MARKER, 'w') as seeded:
with open(SEEDED_MARKER.format(data_dir=resolve_data_dir()),
'w') as seeded:
seeded.write('done')
@ -394,3 +395,19 @@ def assess_status():
status_set('blocked', 'Unit is not in sync')
else:
status_set('active', 'Unit is ready')
@cached
def resolve_data_dir():
    """Return the MySQL data directory for this Ubuntu release.

    The Percona XtraDB Cluster packages moved the datadir from
    /var/lib/mysql to /var/lib/percona-xtradb-cluster starting with
    the vivid archive.

    NOTE(review): this relies on Ubuntu codenames sorting alphabetically
    by release; that ordering wraps after 'zesty' — confirm it holds for
    every series this charm targets.
    """
    codename = lsb_release()['DISTRIB_CODENAME']
    if codename >= 'vivid':
        return '/var/lib/percona-xtradb-cluster'
    return '/var/lib/mysql'
@cached
def resolve_cnf_file():
    """Return the path of the mysqld configuration file to render.

    Releases before vivid use the monolithic /etc/mysql/my.cnf; from
    vivid onwards the packages read per-daemon config out of
    /etc/mysql/percona-xtradb-cluster.conf.d/.

    NOTE(review): depends on alphabetical codename ordering — same
    caveat as resolve_data_dir.
    """
    codename = lsb_release()['DISTRIB_CODENAME']
    if codename >= 'vivid':
        return '/etc/mysql/percona-xtradb-cluster.conf.d/mysqld.cnf'
    return '/etc/mysql/my.cnf'

116
templates/mysqld.cnf Normal file
View File

@ -0,0 +1,116 @@
[mysqld]
#
# * Basic Settings
#
user = mysql
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
port = 3306
basedir = /usr
datadir = /var/lib/percona-xtradb-cluster
tmpdir = /tmp
lc-messages-dir = /usr/share/mysql
skip-external-locking
#
# * Networking
#
{% if bind_address -%}
bind-address = {{ bind_address }}
{% else -%}
bind-address = 0.0.0.0
{% endif %}
#
# * Fine Tuning
#
key_buffer = {{ key_buffer }}
table_open_cache = {{ table_open_cache }}
max_allowed_packet = 16M
thread_stack = 192K
thread_cache_size = 8
# This replaces the startup script and checks MyISAM tables if needed
# the first time they are touched
myisam-recover = BACKUP
{% if max_connections != -1 -%}
max_connections = {{ max_connections }}
{% endif %}
{% if wait_timeout != -1 -%}
# Seconds before clearing idle connections
wait_timeout = {{ wait_timeout }}
{% endif %}
#
# * Query Cache Configuration
#
query_cache_limit = 1M
query_cache_size = 16M
#
# * Logging and Replication
#
#
# Error log - should be very few entries.
#
log_error = /var/log/mysql/error.log
#
# The following can be used as easy to replay backup logs or for replication.
# note: if you are setting up a replication slave, see README.Debian about
# other settings you may need to change.
expire_logs_days = 10
max_binlog_size = 100M
#
# * InnoDB
#
{% if innodb_file_per_table -%}
# This enables storing InnoDB tables in separate .ibd files. Note that, however
# existing InnoDB tables will remain in ibdata file(s) unless OPTIMIZE is run
# on them. Still, the ibdata1 file will NOT shrink - a full dump/import of the
# data is needed in order to get rid of large ibdata file.
innodb_file_per_table = 1
{% else -%}
innodb_file_per_table = 0
{% endif %}
innodb_buffer_pool_size = {{ innodb_buffer_pool_size }}
#
# * Galera
#
# Add address of other cluster nodes here
{% if not clustered -%}
# Empty gcomm address is being used when cluster is getting bootstrapped
wsrep_cluster_address=gcomm://
{% else -%}
# Cluster connection URL contains the IPs of node#1, node#2 and node#3
wsrep_cluster_address=gcomm://{{ cluster_hosts }}
{% endif %}
#
# Node address
wsrep_node_address={{ private_address }}
#
# SST method
wsrep_sst_method={{ sst_method }}
#
# Cluster name
wsrep_cluster_name={{ cluster_name }}
#
# Authentication for SST method
wsrep_sst_auth="sstuser:{{ sst_password }}"
{% if wsrep_provider_options -%}
wsrep_provider_options = {{ wsrep_provider_options }}
{% endif %}
#
# * IPv6 SST configuration
#
{% if ipv6 -%}
[sst]
sockopt=,pf=ip6
{% endif %}