OPSVM Changes - Ticket: [SOL-830]

- Obtained the OPSVM IP from the director relation
- Made OPSVM-specific changes
- Cleaned up code in various functions
- Added a restart_on_change decorator that restarts the plumgrid service only when one of its configuration files has changed (a minimal sketch of this pattern follows the commit summary)
- Removed restart of plumgrid service when only two directors are available
- Fixed unit tests accordingly
Bilal Baqar 2016-03-28 11:18:41 -07:00
commit 89817b2437
9 changed files with 81 additions and 52 deletions
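The restart_on_change decorator added in pg_gw_utils.py follows a standard charm pattern: hash the tracked configuration files before running the hook body, run it, then restart the service only if any hash changed. Below is a minimal, self-contained sketch of that pattern, assuming a simple sha256-based file_hash helper; the CONF_FILES list and the restart_pg stub are illustrative placeholders, not the charm's actual path_hash/restart_map/restart_pg helpers.

import functools
import hashlib
import os

CONF_FILES = ['/tmp/example-plumgrid.conf']  # placeholder path for illustration


def file_hash(path):
    # Hash a config file, or return None if it does not exist yet.
    if not os.path.isfile(path):
        return None
    with open(path, 'rb') as f:
        return hashlib.sha256(f.read()).hexdigest()


def restart_pg():
    # Stand-in for the charm's actual service restart.
    print('restarting plumgrid')


def restart_on_change(paths):
    # Restart the service only if any file in 'paths' changed while the
    # decorated hook executed.
    def wrap(f):
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            before = {p: file_hash(p) for p in paths}
            result = f(*args, **kwargs)
            if any(file_hash(p) != before[p] for p in paths):
                restart_pg()
            return result
        return wrapped
    return wrap


@restart_on_change(CONF_FILES)
def config_changed():
    # Hook body: render templates / write config files here.
    pass

In the charm itself the decorator wraps the plumgrid-relation and upgrade-charm hooks, with restart_map() supplying the tracked files.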

View File

@@ -16,16 +16,23 @@ from socket import (
)
def _pg_dir_settings():
def _pg_dir_context():
'''
Inspects relation with PLUMgrid director.
'''
director_ips = []
ctxt = {
'opsvm_ip': '127.0.0.1',
'director_ips': [],
}
for rid in relation_ids('plumgrid'):
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
director_ips.append(str(get_host_ip(rdata['private-address'])))
return director_ips
ctxt['director_ips'
].append(str(get_host_ip(rdata['private-address'])))
if "opsvm_ip" in rdata:
ctxt['opsvm_ip'] = \
rdata['opsvm_ip']
return ctxt
class PGGwContext(context.NeutronContext):
@@ -62,16 +69,16 @@ class PGGwContext(context.NeutronContext):
if not pg_ctxt:
return {}
pg_dir_ips = ''
pg_dir_settings = sorted(_pg_dir_settings())
single_ip = True
for ip in pg_dir_settings:
if single_ip:
pg_dir_ips = str(ip)
single_ip = False
else:
pg_dir_ips = pg_dir_ips + ',' + str(ip)
pg_ctxt['local_ip'] = pg_dir_ips
pg_dir_context = _pg_dir_context()
pg_dir_ips = sorted(pg_dir_context['director_ips'])
dir_count = len(pg_dir_ips)
pg_ctxt['director_ips_string'] = (str(pg_dir_ips[0]) + ',' +
str(pg_dir_ips[1]) + ',' +
str(pg_dir_ips[2])
if dir_count == 3 else
str(pg_dir_ips[0])
if dir_count == 1 else
'')
unit_hostname = gethostname()
pg_ctxt['pg_hostname'] = unit_hostname
pg_ctxt['pg_fqdn'] = getfqdn()
@@ -85,5 +92,6 @@ class PGGwContext(context.NeutronContext):
pg_ctxt['label'] = unit_hostname
pg_ctxt['fabric_mode'] = 'host'
pg_ctxt['ext_interfaces'] = get_gw_interfaces()
pg_ctxt['opsvm_ip'] = pg_dir_context['opsvm_ip']
return pg_ctxt

View File

@@ -18,7 +18,6 @@ from charmhelpers.core.host import service_running
from charmhelpers.fetch import (
apt_install,
apt_purge,
configure_sources,
)
@@ -26,6 +25,7 @@ from pg_gw_utils import (
register_configs,
ensure_files,
restart_pg,
restart_map,
stop_pg,
determine_packages,
load_iovisor,
@@ -34,6 +34,8 @@ from pg_gw_utils import (
add_lcm_key,
fabric_interface_changed,
load_iptables,
restart_on_change,
director_cluster_ready
)
hooks = Hooks()
@@ -56,18 +58,16 @@ def install():
add_lcm_key()
@hooks.hook('plumgrid-relation-joined')
@hooks.hook('plumgrid-relation-changed')
def plumgrid_joined():
@restart_on_change(restart_map())
def plumgrid_changed():
'''
This hook is run when relation between plumgrid-gateway and
plumgrid-director is made.
'''
ensure_mtu()
ensure_files()
add_lcm_key()
CONFIGS.write_all()
restart_pg()
if director_cluster_ready():
ensure_mtu()
CONFIGS.write_all()
@hooks.hook('config-changed')
@@ -105,12 +105,10 @@ def config_changed():
@hooks.hook('upgrade-charm')
@restart_on_change(restart_map())
def upgrade_charm():
load_iptables()
ensure_mtu()
ensure_files()
CONFIGS.write_all()
restart_pg()
@hooks.hook('stop')
@@ -119,10 +117,6 @@ def stop():
This hook is run when the charm is destroyed.
'''
stop_pg()
remove_iovisor()
pkgs = determine_packages()
for pkg in pkgs:
apt_purge(pkg, fatal=False)
def main():

View File

@@ -12,7 +12,6 @@ from socket import gethostname as get_unit_hostname
from copy import deepcopy
from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute
from charmhelpers.contrib.storage.linux.ceph import modprobe
from charmhelpers.core.host import set_nic_mtu
from charmhelpers.contrib.openstack import templating
from charmhelpers.core.hookenv import (
log,
@@ -30,7 +29,9 @@ from charmhelpers.core.host import (
write_file,
service_start,
service_stop,
service_running
service_running,
path_hash,
set_nic_mtu
)
from charmhelpers.fetch import (
apt_cache,
@@ -47,6 +48,7 @@ PG_CONF = '%s/conf/pg/plumgrid.conf' % PG_LXC_DATA_PATH
PG_HN_CONF = '%s/conf/etc/hostname' % PG_LXC_DATA_PATH
PG_HS_CONF = '%s/conf/etc/hosts' % PG_LXC_DATA_PATH
PG_IFCS_CONF = '%s/conf/pg/ifcs.conf' % PG_LXC_DATA_PATH
OPS_CONF = '%s/conf/etc/00-pg.conf' % PG_LXC_DATA_PATH
AUTH_KEY_PATH = '%s/root/.ssh/authorized_keys' % PG_LXC_DATA_PATH
IFC_LIST_GW = '/var/run/plumgrid/lxc/ifc_list_gateway'
SUDOERS_CONF = '/etc/sudoers.d/ifc_ctl_sudoers'
@@ -64,6 +66,10 @@ BASE_RESOURCE_MAP = OrderedDict([
'services': ['plumgrid'],
'contexts': [pg_gw_context.PGGwContext()],
}),
(OPS_CONF, {
'services': ['plumgrid'],
'contexts': [pg_gw_context.PGGwContext()],
}),
(PG_IFCS_CONF, {
'services': [],
'contexts': [pg_gw_context.PGGwContext()],
@@ -161,7 +167,7 @@ def stop_pg():
Stops PLUMgrid service.
'''
service_stop('plumgrid')
time.sleep(30)
time.sleep(2)
def load_iovisor():
@@ -380,3 +386,24 @@ def get_cidr_from_iface(interface):
return None
else:
return None
def director_cluster_ready():
dirs_count = len(pg_gw_context._pg_dir_context()['director_ips'])
return True if dirs_count == 1 or dirs_count == 3 else False
def restart_on_change(restart_map):
"""
Restart services based on configuration files changing
"""
def wrap(f):
def wrapped_f(*args, **kwargs):
checksums = {path: path_hash(path) for path in restart_map}
f(*args, **kwargs)
for path in restart_map:
if path_hash(path) != checksums[path]:
restart_pg()
break
return wrapped_f
return wrap

View File

@@ -1 +0,0 @@
pg_gw_hooks.py

View File

@@ -0,0 +1 @@
$template ls_json,"{{'{'}}{{'%'}}timestamp:::date-rfc3339,jsonf:@timestamp%,%source:::jsonf:@source_host%,%msg:::json%}":syslogtag,isequal,"pg:" @{{ opsvm_ip }}:6000;ls_json

View File

@@ -1,4 +1,4 @@
plumgrid_ip={{ local_ip }}
plumgrid_ip={{ director_ips_string }}
plumgrid_port=8001
mgmt_dev={{ interface }}
label={{ label}}

View File

@@ -35,14 +35,14 @@ class PGGwContextTest(CharmTestCase):
@patch.object(charmhelpers.contrib.openstack.context,
'config_flags_parser')
@patch.object(context.PGGwContext, '_save_flag_file')
@patch.object(context, '_pg_dir_settings')
@patch.object(context, '_pg_dir_context')
@patch.object(charmhelpers.contrib.openstack.context,
'neutron_plugin_attribute')
@patch.object(utils, 'get_mgmt_interface')
@patch.object(utils, 'get_fabric_interface')
@patch.object(utils, 'get_gw_interfaces')
def test_neutroncc_context_api_rel(self, _gw_int, _fabric_int,
_mgmt_int, _npa, _pg_dir_settings,
_mgmt_int, _npa, _pg_dir_context,
_save_flag_file, _config_flag,
_unit_get, _unit_priv_ip, _config,
_is_clus, _https, _ens_pkgs):
@@ -54,13 +54,14 @@ class PGGwContextTest(CharmTestCase):
self.maxDiff = None
_npa.side_effect = mock_npa
_unit_get.return_value = '192.168.100.201'
_unit_priv_ip.return_value = '192.168.100.201'
_unit_get.return_value = '192.168.100.203'
_unit_priv_ip.return_value = '192.168.100.203'
self.gethostname.return_value = 'node0'
self.getfqdn.return_value = 'node0'
_is_clus.return_value = False
_config_flag.return_value = False
_pg_dir_settings.return_value = {'pg_dir_ip': '192.168.100.201'}
_pg_dir_context.return_value = {'director_ips': ['192.168.100.201'],
'opsvm_ip': '127.0.0.1'}
_mgmt_int.return_value = 'juju-br0'
_fabric_int.return_value = 'juju-br0'
_gw_int.return_value = ['eth1']
@@ -69,11 +70,12 @@ class PGGwContextTest(CharmTestCase):
'ext_interfaces': ['eth1'],
'config': 'neutron.randomconfig',
'core_plugin': 'neutron.randomdriver',
'local_ip': 'pg_dir_ip',
'local_ip': '192.168.100.203',
'director_ips_string': '192.168.100.201',
'network_manager': 'neutron',
'neutron_plugin': 'plumgrid',
'neutron_security_groups': None,
'neutron_url': 'https://192.168.100.201:9696',
'neutron_url': 'https://192.168.100.203:9696',
'pg_hostname': 'node0',
'pg_fqdn': 'node0',
'interface': 'juju-br0',
@@ -81,5 +83,6 @@ class PGGwContextTest(CharmTestCase):
'label': 'node0',
'fabric_mode': 'host',
'neutron_alchemy_flags': False,
'opsvm_ip': '127.0.0.1',
}
self.assertEquals(expect, napi_ctxt())

View File

@@ -19,7 +19,6 @@ utils.restart_map = _map
TO_PATCH = [
'remove_iovisor',
'apt_install',
'apt_purge',
'CONFIGS',
'log',
'configure_sources',
@@ -30,7 +29,8 @@ TO_PATCH = [
'ensure_mtu',
'add_lcm_key',
'determine_packages',
'load_iptables'
'load_iptables',
'director_cluster_ready'
]
NEUTRON_CONF_DIR = "/etc/neutron"
@@ -62,17 +62,12 @@ class PGGwHooksTests(CharmTestCase):
self.ensure_files.assert_called_with()
self.add_lcm_key.assert_called_with()
def test_plumgrid_joined(self):
self._call_hook('plumgrid-relation-joined')
def test_plumgrid_changed(self):
self._call_hook('plumgrid-relation-changed')
self.director_cluster_ready.return_value = True
self.ensure_mtu.assert_called_with()
self.ensure_files.assert_called_with()
self.add_lcm_key.assert_called_with()
self.CONFIGS.write_all.assert_called_with()
self.restart_pg.assert_called_with()
def test_stop(self):
_pkgs = ['plumgrid-lxc', 'iovisor-dkms']
self._call_hook('stop')
self.stop_pg.assert_called_with()
self.remove_iovisor.assert_called_with()
self.determine_packages.return_value = _pkgs

View File

@@ -53,7 +53,8 @@ class TestPGGwUtils(CharmTestCase):
confs = [nutils.PG_CONF,
nutils.PG_HN_CONF,
nutils.PG_HS_CONF,
nutils.PG_IFCS_CONF]
nutils.PG_IFCS_CONF,
nutils.OPS_CONF]
self.assertItemsEqual(_regconfs.configs, confs)
def test_resource_map(self):
@@ -69,6 +70,7 @@ class TestPGGwUtils(CharmTestCase):
(nutils.PG_CONF, ['plumgrid']),
(nutils.PG_HN_CONF, ['plumgrid']),
(nutils.PG_HS_CONF, ['plumgrid']),
(nutils.OPS_CONF, ['plumgrid']),
(nutils.PG_IFCS_CONF, []),
])
self.assertEqual(expect, _restart_map)