Merge "Add IPFIX exporting to OVS bridges."

Zuul 2018-05-10 08:15:47 +00:00 committed by Gerrit Code Review
commit 98e685dc8b
14 changed files with 165 additions and 38 deletions

View File

@@ -245,3 +245,9 @@ options:
    description: |
      A comma-separated list of Nagios service groups.
      If left empty, the nagios_context will be used as the servicegroup
  ipfix-target:
    type: string
    default:
    description: |
      IPFIX target with the format "IP_Address:Port". This will enable IPFIX
      exporting on all OVS bridges to the target, including br-int and br-ex.

View File

@@ -175,3 +175,22 @@ def full_restart():
        service('start', 'openvswitch-force-reload-kmod')
    else:
        service('force-reload-kmod', 'openvswitch-switch')


def enable_ipfix(bridge, target):
    '''Enable IPFIX on bridge to target.
    :param bridge: Bridge to monitor
    :param target: IPFIX remote endpoint
    '''
    cmd = ['ovs-vsctl', 'set', 'Bridge', bridge, 'ipfix=@i', '--',
           '--id=@i', 'create', 'IPFIX', 'targets="{}"'.format(target)]
    log('Enabling IPFIX on {}.'.format(bridge))
    subprocess.check_call(cmd)


def disable_ipfix(bridge):
    '''Disable IPFIX on target bridge.
    :param bridge: Bridge to modify
    '''
    cmd = ['ovs-vsctl', 'clear', 'Bridge', bridge, 'ipfix']
    subprocess.check_call(cmd)

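For orientation, a minimal usage sketch of the two new helpers; the collector address is a made-up example, and the import path matches the one used by the charm code further below.

    from charmhelpers.contrib.network.ovs import enable_ipfix, disable_ipfix

    # Clear any existing IPFIX configuration first, then point the bridge at a
    # hypothetical collector listening on 10.0.0.5:4739.
    disable_ipfix('br-int')
    enable_ipfix('br-int', '10.0.0.5:4739')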
View File

@@ -151,6 +151,29 @@ def enable(soft_fail=False):
    return True


def reload():
    """
    Reload ufw

    :returns: True if ufw is successfully reloaded
    """
    output = subprocess.check_output(['ufw', 'reload'],
                                     universal_newlines=True,
                                     env={'LANG': 'en_US',
                                          'PATH': os.environ['PATH']})

    m = re.findall('^Firewall reloaded\n',
                   output, re.M)
    hookenv.log(output, level='DEBUG')

    if len(m) == 0:
        hookenv.log("ufw couldn't be reloaded", level='WARN')
        return False
    else:
        hookenv.log("ufw reloaded", level='INFO')
        return True


def disable():
    """
    Disable ufw

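A brief usage sketch for the new reload() helper above (not part of the change); the log message is illustrative only:

    import charmhelpers.core.hookenv as hookenv
    from charmhelpers.contrib.network import ufw

    # Ask ufw to re-read its rule set after the charm has adjusted rules;
    # reload() returns False when ufw did not report "Firewall reloaded".
    if not ufw.reload():
        hookenv.log('ufw reload failed; rules may be stale', level='WARNING')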
View File

@@ -6,6 +6,7 @@ global
    group haproxy
    spread-checks 0
    stats socket /var/run/haproxy/admin.sock mode 600 level admin
    stats socket /var/run/haproxy/operator.sock mode 600 level operator
    stats timeout 2m

defaults

View File

@@ -5,4 +5,7 @@ transport_url = {{ transport_url }}
{% if notification_topics -%}
topics = {{ notification_topics }}
{% endif -%}
{% if notification_format -%}
notification_format = {{ notification_format }}
{% endif -%}
{% endif -%}

View File

@@ -306,7 +306,7 @@ def get_os_codename_install_source(src):
    if src.startswith('cloud:'):
        ca_rel = src.split(':')[1]
        ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
        ca_rel = ca_rel.split('-')[1].split('/')[0]
        return ca_rel

    # Best guess match based on deb string provided

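To make the change concrete, a small worked example with a made-up install source string: the old line split on '%s-' % ubuntu_rel and therefore only matched when the local Ubuntu series prefixed the pocket, while the new line simply splits on the first '-':

    src = 'cloud:xenial-queens/proposed'
    ca_rel = src.split(':')[1]                   # 'xenial-queens/proposed'
    ca_rel = ca_rel.split('-')[1].split('/')[0]  # 'queens'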
View File

@@ -21,6 +21,7 @@ import charmhelpers.contrib.openstack.context as context
import charmhelpers.core.hookenv as hookenv
import charmhelpers.core.host as host
import charmhelpers.core.templating as templating
import charmhelpers.core.unitdata as unitdata

VAULTLOCKER_BACKEND = 'charm-vaultlocker'
@@ -36,30 +37,39 @@ class VaultKVContext(context.OSContextGenerator):
        )

    def __call__(self):
        db = unitdata.kv()
        last_token = db.get('last-token')
        secret_id = db.get('secret-id')
        for relation_id in hookenv.relation_ids(self.interfaces[0]):
            for unit in hookenv.related_units(relation_id):
                vault_url = hookenv.relation_get(
                    'vault_url',
                    unit=unit,
                    rid=relation_id
                )
                role_id = hookenv.relation_get(
                    '{}_role_id'.format(hookenv.local_unit()),
                    unit=unit,
                    rid=relation_id
                )
                data = hookenv.relation_get(unit=unit,
                                            rid=relation_id)
                vault_url = data.get('vault_url')
                role_id = data.get('{}_role_id'.format(hookenv.local_unit()))
                token = data.get('{}_token'.format(hookenv.local_unit()))
                if all([vault_url, role_id, token]):
                    token = json.loads(token)
                    vault_url = json.loads(vault_url)
                    # Tokens may change when secret_id's are being
                    # reissued - if so use token to get new secret_id
                    if token != last_token:
                        secret_id = retrieve_secret_id(
                            url=vault_url,
                            token=token
                        )
                        db.set('secret-id', secret_id)
                        db.set('last-token', token)
                        db.flush()
                if vault_url and role_id:
                    ctxt = {
                        'vault_url': json.loads(vault_url),
                        'vault_url': vault_url,
                        'role_id': json.loads(role_id),
                        'secret_id': secret_id,
                        'secret_backend': self.secret_backend,
                    }
                    vault_ca = hookenv.relation_get(
                        'vault_ca',
                        unit=unit,
                        rid=relation_id
                    )
                    vault_ca = data.get('vault_ca')
                    if vault_ca:
                        ctxt['vault_ca'] = json.loads(vault_ca)
                    self.complete = True
@@ -96,3 +106,21 @@ def vault_relation_complete(backend=None):
    vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
    vault_kv()
    return vault_kv.complete


# TODO: contribute a high-level unwrap method to hvac that works
def retrieve_secret_id(url, token):
    """Retrieve a response-wrapped secret_id from Vault

    :param url: URL to Vault Server
    :ptype url: str
    :param token: One shot Token to use
    :ptype token: str
    :returns: secret_id to use for Vault Access
    :rtype: str"""
    import hvac
    client = hvac.Client(url=url, token=token)
    response = client._post('/v1/sys/wrapping/unwrap')
    if response.status_code == 200:
        data = response.json()
        return data['data']['secret_id']

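For context, a rough sketch (not part of this change) of how a consumer could turn those context values into an authenticated Vault client; the helper name is made up, and it assumes hvac's auth_approle method as shipped in hvac releases of this era.

    import hvac

    def vault_client_from_context(ctxt):
        # ctxt carries vault_url, role_id, secret_id and secret_backend,
        # as assembled by VaultKVContext above.
        client = hvac.Client(url=ctxt['vault_url'])
        # role_id arrives over the relation; secret_id is the value unwrapped
        # from the one-shot token by retrieve_secret_id().
        client.auth_approle(ctxt['role_id'], ctxt['secret_id'])
        return client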
View File

@@ -290,7 +290,7 @@ class Config(dict):
        self.implicit_save = True
        self._prev_dict = None
        self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
        if os.path.exists(self.path):
        if os.path.exists(self.path) and os.stat(self.path).st_size:
            self.load_previous()
        atexit(self._implicit_save)
@@ -310,7 +310,11 @@
        """
        self.path = path or self.path
        with open(self.path) as f:
            self._prev_dict = json.load(f)
            try:
                self._prev_dict = json.load(f)
            except ValueError as e:
                log('Unable to parse previous config data - {}'.format(str(e)),
                    level=ERROR)
        for k, v in copy.deepcopy(self._prev_dict).items():
            if k not in self:
                self[k] = v

View File

@@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
def create(sysctl_dict, sysctl_file):
    """Creates a sysctl.conf file from a YAML associative array

    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
    :param sysctl_dict: a dict or YAML-formatted string of sysctl
        options eg "{ 'kernel.max_pid': 1337 }"
    :type sysctl_dict: str
    :param sysctl_file: path to the sysctl file to be saved
    :type sysctl_file: str or unicode
    :returns: None
    """
    try:
        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
    except yaml.YAMLError:
        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
            level=ERROR)
        return
    if type(sysctl_dict) is not dict:
        try:
            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
        except yaml.YAMLError:
            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
                level=ERROR)
            return
    else:
        sysctl_dict_parsed = sysctl_dict

    with open(sysctl_file, "w") as fd:
        for key, value in sysctl_dict_parsed.items():

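As a usage note, the updated helper now accepts either form; a small sketch (the path and value are illustrative only):

    from charmhelpers.core.sysctl import create

    # A plain dict is written out directly ...
    create({'kernel.max_pid': 1337}, '/etc/sysctl.d/50-charm-example.conf')
    # ... while a YAML-formatted string is still parsed with yaml.safe_load().
    create("{ 'kernel.max_pid': 1337 }", '/etc/sysctl.d/50-charm-example.conf')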
View File

@@ -30,7 +30,9 @@ from charmhelpers.contrib.network.ovs import (
    add_bridge_port,
    is_linuxbridge_interface,
    add_ovsbridge_linuxbridge,
    full_restart
    full_restart,
    enable_ipfix,
    disable_ipfix,
)
from charmhelpers.contrib.hahelpers.cluster import (
    get_hacluster_config,
@@ -730,6 +732,20 @@ def configure_ovs():
                else:
                    add_ovsbridge_linuxbridge(br, port)

    target = config('ipfix-target')
    bridges = [INT_BRIDGE, EXT_BRIDGE]
    bridges.extend(bridgemaps.values())
    if target:
        for bridge in bridges:
            disable_ipfix(bridge)
            enable_ipfix(bridge, target)
    else:
        # NOTE: removing ipfix setting from a bridge is idempotent and
        #       will pass regardless of the existence of the setting
        for bridge in bridges:
            disable_ipfix(bridge)

    # Ensure this runs so that mtu is applied to data-port interfaces if
    # provided.
    service_restart('os-charm-phy-nic-mtu')

View File

@@ -306,7 +306,7 @@ def get_os_codename_install_source(src):
    if src.startswith('cloud:'):
        ca_rel = src.split(':')[1]
        ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
        ca_rel = ca_rel.split('-')[1].split('/')[0]
        return ca_rel

    # Best guess match based on deb string provided

View File

@@ -290,7 +290,7 @@ class Config(dict):
        self.implicit_save = True
        self._prev_dict = None
        self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
        if os.path.exists(self.path):
        if os.path.exists(self.path) and os.stat(self.path).st_size:
            self.load_previous()
        atexit(self._implicit_save)
@@ -310,7 +310,11 @@
        """
        self.path = path or self.path
        with open(self.path) as f:
            self._prev_dict = json.load(f)
            try:
                self._prev_dict = json.load(f)
            except ValueError as e:
                log('Unable to parse previous config data - {}'.format(str(e)),
                    level=ERROR)
        for k, v in copy.deepcopy(self._prev_dict).items():
            if k not in self:
                self[k] = v

View File

@@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
def create(sysctl_dict, sysctl_file):
    """Creates a sysctl.conf file from a YAML associative array

    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
    :param sysctl_dict: a dict or YAML-formatted string of sysctl
        options eg "{ 'kernel.max_pid': 1337 }"
    :type sysctl_dict: str
    :param sysctl_file: path to the sysctl file to be saved
    :type sysctl_file: str or unicode
    :returns: None
    """
    try:
        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
    except yaml.YAMLError:
        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
            level=ERROR)
        return
    if type(sysctl_dict) is not dict:
        try:
            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
        except yaml.YAMLError:
            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
                level=ERROR)
            return
    else:
        sysctl_dict_parsed = sysctl_dict

    with open(sysctl_file, "w") as fd:
        for key, value in sysctl_dict_parsed.items():

View File

@@ -39,6 +39,8 @@ TO_PATCH = [
    'init_is_systemd',
    'os_application_version_set',
    'NeutronAPIContext',
    'enable_ipfix',
    'disable_ipfix',
]
@@ -266,6 +268,19 @@ class TestNeutronUtils(CharmTestCase):
        calls = [call('br-data', 'br-eth0')]
        self.add_ovsbridge_linuxbridge.assert_has_calls(calls)

    @patch('charmhelpers.contrib.openstack.context.config')
    def test_configure_ovs_enable_ipfix(self, mock_config):
        mock_config.side_effect = self.test_config.get
        self.config.side_effect = self.test_config.get
        self.test_config.set('plugin', 'ovs')
        self.test_config.set('ipfix-target', '127.0.0.1:80')
        neutron_utils.configure_ovs()
        self.enable_ipfix.assert_has_calls([
            call('br-int', '127.0.0.1:80'),
            call('br-ex', '127.0.0.1:80'),
            call('br-data', '127.0.0.1:80'),
        ])

    @patch.object(neutron_utils, 'register_configs')
    @patch('charmhelpers.contrib.openstack.templating.OSConfigRenderer')
    def test_do_openstack_upgrade(self, mock_renderer,