Update VPNaaS plugin for 7.0

- replaced deprecated parameters with current ones
- update metadata file for 7.0
- removed custom packages because upstream mirrors are available for cluster
- removed old and unused code
- refactored OCF script

Change-Id: Id7cf8037ade92bae346164ae14a4a2162ce27cee
This commit is contained in:
Sergey Kolekonov 2015-08-03 18:13:02 +03:00
parent 867db9c18f
commit ce1ef8899b
21 changed files with 170 additions and 1084 deletions

View File

@ -4,7 +4,7 @@ VPNaaS plugin
VPNaaS (VPN-as-a-Service) is a Neutron extension that introduces VPN feature set.
This repo contains all necessary files to build VPNaaS Fuel plugin.
Currently, the only supported Fuel version is 6.1.
Supported Fuel version is 7.0.
Building the plugin
-------------------
@ -48,6 +48,8 @@ VPNaaS functionality. No user interaction is required,
VPNaaS will be enabled
immediately after deployment.
Currently this plugin is not compatible with Neutron DVR
Accessing VPNaaS functionality
------------------------------

View File

@ -1,7 +0,0 @@
# Development/test dependency declarations for this Puppet module.
source 'https://rubygems.org'

# Pin puppet to PUPPET_VERSION from the environment when provided,
# otherwise accept any release >= 3.3.
puppet_requirement =
  if ENV.key?('PUPPET_VERSION')
    "= #{ENV['PUPPET_VERSION']}"
  else
    ['>= 3.3']
  end

gem 'puppet', puppet_requirement
gem 'puppetlabs_spec_helper', '>= 0.1.0'
gem 'puppet-lint', '>= 0.3.2'
gem 'facter', '>= 1.7.0'

View File

@ -1,18 +0,0 @@
require 'rubygems'
require 'puppetlabs_spec_helper/rake_tasks'
require 'puppet-lint/tasks/puppet-lint'

# Relax the 80-character rule and keep fixture/packaged manifests out of lint.
PuppetLint.configuration.send('disable_80chars')
PuppetLint.configuration.ignore_paths = ["spec/**/*.pp", "pkg/**/*.pp"]

desc "Validate manifests, templates, and ruby files"
task :validate do
  # Syntax-check every Puppet manifest in the module.
  Dir['manifests/**/*.pp'].each do |pp_file|
    sh "puppet parser validate --noop #{pp_file}"
  end
  # Syntax-check Ruby sources, skipping spec fixtures.
  Dir['spec/**/*.rb', 'lib/**/*.rb'].each do |rb_file|
    sh "ruby -c #{rb_file}" unless rb_file =~ /spec\/fixtures/
  end
  # Render each ERB template and syntax-check the generated Ruby.
  Dir['templates/**/*.erb'].each do |erb_file|
    sh "erb -P -x -T '-' #{erb_file} | ruby -c"
  end
end

View File

@ -17,20 +17,19 @@
# OCF instance parameters:
# OCF_RESKEY_binary
# OCF_RESKEY_config
# OCF_RESKEY_plugin_config
# OCF_RESKEY_vpn_config
# OCF_RESKEY_plugin_config
# OCF_RESKEY_log_file
# OCF_RESKEY_user
# OCF_RESKEY_pid
# OCF_RESKEY_neutron_server_port
# OCF_RESKEY_additional_parameters
# OCF_RESKEY_external_bridge
#######################################################################
# Initialization:
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
umask 0022
#######################################################################
# Fill in some defaults if no values are specified
@ -39,39 +38,23 @@ PATH=/sbin:/usr/sbin:/bin:/usr/bin
OCF_RESKEY_binary_default="neutron-vpn-agent"
OCF_RESKEY_config_default="/etc/neutron/neutron.conf"
OCF_RESKEY_keystone_config_default="/etc/keystone/keystone.conf"
OCF_RESKEY_vpn_config_default="/etc/neutron/vpn_agent.ini"
OCF_RESKEY_plugin_config_default="/etc/neutron/l3_agent.ini"
OCF_RESKEY_log_file_default="/var/log/neutron/vpn-agent.log"
OCF_RESKEY_vpn_config_default="/etc/neutron/vpn_agent.ini"
OCF_RESKEY_user_default="neutron"
OCF_RESKEY_pid_default="${HA_RSCTMP}/${__SCRIPT_NAME}/${__SCRIPT_NAME}.pid"
OCF_RESKEY_os_auth_url_default="http://localhost:5000/v2.0"
OCF_RESKEY_username_default="neutron"
OCF_RESKEY_password_default="neutron_pass"
OCF_RESKEY_tenant_default="services"
OCF_RESKEY_external_bridge_default="br-ex"
OCF_RESKEY_multiple_agents_default=true
OCF_RESKEY_rescheduling_tries_default=5
OCF_RESKEY_rescheduling_interval_default=33
OCF_RESKEY_debug_default=false
OCF_RESKEY_log_file_default="/var/log/neutron/vpn-agent.log"
OCF_RESKEY_remove_artifacts_on_stop_start_default='true'
: ${OCF_RESKEY_os_auth_url=${OCF_RESKEY_os_auth_url_default}}
: ${OCF_RESKEY_username=${OCF_RESKEY_username_default}}
: ${OCF_RESKEY_password=${OCF_RESKEY_password_default}}
: ${OCF_RESKEY_tenant=${OCF_RESKEY_tenant_default}}
: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}}
: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}}
: ${OCF_RESKEY_keystone_config=${OCF_RESKEY_keystone_config_default}}
: ${OCF_RESKEY_plugin_config=${OCF_RESKEY_plugin_config_default}}
: ${OCF_RESKEY_vpn_config=${OCF_RESKEY_vpn_config_default}}
: ${OCF_RESKEY_plugin_config=${OCF_RESKEY_plugin_config_default}}
: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}}
: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}}
: ${OCF_RESKEY_multiple_agents=${OCF_RESKEY_multiple_agents_default}}
: ${OCF_RESKEY_external_bridge=${OCF_RESKEY_external_bridge_default}}
: ${OCF_RESKEY_debug=${OCF_RESKEY_debug_default}}
: ${OCF_RESKEY_rescheduling_tries=${OCF_RESKEY_rescheduling_tries_default}}
: ${OCF_RESKEY_rescheduling_interval=${OCF_RESKEY_rescheduling_interval_default}}
: ${OCF_RESKEY_log_file=${OCF_RESKEY_log_file_default}}
: ${OCF_RESKEY_remove_artifacts_on_stop_start=${OCF_RESKEY_remove_artifacts_on_stop_start_default}}
#######################################################################
@ -83,6 +66,7 @@ usage() {
The 'start' operation starts the networking service.
The 'stop' operation stops the networking service.
The 'reload' operation restarts the networking service without removing any artifacts
The 'validate-all' operation reports whether the parameters are valid
The 'meta-data' operation reports this RA's meta-data information
The 'status' operation reports whether the networking service is running
@ -99,42 +83,43 @@ meta_data() {
<version>1.0</version>
<longdesc lang="en">
Resource agent for the OpenStack Router (neutron-vpn-agent)
Resource agent for the OpenStack VPN agent (neutron-vpn-agent)
May manage a neutron-vpn-agent instance or a clone set that
creates a distributed neutron-vpn-agent cluster.
</longdesc>
<shortdesc lang="en">Manages the OpenStack L3 Service (neutron-vpn-agent)</shortdesc>
<parameters>
<parameter name="dummy" unique="1">
<longdesc lang="en">
This is a dummy parameter.
Pacemaker needs it to enable reload operation for the resource
</longdesc>
<shortdesc lang="en">Dummy parameter</shortdesc>
<content type="boolean" default="false" />
</parameter>
<parameter name="binary" unique="0" required="0">
<longdesc lang="en">
Location of the OpenStack Router server binary (neutron-vpn-agent)
Location of the OpenStack VPN agent server binary (neutron-vpn-agent)
</longdesc>
<shortdesc lang="en">OpenStack Router server binary (neutron-vpn-agent)</shortdesc>
<shortdesc lang="en">OpenStack VPN agent server binary (neutron-vpn-agent)</shortdesc>
<content type="string" default="${OCF_RESKEY_binary_default}" />
</parameter>
<parameter name="config" unique="0" required="0">
<longdesc lang="en">
Location of the OpenStack Router (neutron-server) configuration file
Location of the OpenStack VPN agent (neutron-server) configuration file
</longdesc>
<shortdesc lang="en">OpenStack Router (neutron-server) config file</shortdesc>
<shortdesc lang="en">OpenStack VPN agent (neutron-server) config file</shortdesc>
<content type="string" default="${OCF_RESKEY_config_default}" />
</parameter>
<parameter name="keystone_config" unique="0" required="0">
<longdesc lang="en">
Location of the Keystone configuration file
</longdesc>
<shortdesc lang="en">OpenStack Keystone config file</shortdesc>
<content type="string" default="${OCF_RESKEY_keystone_config_default}" />
</parameter>
<parameter name="plugin_config" unique="0" required="0">
<longdesc lang="en">
Location of the OpenStack L3 Service (neutron-l3-agent) configuration file
Location of the OpenStack L3 Service (neutron-vpn-agent) configuration file
</longdesc>
<shortdesc lang="en">OpenStack Router (neutron-l3-agent) config file</shortdesc>
<shortdesc lang="en">OpenStack VPN agent (neutron-vpn-agent) config file</shortdesc>
<content type="string" default="${OCF_RESKEY_plugin_config_default}" />
</parameter>
@ -162,14 +147,6 @@ The pid file to use for this OpenStack L3 Service (neutron-vpn-agent) instance
<content type="string" default="${OCF_RESKEY_pid_default}" />
</parameter>
<parameter name="multiple_agents" unique="0" required="0">
<longdesc lang="en">
Flag that switches the resource-agent behavior between multiple and single L3-agent setups.
</longdesc>
<shortdesc lang="en">Switch between multiple and single L3-agent behavior</shortdesc>
<content type="string" default="${OCF_RESKEY_multiple_agents_default}" />
</parameter>
<parameter name="log_file" unique="0" required="0">
<longdesc lang="en">
The log file to use for this OpenStack L3 Service (neutron-vpn-agent) instance
@ -178,93 +155,20 @@ The log file to use for this OpenStack L3 Service (neutron-vpn-agent) instance
<content type="string" default="${OCF_RESKEY_log_file_default}" />
</parameter>
<parameter name="neutron_server_port" unique="0" required="0">
<longdesc lang="en">
The listening port number of the AMQP server. Mandatory to perform a monitor check
</longdesc>
<shortdesc lang="en">AMQP listening port</shortdesc>
<content type="integer" default="${OCF_RESKEY_neutron_server_port_default}" />
</parameter>
<parameter name="username" unique="0" required="0">
<longdesc lang="en">
Neutron username for port list fetching
</longdesc>
<shortdesc lang="en">Neutron username</shortdesc>
<content type="string" default="${OCF_RESKEY_username_default}" />
</parameter>
<parameter name="password" unique="0" required="0">
<longdesc lang="en">
Neutron password for port list fetching
</longdesc>
<shortdesc lang="en">Neutron password</shortdesc>
<content type="string" default="${OCF_RESKEY_password_default}" />
</parameter>
<parameter name="os_auth_url" unique="0" required="0">
<longdesc lang="en">
URL of keystone
</longdesc>
<shortdesc lang="en">Keystone URL</shortdesc>
<content type="string" default="${OCF_RESKEY_os_auth_url_default}" />
</parameter>
<parameter name="tenant" unique="0" required="0">
<longdesc lang="en">
Admin tenant name
</longdesc>
<shortdesc lang="en">Admin tenant</shortdesc>
<content type="string" default="${OCF_RESKEY_tenant_default}" />
</parameter>
<parameter name="external_bridge" unique="0" required="0">
<longdesc lang="en">
External bridge for vpn-agent
External bridge for l3-agent
</longdesc>
<shortdesc lang="en">External bridge</shortdesc>
<content type="string" />
</parameter>
<parameter name="debug" unique="0" required="0">
<longdesc lang="en">
Enable debug logging
</longdesc>
<shortdesc lang="en">Enable debug logging</shortdesc>
<content type="boolean" default="false"/>
</parameter>
<parameter name="rescheduling_tries" unique="0" required="0">
<longdesc lang="en">
Tries to start rescheduling script after start of agent.
</longdesc>
<shortdesc lang="en">Tries to start rescheduling script after start of agent.</shortdesc>
<content type="boolean" default="${OCF_RESKEY_rescheduling_tries_default}"/>
</parameter>
<parameter name="rescheduling_interval" unique="0" required="0">
<longdesc lang="en">
Interval between starts of rescheduling script.
</longdesc>
<shortdesc lang="en">Interval between starts of rescheduling script.</shortdesc>
<content type="boolean" default="${OCF_RESKEY_rescheduling_interval_default}"/>
</parameter>
<parameter name="syslog" unique="0" required="0">
<longdesc lang="en">
Enable logging to syslog
</longdesc>
<shortdesc lang="en">Enable logging to syslog</shortdesc>
<content type="boolean" default="false"/>
</parameter>
<parameter name="additional_parameters" unique="0" required="0">
<parameter name="remove_artifacts_on_stop_start" unique="0" required="0">
<longdesc lang="en">
Additional parameters to pass on to the OpenStack L3 Service (neutron-vpn-agent)
Clean up all resources created by Neutron VPN agent, such as additional processes,
network namespaces, created interfaces, on agent stop and start.
</longdesc>
<shortdesc lang="en">Additional parameters for neutron-vpn-agent</shortdesc>
<shortdesc lang="en">Clean up all resources created by VPN agent on its start and stop</shortdesc>
<content type="string" />
</parameter>
@ -275,6 +179,7 @@ Additional parameters to pass on to the OpenStack L3 Service (neutron-vpn-agent)
<actions>
<action name="start" timeout="20" />
<action name="stop" timeout="20" />
<action name="reload" timeout="30" />
<action name="status" timeout="20" />
<action name="monitor" timeout="30" interval="20" />
<action name="validate-all" timeout="5" />
@ -301,7 +206,7 @@ get_worker_pid() {
#######################################################################
# Functions invoked by resource manager actions
neutron_l3_agent_validate() {
neutron_vpn_agent_validate() {
local rc
check_binary $OCF_RESKEY_binary
@ -327,23 +232,7 @@ neutron_l3_agent_validate() {
true
}
setup_auth() {
# Derive token-based auth arguments for the cleanup helper when the
# Keystone config file (and its admin_token option) is readable here.
AUTH_TOKEN=""
if [[ -f $OCF_RESKEY_keystone_config ]] ; then
# Extract the admin_token value; commented-out lines are skipped first.
AUTH_TOKEN=$(grep -v '#' $OCF_RESKEY_keystone_config | grep -i 'admin_token\s*=\s*' | awk -F'=' '{print $2}')
fi
AUTH_TAIL=""
if [[ -n "$AUTH_TOKEN" ]] ; then
AUTH_TAIL="--admin-auth-url=${OCF_RESKEY_os_auth_url} --auth-token=${AUTH_TOKEN}"
fi
# Always succeed: a missing token simply leaves AUTH_TAIL empty.
true
}
neutron_l3_agent_status() {
neutron_vpn_agent_status() {
local pid
local f_pid
local rc
@ -394,189 +283,121 @@ neutron_l3_agent_status() {
}
get_ns_list() {
# Print the names of all qrouter-* network namespaces
# (one per router handled by the L3/VPN agent).
local rv=`ip netns list | grep -Ee "^qrouter-.*"`
echo $rv
}
get_pid_list_for_ns_list() {
# Print the PIDs of every process running inside the given namespaces.
# Parameters contain namespace names for searching pids
local ns_list="$@"
local pids=`for netns in $ns_list ; do ip netns pids $netns ; done`
echo $pids
}
clean_up() {
# Kill every process running inside the agent's qrouter-* namespaces,
# then remove leftover ports. Must run before clean_up_namespaces.
ns_list=`get_ns_list`
# kill all processes in the agent's network namespaces that use the network
count=3 # we will try to kill each process up to 3 times
while [ $count -gt 0 ]; do
# we can't use ps, because ps can't select processes for given network namespace
inside_ns_pids=`get_pid_list_for_ns_list "$ns_list"`
if [ -z "$inside_ns_pids" ] ; then
break
fi
for ns_pid in $inside_ns_pids ; do
ocf_run kill $ns_pid
done
sleep 1
count=$(($count - 1))
done
# force-kill any remaining processes that survived the plain kill above
inside_ns_pids=`get_pid_list_for_ns_list "$ns_list"`
if [ ! -z "$inside_ns_pids" ] ; then
for ns_pid in $inside_ns_pids ; do
ocf_run kill -9 $ns_pid
done
fi
# clean up network interfaces left behind by the agent
q-agent-cleanup.py --agent=l3 --cleanup-ports
}
clean_up_namespaces() {
# Delete unneeded network namespaces.
#
# Be careful: no process may still be using the network inside any of
# these namespaces — run clean_up before calling this function.
ns_list=`get_ns_list`
if [ ! -z "$ns_list" ] ; then
for ns_name in $ns_list ; do
ocf_run ip --force netns del $ns_name
done
fi
}
neutron_l3_agent_monitor() {
neutron_l3_agent_status
neutron_vpn_agent_monitor() {
neutron_vpn_agent_status
rc=$?
return $rc
}
neutron_l3_agent_start() {
neutron_vpn_agent_start() {
local rc
# This variable is overridden by reload operation
# to perform fast resource restart
local remove_artifacts_on_stop_start=${1:-$OCF_RESKEY_remove_artifacts_on_stop_start}
neutron_l3_agent_status
neutron_vpn_agent_status
rc=$?
if [ $rc -eq $OCF_SUCCESS ]; then
ocf_log info "OpenStack neutron-l3-agent already running"
ocf_log info "OpenStack neutron-vpn-agent already running"
return $OCF_SUCCESS
fi
clean_up
sleep 1
clean_up_namespaces
if ocf_is_true "$remove_artifacts_on_stop_start"; then
neutron-netns-cleanup --agent-type=l3 --force --config-file $OCF_RESKEY_config
fi
# run and detach to background agent as daemon.
# Don't use ocf_run as we're sending the tool's output to /dev/null
su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \
--config-file=$OCF_RESKEY_plugin_config --config-file=$OCF_RESKEY_vpn_config --log-file=$OCF_RESKEY_log_file $OCF_RESKEY_additional_parameters \
--config-file=$OCF_RESKEY_plugin_config --config-file=$OCF_RESKEY_vpn_config --log-file=$OCF_RESKEY_log_file \
>> /dev/null"' 2>&1 & echo \$! > $OCF_RESKEY_pid'
ocf_log debug "Create pid file: ${OCF_RESKEY_pid} with content $(cat ${OCF_RESKEY_pid})"
# Spin waiting for the server to come up.
# Let the CRM/LRM time us out if required
while true; do
neutron_l3_agent_monitor
neutron_vpn_agent_monitor
rc=$?
[ $rc -eq $OCF_SUCCESS ] && break
if [ $rc -ne $OCF_NOT_RUNNING ] ; then
ocf_log err "OpenStack neutron-l3-agent start failed"
ocf_log err "OpenStack neutron-vpn-agent start failed"
exit $OCF_ERR_GENERIC
fi
sleep 3
done
if ! ocf_is_true "$OCF_RESKEY_multiple_agents" ; then
# detach deferred rescheduling procedure
RESCHEDULING_CMD="q-agent-cleanup.py --agent=l3 --reschedule --remove-dead ${AUTH_TAIL} 2>&1 >> /var/log/neutron/rescheduling.log"
RESCH_CMD=''
for ((i=0; i<$OCF_RESKEY_rescheduling_tries; i++)) ; do
RESCH_CMD="$RESCH_CMD sleep $OCF_RESKEY_rescheduling_interval ; $RESCHEDULING_CMD ;"
done
bash -c "$RESCH_CMD" &
fuel-fdb-cleaner --ssh-keyfile /root/.ssh/id_rsa_neutron -l /var/log/neutron/fdb-cleaner.log
fi
ocf_log info "OpenStack Router (neutron-l3-agent) started"
ocf_log info "OpenStack VPN agent (neutron-vpn-agent) started"
return $OCF_SUCCESS
}
neutron_l3_agent_stop() {
neutron_vpn_agent_stop() {
local rc
local pid
# This variable is overridden by reload operation
# to perform fast resource restart
local remove_artifacts_on_stop_start=${1:-$OCF_RESKEY_remove_artifacts_on_stop_start}
neutron_l3_agent_status
neutron_vpn_agent_status
rc=$?
if [ $rc -eq $OCF_NOT_RUNNING ]; then
clean_up
sleep 1
clean_up_namespaces
ocf_log info "OpenStack Router ($OCF_RESKEY_binary) already stopped"
return $OCF_SUCCESS
fi
# Try SIGTERM
pid=`get_worker_pid`
if [ "xxx$pid" == "xxx" ] ; then
ocf_log warn "OpenStack Router ($OCF_RESKEY_binary) not running."
#return $OCF_NOT_RUNNING
return $OCF_SUCCESS
fi
ocf_run kill -s TERM $pid
rc=$?
if [ $rc -ne 0 ]; then
ocf_log err "OpenStack Router ($OCF_RESKEY_binary) couldn't be stopped"
exit $OCF_ERR_GENERIC
fi
# stop waiting
shutdown_timeout=15
if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then
shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5))
fi
count=0
while [ $count -lt $shutdown_timeout ]; do
neutron_l3_agent_status
rc=$?
if [ $rc -eq $OCF_NOT_RUNNING ]; then
break
if ocf_is_true "$remove_artifacts_on_stop_start"; then
neutron-netns-cleanup --agent-type=l3 --force --config-file $OCF_RESKEY_config
fi
count=`expr $count + 1`
sleep 1
ocf_log debug "OpenStack Router ($OCF_RESKEY_binary) still hasn't stopped yet. Waiting ..."
ocf_log info "OpenStack VPN agent ($OCF_RESKEY_binary) already stopped"
return $OCF_SUCCESS
fi
# Terminate agent daemon
pid=`get_worker_pid`
shutdown_timeout=15
iteration_time=1
if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then
shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-6))
fi
clock=0
# Try to terminate gracefully
while [ -d /proc/${pid}/ ] && [ $clock -lt $shutdown_timeout ]; do
ocf_log debug "Stopping VPN agent (${OCF_RESKEY_binary}) gracefully with SIGTERM"
ocf_run kill -s TERM ${pid}
sleep $iteration_time
((clock+=$iteration_time))
done
neutron_l3_agent_status
rc=$?
if [ $rc -ne $OCF_NOT_RUNNING ]; then
# SIGTERM didn't help either, try SIGKILL
ocf_log info "OpenStack Router ($OCF_RESKEY_binary) failed to stop after ${shutdown_timeout}s \
using SIGTERM. Trying SIGKILL ..."
ocf_run kill -s KILL $pid
# Send kill signal if process is still up
if [ -d /proc/${pid}/ ] ; then
ocf_log debug "Killing VPN agent (${OCF_RESKEY_binary}) with SIGKILL"
ocf_run kill -s KILL ${pid}
sleep 1
if [ -d /proc/${pid}/ ] ; then
ocf_log err "OpenStack VPN agent (${OCF_RESKEY_binary}) stop failed"
return $OCF_ERR_GENERIC
fi
fi
ocf_log info "OpenStack Router ($OCF_RESKEY_binary) stopped"
ocf_log info "OpenStack VPN agent ($OCF_RESKEY_binary) stopped"
ocf_log debug "Delete pid file: ${OCF_RESKEY_pid} with content $(cat ${OCF_RESKEY_pid})"
rm -f $OCF_RESKEY_pid
clean_up
sleep 1
clean_up_namespaces
if ! ocf_is_true "$OCF_RESKEY_multiple_agents" ; then
echo ok >> /var/log/neutron/rescheduling.log &
q-agent-cleanup.py --agent=l3 --remove-self ${AUTH_TAIL} 2>&1 >> /var/log/neutron/rescheduling.log &
if ocf_is_true "$remove_artifacts_on_stop_start"; then
neutron-netns-cleanup --agent-type=l3 --force --config-file $OCF_RESKEY_config
fi
sleep 3
return $OCF_SUCCESS
}
neutron_vpn_agent_reload() {
# Fast restart: stop then start the agent, passing 'false' so both
# skip the remove_artifacts_on_stop_start cleanup (namespaces, ports).
neutron_vpn_agent_stop false
neutron_vpn_agent_start false
}
#######################################################################
case "$1" in
@ -587,17 +408,17 @@ case "$1" in
esac
# Anything except meta-data and help must pass validation
neutron_l3_agent_validate || exit $?
setup_auth || exit $?
neutron_vpn_agent_validate || exit $?
umask 0022
# What kind of method was invoked?
case "$1" in
start) neutron_l3_agent_start;;
stop) neutron_l3_agent_stop;;
status) neutron_l3_agent_status;;
monitor) neutron_l3_agent_monitor;;
start) neutron_vpn_agent_start;;
stop) neutron_vpn_agent_stop;;
reload) neutron_vpn_agent_reload;;
status) neutron_vpn_agent_status;;
monitor) neutron_vpn_agent_monitor;;
validate-all) ;;
*) usage
exit $OCF_ERR_UNIMPLEMENTED;;
esac
esac

View File

@ -1,631 +0,0 @@
#!/usr/bin/env python
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
from ConfigParser import SafeConfigParser
import functools
import json
import logging
import logging.config
import logging.handlers
import re
import socket
import StringIO
import subprocess
import sys
from time import sleep
from neutronclient.neutron import client as n_client
LOG_NAME = 'q-agent-cleanup'
API_VER = '2.0'
PORT_ID_PART_LEN = 11
def make_logger(handler=None, level=logging.INFO):
    """Create and return the script-wide logger.

    :param handler: logging handler to attach; defaults to a fresh
        ``StreamHandler`` on stdout. (Previously the default handler was
        created once at import time and shared by every call, so repeated
        calls attached the same handler object.)
    :param level: logging level for the returned logger.
    :returns: the ``logging.Logger`` named by ``LOG_NAME``.
    """
    if handler is None:
        handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    handler.setFormatter(formatter)
    logger = logging.getLogger(LOG_NAME)
    logger.addHandler(handler)
    logger.setLevel(level)
    return logger
LOG = make_logger()
AUTH_KEYS = {
'tenant_name': 'admin_tenant_name',
'username': 'admin_user',
'password': 'admin_password',
'auth_url': 'auth_uri',
}
def get_auth_data(cfg_file, section='keystone_authtoken', keys=AUTH_KEYS):
    """Read Keystone credentials from an INI config file.

    :param cfg_file: path to the config file (presumably neutron.conf —
        see the --authconf CLI option; confirm against callers).
    :param section: INI section holding the auth options.
    :param keys: mapping of neutronclient kwarg name -> option name.
    :returns: dict of kwargs suitable for neutronclient's Client().
    :raises: ConfigParser errors if the section/option is missing.
    """
    cfg = SafeConfigParser()
    with open(cfg_file) as f:
        cfg.readfp(f)
    auth_data = {}
    for key, value in keys.iteritems():
        auth_data[key] = cfg.get(section, value)
    return auth_data
# Note(xarses): be careful not to inject \n's into the regex pattern
# or it will cause the matching to fail
RECOVERABLE = re.compile((
'(HTTP\s+400\))|'
'(400-\{\'message\'\:\s+\'\'\})|'
'(\[Errno 111\]\s+Connection\s+refused)|'
'(503\s+Service\s+Unavailable)|'
'(504\s+Gateway\s+Time-out)|'
'(\:\s+Maximum\s+attempts\s+reached)|'
'(Unauthorized\:\s+bad\s+credentials)|'
'(Max\s+retries\s+exceeded)|'
"""('*NoneType'*\s+object\s+ha'\s+no\s+attribute\s+'*__getitem__'*$)|"""
'(No\s+route\s+to\s+host$)|'
'(Lost\s+connection\s+to\s+MySQL\s+server)'), flags=re.M)
RETRY_COUNT = 50
RETRY_DELAY = 2
def retry(func, pattern=RECOVERABLE):
    """Decorator: retry ``func`` on known-transient failures.

    Retries up to RETRY_COUNT times, sleeping RETRY_DELAY seconds between
    attempts, but only while the exception message matches ``pattern``;
    any other exception is re-raised immediately.

    NOTE(review): ``pattern.match`` anchors at the start of the message,
    while several RECOVERABLE alternatives look like mid-string fragments;
    ``pattern.search`` may have been intended — confirm before changing.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        i = 0
        while True:
            try:
                return func(*args, **kwargs)
            except Exception as e:
                # Non-recoverable error: propagate immediately.
                if pattern and not pattern.match(e.message):
                    raise e
                i += 1
                if i >= RETRY_COUNT:
                    raise e
                LOG.debug("retry request {0}: {1}".format(i, e))
                sleep(RETRY_DELAY)
    return wrapper
class NeutronCleaner(object):
PORT_NAME_PREFIXES_BY_DEV_OWNER = {
'network:dhcp': 'tap',
'network:router_gateway': 'qg-',
'network:router_interface': 'qr-',
}
PORT_NAME_PREFIXES = {
# contains tuples of prefixes
'dhcp': (PORT_NAME_PREFIXES_BY_DEV_OWNER['network:dhcp'],),
'l3': (
PORT_NAME_PREFIXES_BY_DEV_OWNER['network:router_gateway'],
PORT_NAME_PREFIXES_BY_DEV_OWNER['network:router_interface']
)
}
BRIDGES_FOR_PORTS_BY_AGENT = {
'dhcp': ('br-int',),
'l3': ('br-int', 'br-ex'),
}
PORT_OWNER_PREFIXES = {
'dhcp': ('network:dhcp',),
'l3': ('network:router_gateway', 'network:router_interface')
}
NS_NAME_PREFIXES = {
'dhcp': 'qdhcp',
'l3': 'qrouter',
}
AGENT_BINARY_NAME = {
'dhcp': 'neutron-dhcp-agent',
'l3': 'neutron-vpn-agent',
'ovs': 'neutron-openvswitch-agent'
}
CMD__list_ovs_port = ['ovs-vsctl', 'list-ports']
CMD__remove_ovs_port = ['ovs-vsctl', '--', '--if-exists', 'del-port']
CMD__remove_ip_addr = ['ip', 'address', 'delete']
CMD__ip_netns_list = ['ip', 'netns', 'list']
CMD__ip_netns_exec = ['ip', 'netns', 'exec']
# 14: tap-xxxyyyzzz:
RE__port_in_portlist = re.compile(r"^\s*\d+\:\s+([\w-]+)\:")
    def __init__(self, options, log=None):
        """Initialize the cleaner.

        :param options: dict of CLI options; 'authconf' (config path) and
            'debug' are read here, others ('noop', 'remove-dead') later.
        :param log: logger used by all methods; callers must supply one,
            since methods call self.log unconditionally.
        """
        self.log = log
        # Keystone credentials parsed from the file named by --authconf.
        self.auth_data = get_auth_data(cfg_file=options.get('authconf'))
        self.options = options
        self.agents = {}  # per-agent-type cache (see _get_agents_by_type)
        self.debug = options.get('debug')
        # Dispatch table: agent type -> rescheduling routine.
        self.RESCHEDULING_CALLS = {
            'dhcp': self._reschedule_agent_dhcp,
            'l3': self._reschedule_agent_l3,
        }
        self._client = None  # lazily created neutron client (see `client`)
    @property
    @retry
    def client(self):
        """Lazily-constructed neutron API client.

        Wrapped in @retry so transient failures during the first
        Client() construction are retried like any other API call.
        """
        if self._client is None:
            self._client = n_client.Client(API_VER, **self.auth_data)
        return self._client
    @retry
    def _get_agents(self, use_cache=True):
        # Full agent list from neutron. NOTE(review): use_cache is
        # accepted but ignored here; caching lives in _get_agents_by_type.
        return self.client.list_agents()['agents']

    @retry
    def _get_routers(self, use_cache=True):
        # All routers known to neutron (use_cache is unused).
        return self.client.list_routers()['routers']

    @retry
    def _get_networks(self, use_cache=True):
        # All networks known to neutron (use_cache is unused).
        return self.client.list_networks()['networks']

    @retry
    def _list_networks_on_dhcp_agent(self, agent_id):
        # Networks currently scheduled to the given DHCP agent.
        return self.client.list_networks_on_dhcp_agent(
            agent_id)['networks']

    @retry
    def _list_routers_on_l3_agent(self, agent_id):
        # Routers currently scheduled to the given L3 agent.
        return self.client.list_routers_on_l3_agent(
            agent_id)['routers']

    @retry
    def _list_l3_agents_on_router(self, router_id):
        # L3 agents hosting the given router.
        return self.client.list_l3_agent_hosting_routers(
            router_id)['agents']

    @retry
    def _list_dhcp_agents_on_network(self, network_id):
        # DHCP agents hosting the given network.
        return self.client.list_dhcp_agent_hosting_networks(
            network_id)['agents']
def _list_orphaned_networks(self):
networks = self._get_networks()
self.log.debug(
"_list_orphaned_networks:, got list of networks {0}".format(
json.dumps(networks, indent=4)))
orphaned_networks = []
for network in networks:
if len(self._list_dhcp_agents_on_network(network['id'])) == 0:
orphaned_networks.append(network['id'])
self.log.debug(
"_list_orphaned_networks:, got list of orphaned networks {0}".
format(orphaned_networks))
return orphaned_networks
def _list_orphaned_routers(self):
routers = self._get_routers()
self.log.debug(
"_list_orphaned_routers:, got list of routers {0}".format(
json.dumps(routers, indent=4)))
orphaned_routers = []
for router in routers:
if len(self._list_l3_agents_on_router(router['id'])) == 0:
orphaned_routers.append(router['id'])
self.log.debug(
"_list_orphaned_routers:, got list of orphaned routers {0}".format(
orphaned_routers))
return orphaned_routers
    @retry
    def _add_network_to_dhcp_agent(self, agent_id, net_id):
        # Schedule the network onto the given DHCP agent.
        return self.client.add_network_to_dhcp_agent(
            agent_id, {"network_id": net_id})

    @retry
    def _add_router_to_l3_agent(self, agent_id, router_id):
        # Schedule the router onto the given L3 agent.
        return self.client.add_router_to_l3_agent(
            agent_id, {"router_id": router_id})

    @retry
    def _remove_router_from_l3_agent(self, agent_id, router_id):
        # Unschedule the router from the given L3 agent.
        return self.client.remove_router_from_l3_agent(
            agent_id, router_id)

    @retry
    def _delete_agent(self, agent_id):
        # Remove the agent record from neutron entirely.
        return self.client.delete_agent(agent_id)
    def _get_agents_by_type(self, agent, use_cache=True):
        """Return agents whose binary matches AGENT_BINARY_NAME[agent].

        NOTE(review): within this view, self.agents is never written, so
        the use_cache branch can never hit unless the cache is populated
        elsewhere — verify against the rest of the file.
        """
        self.log.debug("_get_agents_by_type: start.")
        rv = self.agents.get(agent, []) if use_cache else []
        if not rv:
            # Cache miss: filter the full agent list by binary name.
            agents = self._get_agents(use_cache=use_cache)
            for i in agents:
                if i['binary'] == self.AGENT_BINARY_NAME.get(agent):
                    rv.append(i)
            from_cache = ''
        else:
            from_cache = ' from local cache'
        self.log.debug(
            "_get_agents_by_type: end, {0} rv: {1}".format(
                from_cache, json.dumps(rv, indent=4)))
        return rv
def _execute(self, cmd):
process = subprocess.Popen(
cmd,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(stdout, stderr) = process.communicate()
ret_code = process.returncode
if ret_code != 0:
self.log.error(
"ERROR (rc={0}) while execution {1}, stderr: {2}".format(
ret_code, ' '.join(cmd), stderr))
return None
return ret_code, stdout
    def __collect_namespaces_for_agent(self, agent):
        """List network namespaces belonging to the given agent type.

        Runs `ip netns list` and keeps names starting with the agent's
        prefix from NS_NAME_PREFIXES ('dhcp' -> qdhcp, 'l3' -> qrouter).
        Returns [] if the listing command fails.
        """
        cmd = self.CMD__ip_netns_list[:]
        self.log.debug("Execute command '{0}'".format(' '.join(cmd)))
        ret_code, stdout = self._execute(cmd)
        if ret_code != 0:
            return []
        # filter namespaces by given agent type
        netns = []
        for ns in StringIO.StringIO(stdout):
            ns = ns.strip()
            self.log.debug("Found network namespace '{0}'".format(ns))
            if ns.startswith(self.NS_NAME_PREFIXES[agent]):
                netns.append(ns)
        return netns
    def __collect_ports_for_namespace(self, ns):
        """Return non-loopback interface names inside namespace ``ns``.

        Parses the output of `ip netns exec <ns> ip l show` with
        RE__port_in_portlist ("<idx>: <name>:" lines). Returns [] if the
        command fails.
        """
        cmd = self.CMD__ip_netns_exec[:]
        cmd.extend([ns, 'ip', 'l', 'show'])
        self.log.debug("Execute command '{0}'".format(' '.join(cmd)))
        ret_code, stdout = self._execute(cmd)
        if ret_code != 0:
            return []
        ports = []
        for line in StringIO.StringIO(stdout):
            pp = self.RE__port_in_portlist.match(line)
            if not pp:
                continue
            port = pp.group(1)
            if port != 'lo':
                self.log.debug("Found port '{0}'".format(port))
                ports.append(port)
        return ports
    def _cleanup_ports(self, agent):
        """Remove from OVS every port found in the agent's namespaces.

        Honors the 'noop' option (log the command instead of running it).
        Always returns True.
        """
        self.log.debug("_cleanup_ports: start.")
        # get namespaces list for this agent type
        netns = self.__collect_namespaces_for_agent(agent)
        # collect port names from every namespace
        ports = []
        for ns in netns:
            ports.extend(self.__collect_ports_for_namespace(ns))
        # delete each collected port from OVS (if it still exists)
        for port in ports:
            cmd = self.CMD__remove_ovs_port[:]
            cmd.append(port)
            if self.options.get('noop'):
                self.log.info("NOOP-execution: '{0}'".format(' '.join(cmd)))
            else:
                self.log.debug("Execute command '{0}'".format(' '.join(cmd)))
                self._execute(cmd)
        self.log.debug("_cleanup_ports: end.")
        return True
    def _reschedule_agent_dhcp(self, agent_type):
        """Move networks off dead DHCP agents onto the first alive one.

        Also (optionally, via 'remove-dead') deletes dead agent records,
        and finally attaches orphaned networks to the first alive agent.
        All mutating calls are skipped when the 'noop' option is set.
        """
        self.log.debug("_reschedule_agent_dhcp: start.")
        agents = {
            'alive': [],
            'dead': []
        }
        # Partition agents and collect networks hosted by dead DHCP agents.
        dead_networks = []
        for agent in self._get_agents_by_type(agent_type):
            if agent['alive']:
                self.log.info(
                    "found alive DHCP agent: {0}".format(agent['id']))
                agents['alive'].append(agent)
            else:
                # dead agent: remember its networks for rescheduling
                self.log.info(
                    "found dead DHCP agent: {0}".format(agent['id']))
                agents['dead'].append(agent)
                for net in self._list_networks_on_dhcp_agent(agent['id']):
                    dead_networks.append(net)
        if dead_networks and agents['alive']:
            # IDs of networks already attached to the chosen alive agent.
            lucky_ids = set()
            map(
                lambda net: lucky_ids.add(net['id']),
                self._list_networks_on_dhcp_agent(agents['alive'][0]['id'])
            )
            # Attach each dead agent's network to the alive agent.
            for net in dead_networks:
                if net['id'] not in lucky_ids:
                    self.log.info(
                        "attach network {net} to DHCP agent {agent}".format(
                            net=net['id'],
                            agent=agents['alive'][0]['id']))
                    if not self.options.get('noop'):
                        self._add_network_to_dhcp_agent(
                            agents['alive'][0]['id'], net['id'])
            # Remove dead agent records if requested (alive agent exists).
            if self.options.get('remove-dead'):
                for agent in agents['dead']:
                    self.log.info(
                        "remove dead DHCP agent: {0}".format(agent['id']))
                    if not self.options.get('noop'):
                        self._delete_agent(agent['id'])
        # Reschedule networks that currently have no DHCP agent at all.
        orphaned_networks = self._list_orphaned_networks()
        self.log.info("_reschedule_agent_dhcp: rescheduling orphaned networks")
        if orphaned_networks and agents['alive']:
            for network in orphaned_networks:
                self.log.info(
                    "_reschedule_agent_dhcp: rescheduling {0} to {1}".format(
                        network, agents['alive'][0]['id']))
                if not self.options.get('noop'):
                    self._add_network_to_dhcp_agent(
                        agents['alive'][0]['id'], network)
        self.log.info(
            "_reschedule_agent_dhcp: ended rescheduling of orphaned networks")
        self.log.debug("_reschedule_agent_dhcp: end.")
def _reschedule_agent_l3(self, agent_type):
    """Move routers hosted on dead L3 agents to the first alive one.

    Dead agents are deleted after their routers are collected, then the
    routers are attached to the alive agent.  Orphaned (unhosted)
    routers are rescheduled as well.  Mutating API calls are skipped
    when the 'noop' option is set.
    """
    self.log.debug("_reschedule_agent_l3: start.")
    agents = {
        'alive': [],
        'dead': []
    }
    # collect router-list from dead L3-agents
    dead_routers = []  # array of tuples (router, agentID)
    for agent in self._get_agents_by_type(agent_type):
        if agent['alive']:
            self.log.info("found alive L3 agent: {0}".format(agent['id']))
            agents['alive'].append(agent)
        else:
            # dead agent
            self.log.info("found dead L3 agent: {0}".format(agent['id']))
            agents['dead'].append(agent)
            # NOTE: plain extend() replaces the former map() with side
            # effects, which is a silent no-op on Python 3 (lazy map()).
            dead_routers.extend(
                (rou, agent['id'])
                for rou in self._list_routers_on_l3_agent(agent['id']))
    self.log.debug(
        "L3 agents in cluster: {0}".format(
            json.dumps(agents, indent=4)))
    self.log.debug("Routers, attached to dead L3 agents: {0}".format(
        json.dumps(dead_routers, indent=4)))
    if dead_routers and agents['alive']:
        lucky_agent_id = agents['alive'][0]['id']
        # get router-ID list of routers already attached to alive agent
        lucky_ids = set(
            rou['id']
            for rou in self._list_routers_on_l3_agent(lucky_agent_id))
        # remove dead agents after rescheduling
        for agent in agents['dead']:
            self.log.info("remove dead L3 agent: {0}".format(agent['id']))
            if not self.options.get('noop'):
                self._delete_agent(agent['id'])
        # move routers from dead to alive agent
        for rou, _src_agent_id in dead_routers:
            if rou['id'] in lucky_ids:
                continue
            self.log.info(
                "schedule router {0} to L3 agent {1}".format(
                    rou['id'],
                    lucky_agent_id))
            if not self.options.get('noop'):
                self._add_router_to_l3_agent(
                    lucky_agent_id, rou['id'])
    orphaned_routers = self._list_orphaned_routers()
    self.log.info("_reschedule_agent_l3: rescheduling orphaned routers")
    if orphaned_routers and agents['alive']:
        for router in orphaned_routers:
            self.log.info(
                "_reschedule_agent_l3: rescheduling {0} to {1}".format(
                    router, agents['alive'][0]['id']))
            if not self.options.get('noop'):
                self._add_router_to_l3_agent(
                    agents['alive'][0]['id'], router)
    self.log.info(
        "_reschedule_agent_l3: ended rescheduling of orphaned routers")
    self.log.debug("_reschedule_agent_l3: end.")
def _remove_self(self, agent_type):
    """Delete the agent of the given type running on this very host."""
    self.log.debug("_remove_self: start.")
    for agent in self._get_agents_by_type(agent_type):
        # Skip agents that are hosted elsewhere.
        if agent['host'] != socket.gethostname():
            continue
        self.log.info(
            "_remove_self: deleting our own agent {0} of type {1}".
            format(agent['id'], agent_type))
        if not self.options.get('noop'):
            self._delete_agent(agent['id'])
    self.log.debug("_remove_self: end.")
def _reschedule_agent(self, agent):
    """Dispatch rescheduling to the handler registered for this agent type."""
    self.log.debug("_reschedule_agents: start.")
    handler = self.RESCHEDULING_CALLS.get(agent)
    if handler:
        handler(agent)
    self.log.debug("_reschedule_agents: end.")
def do(self, agent):
    """Run the actions selected via command-line options for one agent type."""
    opts = self.options
    if opts.get('cleanup-ports'):
        self._cleanup_ports(agent)
    if opts.get('reschedule'):
        self._reschedule_agent(agent)
    if opts.get('remove-self'):
        self._remove_self(agent)
def _test_healthy(self, agent_list, hostname):
rv = False
for agent in agent_list:
if agent['host'] == hostname and agent['alive']:
return True
return rv
def test_healthy(self, agent_type):
    """OCF health probe: 0 when any tested hostname hosts an alive agent.

    Otherwise returns OCF_FAILED_MASTER, see
    http://www.linux-ha.org/doc/dev-guides/_literal_ocf_failed_master_literal_9.html
    """
    agent_list = self._get_agents_by_type(agent_type)
    for hostname in self.options.get('test-hostnames'):
        if self._test_healthy(agent_list, hostname):
            return 0
    # OCF_FAILED_MASTER
    return 9
if __name__ == '__main__':
    # Command-line entry point: parse options, configure logging and run
    # the requested cleanup/rescheduling actions (or the health test).
    parser = argparse.ArgumentParser(
        description='Neutron network node cleaning tool.')
    # -- authentication options -------------------------------------------
    parser.add_argument(
        "-c",
        "--auth-config",
        dest="authconf",
        default="/etc/neutron/neutron.conf",
        help="Read authconfig from service file",
        metavar="FILE")
    parser.add_argument(
        "-t",
        "--auth-token",
        dest="auth-token",
        default=None,
        help="Authenticating token (instead username/passwd)",
        metavar="TOKEN")
    parser.add_argument(
        "-u",
        "--admin-auth-url",
        dest="admin-auth-url",
        default=None,
        help="Authenticating URL (admin)",
        metavar="URL")
    # -- retry behaviour for API calls ------------------------------------
    parser.add_argument(
        "--retries",
        dest="retries",
        type=int,
        default=50,
        help="try NN retries for API call",
        metavar="NN")
    parser.add_argument(
        "--sleep",
        dest="sleep",
        type=int,
        default=2,
        help="sleep seconds between retries",
        metavar="SEC")
    # -- actions to perform -----------------------------------------------
    parser.add_argument(
        "-a",
        "--agent",
        dest="agent",
        action="append",
        help="specify agents for cleaning",
        required=True)
    parser.add_argument(
        "--cleanup-ports",
        dest="cleanup-ports",
        action="store_true",
        default=False,
        help="cleanup ports for given agents on this node")
    parser.add_argument(
        "--remove-self",
        dest="remove-self",
        action="store_true",
        default=False,
        help="remove ourselves from agent list")
    parser.add_argument(
        "--activeonly",
        dest="activeonly",
        action="store_true",
        default=False,
        help="cleanup only active ports")
    parser.add_argument(
        "--reschedule",
        dest="reschedule",
        action="store_true",
        default=False,
        help="reschedule given agents")
    parser.add_argument(
        "--remove-dead",
        dest="remove-dead",
        action="store_true",
        default=False,
        help="remove dead agents while rescheduling")
    parser.add_argument(
        "--test-alive-for-hostname",
        dest="test-hostnames",
        action="append",
        help="test agent health for given hostname")
    # -- environment ------------------------------------------------------
    parser.add_argument(
        "--external-bridge",
        dest="external-bridge",
        default="br-ex",
        help="external bridge name",
        metavar="IFACE")
    parser.add_argument(
        "--integration-bridge",
        dest="integration-bridge",
        default="br-int",
        help="integration bridge name",
        metavar="IFACE")
    # -- logging ----------------------------------------------------------
    parser.add_argument(
        "-l",
        "--log",
        dest="log",
        action="store",
        help="log to file instead of STDOUT")
    parser.add_argument(
        "--noop",
        dest="noop",
        action="store_true",
        default=False,
        help="do not execute, print to log instead")
    parser.add_argument(
        "--debug",
        dest="debug",
        action="store_true",
        default=False,
        help="debug")
    args = parser.parse_args()

    # Module-level retry knobs consumed by the API helpers.
    RETRY_COUNT = args.retries
    RETRY_DELAY = args.sleep

    # setup logging
    if args.log:
        LOG = make_logger(
            handler=logging.handlers.WatchedFileHandler(args.log))
    if args.debug:
        LOG.setLevel(logging.DEBUG)
    LOG.info("Started: {0}".format(' '.join(sys.argv)))

    cleaner = NeutronCleaner(options=vars(args), log=LOG)

    rc = 0
    # Hyphenated dest names are not valid attribute names, hence vars().
    if vars(args).get('test-hostnames'):
        # Health-test mode: exit 0 when an alive agent was found on one of
        # the given hostnames, OCF_FAILED_MASTER otherwise.
        rc = cleaner.test_healthy(args.agent[0])
    else:
        for i in args.agent:
            cleaner.do(i)
    LOG.debug("End.")
    sys.exit(rc)

View File

@ -11,9 +11,11 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#This class contains common changes both for HA and simple deployment mode.
#It enables VPN in Horizon and Neutron server.
# == Class: vpnaas::common
#
# This class contains common changes both for HA and simple deployment mode.
# It enables VPN in Horizon and Neutron server.
#
class vpnaas::common {
@ -52,4 +54,19 @@ class vpnaas::common {
Exec['enable_vpnaas_dashboard'] ~> Service[$vpnaas::params::dashboard_service]
Ini_subsetting['add_vpnaas_service_plugin'] ~> Service[$vpnaas::params::server_service]
if $primary_controller {
Package<| title == 'neutron-vpnaas-agent' |> -> Exec['neutron-db-sync']
exec { 'neutron-db-sync':
command => 'neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini --service vpnaas upgrade head',
path => '/usr/bin',
refreshonly => true,
tries => 10,
try_sleep => 10,
}
Ini_subsetting['add_vpnaas_service_plugin'] ~> Exec['neutron-db-sync']
Exec['neutron-db-sync'] ~> Service <| title == 'neutron-server' |>
}
}

View File

@ -11,28 +11,18 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#This class is intended to deploy VPNaaS in HA mode.
#
# == Class: vpnaas::ha
#
# This class is intended to deploy VPNaaS in HA mode
#
class vpnaas::ha {
include vpnaas::params
include neutron::params
$fuel_settings = parseyaml($astute_settings_yaml)
$neutron_config = $fuel_settings['quantum_settings']
$debug = true
$syslog = $fuel_settings['use_syslog'] ? { default=>true }
$plugin_config = '/etc/neutron/l3_agent.ini'
file {'q-agent-cleanup.py':
path => '/usr/bin/q-agent-cleanup.py',
mode => '0755',
owner => root,
group => root,
source => 'puppet:///modules/vpnaas/q-agent-cleanup.py',
}
$neutron_config = hiera_hash('quantum_settings')
file { $vpnaas::params::vpn_agent_ocf_file:
mode => '0755',
@ -48,31 +38,23 @@ class vpnaas::ha {
enabled => false,
}
exec {'remove_p_neutron-l3-agent':
command => 'pcs resource disable p_neutron-l3-agent --wait=30',
path => '/usr/sbin:/usr/bin:/sbin:/bin',
if $primary_controller {
exec {'remove_p_neutron-l3-agent':
command => 'pcs resource delete p_neutron-l3-agent --wait=60',
onlyif => 'pcs resource show p_neutron-l3-agent 2>&1 > /dev/null',
path => '/usr/sbin:/usr/bin:/sbin:/bin',
}
Exec['remove_p_neutron-l3-agent'] -> Cluster::Corosync::Cs_service['vpn']
}
$csr_metadata = undef
$csr_complex_type = 'clone'
$csr_ms_metadata = { 'interleave' => true }
cluster::corosync::cs_with_service {'vpn-and-ovs':
first => "clone_p_${neutron::params::ovs_agent_service}",
second => "clone_p_${neutron::params::vpnaas_agent_service}"
}
cluster::corosync::cs_service {'vpn':
ocf_script => 'ocf-neutron-vpn-agent',
csr_parameters => {
'debug' => $debug,
'syslog' => $syslog,
'plugin_config' => $plugin_config,
'os_auth_url' => "http://${fuel_settings['management_vip']}:35357/v2.0/",
'tenant' => 'services',
'username' => undef,
'password' => $neutron_config['keystone']['admin_password'],
'multiple_agents' => $multiple_agents,
'remove_artifacts_on_stop_start' => true,
},
csr_metadata => $csr_metadata,
csr_complex_type => $csr_complex_type,
@ -87,8 +69,6 @@ class vpnaas::ha {
hasrestart => false,
}
Exec['remove_p_neutron-l3-agent'] -> Cluster::Corosync::Cs_service['vpn']
File['q-agent-cleanup.py'] -> Cluster::Corosync::Cs_service['vpn']
File[$vpnaas::params::vpn_agent_ocf_file] -> Cluster::Corosync::Cs_service['vpn'] ->
Cluster::Corosync::Cs_with_service['vpn-and-ovs'] -> Class['vpnaas::common']
File[$vpnaas::params::vpn_agent_ocf_file] -> Cluster::Corosync::Cs_service['vpn'] ->
Class['vpnaas::common']
}

View File

@ -11,6 +11,12 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: vpnaas
#
# This class is the main entry point for VPNaaS plugin
# It selects the appropriate class for the deployment mode
#
class vpnaas {

View File

@ -11,8 +11,11 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#This class contains necessary parameters for all other manifests
#
# == Class: vpnaas::params
#
# This class contains necessary parameters for all other manifests
#
class vpnaas::params {
@ -49,5 +52,4 @@ class vpnaas::params {
$openswan_package = 'openswan'
$vpn_agent_ocf_file = '/usr/lib/ocf/resource.d/fuel/ocf-neutron-vpn-agent'
$cleanup_script_file = '/etc/puppet/modules/cluster/files/q-agent-cleanup.py'
}

View File

@ -11,8 +11,11 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#This class deploys VPNaaS in simple mode.
#
# == Class: vpnaas::simple
#
# This class deploys VPNaaS in simple mode.
#
class vpnaas::simple {

View File

@ -47,7 +47,7 @@ class vpnaas::agent (
$package_ensure = present,
$enabled = true,
$manage_service = true,
$vpn_device_driver = 'neutron.services.vpn.device_drivers.ipsec.OpenSwanDriver',
$vpn_device_driver = 'neutron_vpnaas.services.vpn.device_drivers.ipsec.OpenSwanDriver',
$interface_driver = 'neutron.agent.linux.interface.OVSInterfaceDriver',
$external_network_bridge = undef,
$ipsec_status_check_interval = '60',

View File

@ -1,14 +0,0 @@
{
"name": "vpnaas",
"version": "0.1.0",
"author": "Sergey Kolekonov",
"summary": "Module to manage vpnaas",
"license": "Apache 2.0",
"source": "",
"project_page": "skolekonov@mirantis.com",
"issues_url": "skolekonov@mirantis.com",
"dependencies": [
{"name":"puppetlabs-stdlib","version_requirement":">= 1.0.0"}
]
}

View File

@ -1,21 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
require 'spec_helper'
describe 'vpnaas' do
context 'with defaults for all parameters' do
it { should contain_class('vpnaas') }
end
end

View File

@ -1,15 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
require 'puppetlabs_spec_helper/module_spec_helper'

View File

@ -1,26 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# The baseline for module testing used by Puppet Labs is that each manifest
# should have a corresponding test manifest that declares that class or defined
# type.
#
# Tests are then run by using puppet apply --noop (to check for compilation
# errors and view a log of events) or by fully applying the test in a virtual
# environment (to compare the resulting system state to the desired state).
#
# Learn more about module testing here:
# http://docs.puppetlabs.com/guides/tests_smoke.html
#
include vpnaas

View File

@ -1,15 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
include vpnaas::ha

View File

@ -3,4 +3,6 @@ attributes:
metadata:
restrictions:
- condition: cluster:net_provider != 'neutron'
action: hide
action: hide
- condition: settings:neutron_advanced_configuration.neutron_dvr.value == true
message: "Neutron DVR must be disabled in order to use VPNaaS plugin"

View File

@ -3,11 +3,11 @@ name: vpnaas-plugin
# Human-readable name for your plugin
title: VPNaaS plugin for Neutron
# Plugin version
version: 1.1.0
version: 1.2.0
# Description
description: Neutron extension that introduces VPN feature set
# Required fuel version
fuel_version: ['6.1']
fuel_version: ['7.0']
# Specify license of your plugin
licenses: ['Apache License Version 2.0']
# Specify author or company name
@ -20,12 +20,12 @@ groups: ['network']
# The plugin is compatible with releases in the list
releases:
- os: ubuntu
version: 2014.2-6.1
version: 2015.1-7.0
mode: ['ha', 'multinode']
deployment_scripts_path: deployment_scripts/
repository_path: repositories/ubuntu
- os: centos
version: 2014.2-6.1
version: 2015.1-7.0
mode: ['ha', 'multinode']
deployment_scripts_path: deployment_scripts/
repository_path: repositories/centos

View File

@ -18,5 +18,5 @@
stage: pre_deployment
type: shell
parameters:
cmd: if [ -n "$(which pcs 2>/dev/null)" -a -n "$(pcs resource show p_neutron-vpn-agent 2>/dev/null)" ]; then pcs resource disable p_neutron-vpn-agent --wait=30; fi
timeout: 40
cmd: if [ -n "$(which pcs 2>/dev/null)" -a -n "$(pcs resource show p_neutron-vpn-agent 2>/dev/null)" ]; then pcs resource delete p_neutron-vpn-agent --wait=60; fi
timeout: 90