Merge master into telco (using imerge)

Change-Id: Ifcce0e0b1c972ad649a4f73eb153e78c508cb80e
This commit is contained in:
Yuriy Taraday 2015-08-25 15:50:32 +03:00
commit f307f654df
14 changed files with 379 additions and 302 deletions

View File

@ -12,13 +12,12 @@
import logging
from octane.commands.upgrade_db import get_controllers
from octane.commands.upgrade_node import ControllerUpgrade
from octane.commands.upgrade_node import wait_for_node
from octane.helpers import network
from octane.helpers.node_attributes import copy_disks
from octane.helpers.node_attributes import copy_ifaces
from octane import magic_consts
from octane.util import env as env_util
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
@ -71,7 +70,7 @@ def install_node(orig_id, seed_id, node_ids, isolated=False):
raise Exception("Original and seed environments have the same ID: %s",
orig_id)
orig_env = env(orig_id)
orig_node = next(get_controllers(orig_env))
orig_node = next(env_util.get_controllers(orig_env))
seed_env = env(seed_id)
seed_env.assign(nodes, orig_node.data['roles'])
for node in nodes:
@ -80,8 +79,7 @@ def install_node(orig_id, seed_id, node_ids, isolated=False):
update_node_settings(node, disk_info_fixture, nic_info_fixture)
seed_env.install_selected_nodes('provision', nodes)
for node in nodes:
wait_for_node(node, "provisioned")
env_util.wait_for_nodes(nodes, "provisioned")
for node in nodes:
# FIXME: properly call all handlers all over the place
@ -89,8 +87,7 @@ def install_node(orig_id, seed_id, node_ids, isolated=False):
if len(nodes) > 1:
isolate(nodes, seed_env)
seed_env.deploy_changes()
for node in nodes:
wait_for_node(node, "ready")
env_util.wait_for_nodes(nodes, "ready")
class InstallNodeCommand(cmd.Command):

View File

@ -14,23 +14,23 @@ import os
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from octane.commands.upgrade_db import get_controllers
from octane.commands.upgrade_db import parse_crm_status
from octane.helpers import network
from octane import magic_consts
from octane.util import env as env_util
from octane.util import maintenance
from octane.util import ssh
def start_corosync_services(env):
controllers = list(get_controllers(env))
controllers = list(env_util.get_controllers(env))
for node in controllers:
status_out, _ = ssh.call(['crm', 'status'], stdout=ssh.PIPE, node=node)
for service in parse_crm_status(status_out):
for service in maintenance.parse_crm_status(status_out):
ssh.call(['crm', 'resource', 'start', service], node=node)
def start_upstart_services(env):
controllers = list(get_controllers(env))
controllers = list(env_util.get_controllers(env))
for node in controllers:
sftp = ssh.sftp(node)
try:
@ -45,7 +45,7 @@ def start_upstart_services(env):
def disconnect_networks(env):
controllers = list(get_controllers(env))
controllers = list(env_util.get_controllers(env))
for node in controllers:
deployment_info = env.get_default_facts('deployment',
nodes=[node.data['id']])
@ -54,7 +54,7 @@ def disconnect_networks(env):
def connect_to_networks(env):
controllers = list(get_controllers(env))
controllers = list(env_util.get_controllers(env))
backup_path = os.path.join(magic_consts.FUEL_CACHE,
'deployment_{0}.orig'
.format(env.id))
@ -67,7 +67,7 @@ def connect_to_networks(env):
def update_neutron_config(env):
controllers = list(get_controllers(env))
controllers = list(env_util.get_controllers(env))
tenant_file = '%s/env-%s-service-tenant-id' % (magic_consts.FUEL_CACHE,
str(env.id))
with open(tenant_file) as f:

View File

@ -11,123 +11,20 @@
# under the License.
import os.path
import re
import shutil
import time
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from fuelclient.objects import node as node_obj
from octane import magic_consts
from octane.util import env as env_util
from octane.util import maintenance
from octane.util import ssh
def get_controllers(env):
found = False
for node in node_obj.Node.get_all():
if node.data['cluster'] != env.data['id']:
continue
if ('controller' in node.data['roles'] or
'controller' in node.data['pending_roles']):
yield node
found = True
if not found:
raise Exception("Can't find controller node in env %s" %
env.data['id'])
def delete_fuel_resources(seed_env):
node = next(get_controllers(seed_env))
sftp = ssh.sftp(node)
sftp.put(
os.path.join(magic_consts.CWD, "helpers/delete_fuel_resources.py"),
"/tmp/delete_fuel_resources.py",
)
ssh.call(
["sh", "-c", ". /root/openrc; python /tmp/delete_fuel_resources.py"],
node=node,
)
def disable_apis(env):
controllers = list(get_controllers(env))
maintenance_line = 'backend maintenance'
stats_socket_re = re.compile('stats\s+socket\s+/var/lib/haproxy/stats'
'(?!.*level admin)')
mode_tcp_re = re.compile('mode\s+tcp')
use_backend_line = ' use_backend maintenance if TRUE'
for node in controllers:
sftp = ssh.sftp(node)
sftp.chdir('/etc/haproxy')
with ssh.update_file(sftp, 'haproxy.cfg') as (old, new):
found_maint_line = False
for line in old:
if maintenance_line in line:
found_maint_line = True
line = stats_socket_re.sub(r'\g<0> level admin', line)
new.write(line)
if not found_maint_line:
new.write(maintenance_line)
sftp.chdir('/etc/haproxy/conf.d')
for f in sftp.listdir():
with ssh.update_file(sftp, f) as (old, new):
contents = old.read()
if not mode_tcp_re.search(contents):
raise ssh.DontUpdateException
new.write(contents)
if not contents.endswith('\n'):
new.write('\n')
new.write(use_backend_line)
ssh.call(['crm', 'resource', 'restart', 'p_haproxy'], node=node)
_default_exclude_services = ('mysql', 'haproxy', 'p_dns', 'p_ntp')
def parse_crm_status(status_out, exclude=_default_exclude_services):
for match in re.finditer(r"clone.*\[(.*)\]", status_out):
name = match.group(1)
if any(service in name for service in exclude):
continue
yield name
def stop_corosync_services(env):
controllers = list(get_controllers(env))
for node in controllers:
status_out, _ = ssh.call(['crm', 'status'], stdout=ssh.PIPE, node=node)
for service in parse_crm_status(status_out):
ssh.call(['crm', 'resource', 'stop', service], node=node)
def stop_upstart_services(env):
controllers = list(get_controllers(env))
service_re = re.compile("^((?:%s)[^\s]*).*start/running" %
("|".join(magic_consts.OS_SERVICES),),
re.MULTILINE)
for node in controllers:
sftp = ssh.sftp(node)
try:
svc_file = sftp.open('/root/services_list')
except IOError:
with sftp.open('/root/services_list.tmp', 'w') as svc_file:
initctl_out, _ = ssh.call(['initctl', 'list'],
stdout=ssh.PIPE, node=node)
to_stop = []
for match in service_re.finditer(initctl_out):
service = match.group(1)
to_stop.append(service)
svc_file.write(service + '\n')
sftp.rename('/root/services_list.tmp', '/root/services_list')
else:
with svc_file:
to_stop = svc_file.read().splitlines()
for service in to_stop:
ssh.call(['stop', service], node=node)
def mysqldump_from_env(env):
node = next(get_controllers(env))
node = next(env_util.get_controllers(env))
local_fname = os.path.join(magic_consts.FUEL_CACHE, 'dbs.original.sql.gz')
with ssh.popen(['sh', '-c', 'mysqldump --add-drop-database'
' --lock-all-tables --databases %s | gzip' %
@ -144,7 +41,7 @@ def mysqldump_from_env(env):
def mysqldump_restore_to_env(env, fname):
node = next(get_controllers(env))
node = next(env_util.get_controllers(env))
with open(fname, 'rb') as local_file:
with ssh.popen(['sh', '-c', 'zcat | mysql'],
stdin=ssh.PIPE, node=node) as proc:
@ -152,7 +49,7 @@ def mysqldump_restore_to_env(env, fname):
def db_sync(env):
node = next(get_controllers(env))
node = next(env_util.get_controllers(env))
ssh.call(['keystone-manage', 'db_sync'], node=node, parse_levels=True)
ssh.call(['nova-manage', 'db', 'sync'], node=node, parse_levels=True)
ssh.call(['heat-manage', 'db_sync'], node=node, parse_levels=True)
@ -165,12 +62,12 @@ def db_sync(env):
def upgrade_db(orig_id, seed_id):
orig_env = environment_obj.Environment(orig_id)
seed_env = environment_obj.Environment(seed_id)
delete_fuel_resources(seed_env)
env_util.delete_fuel_resources(seed_env)
# Wait for Neutron to reconfigure networks
time.sleep(7) # FIXME: Use more deterministic way
disable_apis(orig_env)
stop_corosync_services(seed_env)
stop_upstart_services(seed_env)
maintenance.disable_apis(orig_env)
maintenance.stop_corosync_services(seed_env)
maintenance.stop_upstart_services(seed_env)
fname = mysqldump_from_env(orig_env)
mysqldump_restore_to_env(seed_env, fname)
db_sync(seed_env)

View File

@ -12,19 +12,16 @@
from __future__ import print_function
import json
import logging
import uuid
from octane.commands.upgrade_db import get_controllers
from octane import magic_consts
from octane.util import ssh
from octane.util import subprocess
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from fuelclient.objects import release as release_obj
from octane import magic_consts
from octane.util import env as env_util
from octane.util import ssh
LOG = logging.getLogger(__name__)
@ -57,27 +54,12 @@ def set_cobbler_provision(env_id):
def upgrade_env(env_id):
target_release = find_deployable_release("Ubuntu")
LOG.info("Cloning env %s for release %s",
env_id, target_release.data['name'])
res, _ = subprocess.call(
["fuel2", "env", "clone", "-f", "json",
str(env_id), uuid.uuid4().hex, str(target_release.data['id'])],
stdout=subprocess.PIPE,
)
for kv in json.loads(res):
if kv['Field'] == 'id':
seed_id = kv['Value']
break
else:
raise Exception("Couldn't find new environment ID in fuel CLI output:"
"\n%s" % res)
return seed_id
return env_util.clone_env(env_id, target_release)
def write_service_tenant_id(env_id):
env = environment_obj.Environment(env_id)
node = get_controllers(env).next()
node = env_util.get_controllers(env).next()
tenant_id, _ = ssh.call(["bash", "-c", ". /root/openrc;",
"keystone tenant-list | ",
"awk -F\| '\$2 ~ /id/{print \$3}' | tr -d \ "],

View File

@ -12,54 +12,21 @@
import logging
import os
import time
import yaml
from octane.helpers import tasks as tasks_helpers
from octane.helpers import transformations
from octane import magic_consts
from octane.util import ssh
from octane.util import subprocess
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from fuelclient.objects import node as node_obj
from octane.helpers import tasks as tasks_helpers
from octane.helpers import transformations
from octane import magic_consts
from octane.util import env as env_util
from octane.util import ssh
LOG = logging.getLogger(__name__)
def parse_tenant_get(output, field):
for line in output.splitlines()[3:-1]:
parts = line.split()
if parts[1] == field:
return parts[3]
raise Exception(
"Field {0} not found in output:\n{1}".format(field, output))
def get_service_tenant_id(node):
fname = os.path.join(
magic_consts.FUEL_CACHE,
"env-{0}-service-tenant-id".format(node.data['cluster']),
)
if os.path.exists(fname):
with open(fname) as f:
return f.readline()
tenant_out, _ = ssh.call(
[
'sh', '-c',
'. /root/openrc; keystone tenant-get services',
],
node=node,
stdout=ssh.PIPE,
)
tenant_id = parse_tenant_get(tenant_out, 'id')
with open(fname, 'w') as f:
f.write(tenant_id)
return tenant_id
class UpgradeHandler(object):
def __init__(self, node, env, isolated):
self.node = node
@ -86,7 +53,8 @@ class ControllerUpgrade(UpgradeHandler):
self.gateway = None
def preupgrade(self):
self.service_tenant_id = get_service_tenant_id(self.node)
self.service_tenant_id = env_util.get_service_tenant_id(
self.env, self.node)
def predeploy(self):
deployment_info = self.env.get_default_facts('deployment')
@ -177,24 +145,6 @@ def call_role_upgrade_handlers(handlers, method):
method, type(handler).__name__)
def wait_for_node(node, status, timeout=60 * 60, check_freq=60):
node_id = node.data['id']
LOG.debug("Waiting for node %s to transition to status '%s'",
node_id, status)
started_at = time.time() # TODO: use monotonic timer
while True:
data = node.get_fresh_data()
if data['status'] == 'error':
raise Exception("Node %s fell into error status" % (node_id,))
if data['online'] and data['status'] == status:
LOG.info("Node %s transitioned to status '%s'", node_id, status)
return
if time.time() - started_at >= timeout:
raise Exception("Timeout waiting for node %s to transition to "
"status '%s'" % (node_id, status))
time.sleep(check_freq)
def upgrade_node(env_id, node_ids, isolated=False):
# From check_deployment_status
env = environment_obj.Environment(env_id)
@ -225,20 +175,9 @@ def upgrade_node(env_id, node_ids, isolated=False):
call_role_upgrade_handlers(role_handlers, 'preupgrade')
call_role_upgrade_handlers(role_handlers, 'prepare')
for node in nodes: # TODO: create wait_for_nodes method here
subprocess.call(
["fuel2", "env", "move", "node", str(node_id), str(env_id)])
for node in nodes: # TODO: create wait_for_nodes method here
wait_for_node(node, "provisioned")
env_util.move_nodes(env, nodes)
call_role_upgrade_handlers(role_handlers, 'predeploy')
env.install_selected_nodes('deploy', nodes)
for node in nodes: # TODO: create wait_for_nodes method here
wait_for_node(node, "ready")
env_util.deploy_nodes(env, nodes)
call_role_upgrade_handlers(role_handlers, 'postdeploy')

View File

@ -16,8 +16,8 @@ import subprocess
from octane import magic_consts
from octane.util import ssh
from octane.commands.upgrade_db import get_controllers
from octane.helpers import transformations as ts
from octane.util import env as env_util
LOG = logging.getLogger(__name__)
@ -185,7 +185,7 @@ def delete_tunnels_from_node(node, bridge):
def delete_overlay_network(env, bridge):
nodes = list(get_controllers(env))
nodes = list(env_util.get_controllers(env))
for node in nodes:
delete_tunnels_from_node(node, bridge)

View File

@ -3,7 +3,7 @@
pycmd() {
if ! python -c 'import octane'; then
yum install -y python-paramiko
pip install -e "$CWD/.."
pip install --no-index -e "$CWD/.."
fi
local opts=""
if shopt -qo xtrace; then

29
octane/tests/test_env.py Normal file
View File

@ -0,0 +1,29 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from octane.util import env as env_util
def test_parse_tenant_get():
    # parse_tenant_get() must pull the value cell of the 'id' row out of
    # a keystone tenant-get table.
    res = env_util.parse_tenant_get(TENANT_GET_SAMPLE, 'id')
    assert res == 'e26c8079d61f46c48f9a6d606631ee5e'


# Captured `keystone tenant-get services` output; the [1:] slice drops
# the leading newline introduced by the triple-quoted literal.
TENANT_GET_SAMPLE = """
+-------------+-----------------------------------+
| Property | Value |
+-------------+-----------------------------------+
| description | Tenant for the openstack services |
| enabled | True |
| id | e26c8079d61f46c48f9a6d606631ee5e |
| name | services |
+-------------+-----------------------------------+
"""[1:]

View File

@ -0,0 +1,68 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from octane.util import maintenance
def test_parse_crm_status():
    # parse_crm_status() should list clone-set resources minus the
    # default exclusions (mysql, haproxy, p_dns, p_ntp).
    res = list(maintenance.parse_crm_status(CRM_STATUS_SAMPLE))
    assert res == CRM_STATUS_PARSE_RESULT


# Captured `crm status` output; the [1:] slice drops the leading newline
# introduced by the triple-quoted literal.
CRM_STATUS_SAMPLE = """
Last updated: Fri Jul 31 15:02:15 2015
Last change: Thu Jul 30 14:56:04 2015
Stack: corosync
Current DC: node-1 (1) - partition with quorum
Version: 1.1.12-561c4cf
1 Nodes configured
16 Resources configured
Online: [ node-1 ]
Clone Set: clone_p_vrouter [p_vrouter]
Started: [ node-1 ]
vip__management (ocf::fuel:ns_IPaddr2): Started node-1
vip__public_vrouter (ocf::fuel:ns_IPaddr2): Started node-1
vip__management_vrouter (ocf::fuel:ns_IPaddr2): Started node-1
vip__public (ocf::fuel:ns_IPaddr2): Started node-1
Master/Slave Set: master_p_conntrackd [p_conntrackd]
Masters: [ node-1 ]
Clone Set: clone_p_haproxy [p_haproxy]
Started: [ node-1 ]
Clone Set: clone_p_dns [p_dns]
Started: [ node-1 ]
Clone Set: clone_p_mysql [p_mysql]
Started: [ node-1 ]
Master/Slave Set: master_p_rabbitmq-server [p_rabbitmq-server]
Masters: [ node-1 ]
Clone Set: clone_p_heat-engine [p_heat-engine]
Started: [ node-1 ]
Clone Set: clone_p_neutron-plugin-openvswitch-agent [p_neutron-plugin-openvswitch-agent]
Started: [ node-1 ]
Clone Set: clone_p_neutron-dhcp-agent [p_neutron-dhcp-agent]
Started: [ node-1 ]
Clone Set: clone_p_neutron-metadata-agent [p_neutron-metadata-agent]
Started: [ node-1 ]
Clone Set: clone_p_neutron-l3-agent [p_neutron-l3-agent]
Started: [ node-1 ]
Clone Set: clone_p_ntp [p_ntp]
Started: [ node-1 ]
"""[1:] # noqa

# Expected clone names after the default exclusions are applied.
CRM_STATUS_PARSE_RESULT = [
    "p_vrouter",
    "p_heat-engine",
    "p_neutron-plugin-openvswitch-agent",
    "p_neutron-dhcp-agent",
    "p_neutron-metadata-agent",
    "p_neutron-l3-agent",
]

View File

@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from octane.commands import upgrade_db
def test_parser(mocker, octane_app):
m = mocker.patch('octane.commands.upgrade_db.upgrade_db')
@ -19,59 +17,3 @@ def test_parser(mocker, octane_app):
assert not octane_app.stdout.getvalue()
assert not octane_app.stderr.getvalue()
m.assert_called_once_with(1, 2)
def test_parse_crm_status():
res = list(upgrade_db.parse_crm_status(CRM_STATUS_SAMPLE))
assert res == CRM_STATUS_PARSE_RESULT
CRM_STATUS_SAMPLE = """
Last updated: Fri Jul 31 15:02:15 2015
Last change: Thu Jul 30 14:56:04 2015
Stack: corosync
Current DC: node-1 (1) - partition with quorum
Version: 1.1.12-561c4cf
1 Nodes configured
16 Resources configured
Online: [ node-1 ]
Clone Set: clone_p_vrouter [p_vrouter]
Started: [ node-1 ]
vip__management (ocf::fuel:ns_IPaddr2): Started node-1
vip__public_vrouter (ocf::fuel:ns_IPaddr2): Started node-1
vip__management_vrouter (ocf::fuel:ns_IPaddr2): Started node-1
vip__public (ocf::fuel:ns_IPaddr2): Started node-1
Master/Slave Set: master_p_conntrackd [p_conntrackd]
Masters: [ node-1 ]
Clone Set: clone_p_haproxy [p_haproxy]
Started: [ node-1 ]
Clone Set: clone_p_dns [p_dns]
Started: [ node-1 ]
Clone Set: clone_p_mysql [p_mysql]
Started: [ node-1 ]
Master/Slave Set: master_p_rabbitmq-server [p_rabbitmq-server]
Masters: [ node-1 ]
Clone Set: clone_p_heat-engine [p_heat-engine]
Started: [ node-1 ]
Clone Set: clone_p_neutron-plugin-openvswitch-agent [p_neutron-plugin-openvswitch-agent]
Started: [ node-1 ]
Clone Set: clone_p_neutron-dhcp-agent [p_neutron-dhcp-agent]
Started: [ node-1 ]
Clone Set: clone_p_neutron-metadata-agent [p_neutron-metadata-agent]
Started: [ node-1 ]
Clone Set: clone_p_neutron-l3-agent [p_neutron-l3-agent]
Started: [ node-1 ]
Clone Set: clone_p_ntp [p_ntp]
Started: [ node-1 ]
"""[1:] # noqa
CRM_STATUS_PARSE_RESULT = [
"p_vrouter",
"p_heat-engine",
"p_neutron-plugin-openvswitch-agent",
"p_neutron-dhcp-agent",
"p_neutron-metadata-agent",
"p_neutron-l3-agent",
]

View File

@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from octane.commands import upgrade_node
def test_parser(mocker, octane_app):
m = mocker.patch('octane.commands.upgrade_node.upgrade_node')
@ -19,19 +17,3 @@ def test_parser(mocker, octane_app):
assert not octane_app.stdout.getvalue()
assert not octane_app.stderr.getvalue()
m.assert_called_once_with(1, [2, 3], isolated=True)
def test_parse_tenant_get():
res = upgrade_node.parse_tenant_get(TENANT_GET_SAMPLE, 'id')
assert res == 'e26c8079d61f46c48f9a6d606631ee5e'
TENANT_GET_SAMPLE = """
+-------------+-----------------------------------+
| Property | Value |
+-------------+-----------------------------------+
| description | Tenant for the openstack services |
| enabled | True |
| id | e26c8079d61f46c48f9a6d606631ee5e |
| name | services |
+-------------+-----------------------------------+
"""[1:]

148
octane/util/env.py Normal file
View File

@ -0,0 +1,148 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import os.path
import time
import uuid
from fuelclient.objects import node as node_obj
from octane import magic_consts
from octane.util import ssh
from octane.util import subprocess
LOG = logging.getLogger(__name__)
def get_controllers(env):
    """Yield every controller node belonging to *env*.

    A node counts as a controller when 'controller' is among its current
    or pending roles.  Raises if the environment has no controller.
    """
    env_id = env.data['id']
    yielded_any = False
    for candidate in node_obj.Node.get_all():
        if candidate.data['cluster'] != env_id:
            continue
        if ('controller' in candidate.data['roles'] or
                'controller' in candidate.data['pending_roles']):
            yield candidate
            yielded_any = True
    if not yielded_any:
        raise Exception("Can't find controller node in env %s" %
                        env.data['id'])
def clone_env(env_id, release):
    """Clone environment *env_id* for *release* via the fuel2 CLI.

    Returns the id of the newly created (seed) environment; raises when
    the CLI's JSON output carries no 'id' field.
    """
    LOG.info("Cloning env %s for release %s", env_id, release.data['name'])
    clone_cmd = ["fuel2", "env", "clone", "-f", "json",
                 str(env_id), uuid.uuid4().hex, str(release.data['id'])]
    res, _ = subprocess.call(clone_cmd, stdout=subprocess.PIPE)
    for pair in json.loads(res):
        if pair['Field'] == 'id':
            return pair['Value']
    raise Exception("Couldn't find new environment ID in fuel CLI output:"
                    "\n%s" % res)
def delete_fuel_resources(env):
    """Run the delete_fuel_resources helper script on one controller of *env*."""
    controller = next(get_controllers(env))
    sftp = ssh.sftp(controller)
    helper_script = os.path.join(magic_consts.CWD,
                                 "helpers/delete_fuel_resources.py")
    sftp.put(helper_script, "/tmp/delete_fuel_resources.py")
    ssh.call(
        ["sh", "-c", ". /root/openrc; python /tmp/delete_fuel_resources.py"],
        node=controller,
    )
def parse_tenant_get(output, field):
    """Extract *field*'s value from `keystone tenant-get` table output.

    Raises Exception when the field is not present in the table body.
    """
    # Rows 0-2 are the top border and header; the final row is the
    # bottom border, so only the body rows are scanned.
    body_rows = output.splitlines()[3:-1]
    for row in body_rows:
        # cells == ['|', <property>, '|', <value>, '|']
        cells = row.split()
        if cells[1] == field:
            return cells[3]
    raise Exception(
        "Field {0} not found in output:\n{1}".format(field, output))
def get_service_tenant_id(env, node=None):
    """Return the 'services' tenant id for *env*, caching it on disk.

    The first call queries keystone over SSH (on *node*, or any
    controller of *env* when node is None) and writes the id into a
    FUEL_CACHE file; later calls just read that file back.
    """
    cache_file = os.path.join(
        magic_consts.FUEL_CACHE,
        "env-{0}-service-tenant-id".format(env.data['id']),
    )
    if os.path.exists(cache_file):
        with open(cache_file) as cached:
            return cached.readline()
    target = node if node is not None else next(get_controllers(env))
    tenant_out, _ = ssh.call(
        [
            'sh', '-c',
            '. /root/openrc; keystone tenant-get services',
        ],
        node=target,
        stdout=ssh.PIPE,
    )
    tenant_id = parse_tenant_get(tenant_out, 'id')
    with open(cache_file, 'w') as cached:
        cached.write(tenant_id)
    return tenant_id
def wait_for_node(node, status, timeout=60 * 60, check_freq=60):
    """Block until *node* is online and reports *status*.

    Polls every *check_freq* seconds; raises if the node enters 'error'
    status or if *timeout* seconds pass without reaching the target.
    """
    node_id = node.data['id']
    LOG.debug("Waiting for node %s to transition to status '%s'",
              node_id, status)
    poll_start = time.time()  # TODO: use monotonic timer
    while True:
        fresh = node.get_fresh_data()
        if fresh['status'] == 'error':
            raise Exception("Node %s fell into error status" % (node_id,))
        if fresh['online'] and fresh['status'] == status:
            LOG.info("Node %s transitioned to status '%s'", node_id, status)
            return
        if time.time() - poll_start >= timeout:
            raise Exception("Timeout waiting for node %s to transition to "
                            "status '%s'" % (node_id, status))
        time.sleep(check_freq)
def wait_for_nodes(nodes, status, timeout=60 * 60, check_freq=60):
    """Wait, one node at a time, for every node in *nodes* to reach *status*."""
    # TODO: do this smarter way
    for pending in nodes:
        wait_for_node(pending, status, timeout, check_freq)
def move_nodes(env, nodes):
    """Move *nodes* into *env* via the fuel2 CLI, then wait for provisioning."""
    target_env = str(env.data['id'])
    for moved in nodes:
        subprocess.call(
            ["fuel2", "env", "move", "node",
             str(moved.data['id']), target_env])
    wait_for_nodes(nodes, "provisioned")
def provision_nodes(env, nodes):
    """Provision *nodes* in *env* and block until they report 'provisioned'."""
    env.install_selected_nodes('provision', nodes)
    wait_for_nodes(nodes, "provisioned")
def deploy_nodes(env, nodes):
    """Deploy *nodes* in *env* and block until they report 'ready'."""
    env.install_selected_nodes('deploy', nodes)
    wait_for_nodes(nodes, "ready")

View File

@ -0,0 +1,93 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from octane import magic_consts
from octane.util import env as env_util
from octane.util import ssh
def disable_apis(env):
    """Point haproxy on every controller of *env* at a maintenance backend.

    On each controller this edits /etc/haproxy: enables admin access on
    the stats socket in haproxy.cfg, appends a 'use_backend maintenance'
    line to every tcp-mode service in conf.d, then restarts p_haproxy.
    """
    controllers = list(env_util.get_controllers(env))
    maintenance_line = 'backend maintenance'
    # Matches the stats-socket line only when 'level admin' is not
    # already present, so the edit is idempotent.
    stats_socket_re = re.compile('stats\s+socket\s+/var/lib/haproxy/stats'
                                 '(?!.*level admin)')
    mode_tcp_re = re.compile('mode\s+tcp')
    use_backend_line = '  use_backend maintenance if TRUE'
    for node in controllers:
        sftp = ssh.sftp(node)
        sftp.chdir('/etc/haproxy')
        with ssh.update_file(sftp, 'haproxy.cfg') as (old, new):
            found_maint_line = False
            for line in old:
                if maintenance_line in line:
                    found_maint_line = True
                # Append 'level admin' to the stats socket declaration.
                line = stats_socket_re.sub(r'\g<0> level admin', line)
                new.write(line)
            if not found_maint_line:
                # NOTE(review): written without a trailing newline —
                # presumably tolerated by haproxy; confirm.
                new.write(maintenance_line)
        sftp.chdir('/etc/haproxy/conf.d')
        for f in sftp.listdir():
            with ssh.update_file(sftp, f) as (old, new):
                contents = old.read()
                if not mode_tcp_re.search(contents):
                    # Non-tcp service configs are left untouched.
                    raise ssh.DontUpdateException
                new.write(contents)
                if not contents.endswith('\n'):
                    new.write('\n')
                new.write(use_backend_line)
        ssh.call(['crm', 'resource', 'restart', 'p_haproxy'], node=node)
# Pacemaker clones that must keep running during maintenance operations.
_default_exclude_services = ('mysql', 'haproxy', 'p_dns', 'p_ntp')


def parse_crm_status(status_out, exclude=_default_exclude_services):
    """Yield clone-set resource names found in `crm status` output.

    Any name containing one of the *exclude* substrings is skipped.
    """
    for clone_match in re.finditer(r"clone.*\[(.*)\]", status_out):
        resource = clone_match.group(1)
        if not any(skip in resource for skip in exclude):
            yield resource
def stop_corosync_services(env):
    """Stop every non-excluded Pacemaker clone on each controller of *env*."""
    controllers = list(env_util.get_controllers(env))
    for controller in controllers:
        status_out, _ = ssh.call(['crm', 'status'],
                                 stdout=ssh.PIPE, node=controller)
        for resource in parse_crm_status(status_out):
            ssh.call(['crm', 'resource', 'stop', resource], node=controller)
def stop_upstart_services(env):
    """Stop running OpenStack upstart services on each controller of *env*.

    The list of stopped services is persisted to /root/services_list on
    the node so a later run (or a start counterpart) can reuse it; when
    that file already exists it is read instead of querying initctl.
    """
    controllers = list(env_util.get_controllers(env))
    # Matches 'start/running' initctl lines whose job name begins with
    # one of the known OpenStack service prefixes.
    service_re = re.compile("^((?:%s)[^\s]*).*start/running" %
                            ("|".join(magic_consts.OS_SERVICES),),
                            re.MULTILINE)
    for node in controllers:
        sftp = ssh.sftp(node)
        try:
            svc_file = sftp.open('/root/services_list')
        except IOError:
            # First run on this node: discover services via initctl and
            # record them; write to a temp file and rename for atomicity.
            with sftp.open('/root/services_list.tmp', 'w') as svc_file:
                initctl_out, _ = ssh.call(['initctl', 'list'],
                                          stdout=ssh.PIPE, node=node)
                to_stop = []
                for match in service_re.finditer(initctl_out):
                    service = match.group(1)
                    to_stop.append(service)
                    svc_file.write(service + '\n')
            sftp.rename('/root/services_list.tmp', '/root/services_list')
        else:
            # Cached list exists: reuse it.
            with svc_file:
                to_stop = svc_file.read().splitlines()
        for service in to_stop:
            ssh.call(['stop', service], node=node)

View File

@ -4,7 +4,7 @@
pbr>=0.6,!=0.7,<1.0
# Cap versions of libs pulled by python-keystoneclient
stevedore>=1.1.0,<1.7.0
stevedore<1.7.0,>=1.1.0
oslo.config<2.0.0 # the last version with namespace package
# python-fuelclient needs these versions, python-keystoneclient pulls newer ones