Merge remote-tracking branch 'public/telco' into telco

commit 4ef708703c
Author: Artur Svechnikov
Date:   2015-08-26 14:49:43 +03:00

24 changed files with 464 additions and 416 deletions

View File

@ -12,13 +12,12 @@
import logging
from octane.commands.upgrade_db import get_controllers
from octane.commands.upgrade_node import ControllerUpgrade
from octane.commands.upgrade_node import wait_for_node
from octane.helpers import network
from octane.helpers.node_attributes import copy_disks
from octane.helpers.node_attributes import copy_ifaces
from octane import magic_consts
from octane.util import env as env_util
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
@ -71,7 +70,7 @@ def install_node(orig_id, seed_id, node_ids, isolated=False):
raise Exception("Original and seed environments have the same ID: %s",
orig_id)
orig_env = env(orig_id)
orig_node = next(get_controllers(orig_env))
orig_node = env_util.get_one_controller(orig_env)
seed_env = env(seed_id)
seed_env.assign(nodes, orig_node.data['roles'])
for node in nodes:
@ -79,18 +78,15 @@ def install_node(orig_id, seed_id, node_ids, isolated=False):
nic_info_fixture = orig_node.get_attribute('interfaces')
update_node_settings(node, disk_info_fixture, nic_info_fixture)
seed_env.install_selected_nodes('provision', nodes)
for node in nodes:
wait_for_node(node, "provisioned")
env_util.provision_nodes(seed_env, nodes)
for node in nodes:
# FIXME: properly call all handlers all over the place
ControllerUpgrade(node, seed_env, isolated=isolated).predeploy()
if len(nodes) > 1:
isolate(nodes, seed_env)
seed_env.deploy_changes()
for node in nodes:
wait_for_node(node, "ready")
env_util.deploy_changes(seed_env, nodes)
class InstallNodeCommand(cmd.Command):

View File

@ -19,14 +19,15 @@ from octane.util import docker
from octane.util import subprocess
def patch_puppet():
def patch_puppet(revert=False):
direction = "-R" if revert else "-N"
puppet_patch_dir = os.path.join(magic_consts.CWD, "patches", "puppet")
for d in os.listdir(puppet_patch_dir):
d = os.path.join(puppet_patch_dir, d)
if not os.path.isdir(d):
continue
with open(os.path.join(d, "patch")) as patch:
subprocess.call(["patch", "-Np3"], stdin=patch,
subprocess.call(["patch", direction, "-p3"], stdin=patch,
cwd=magic_consts.PUPPET_DIR)
@ -42,9 +43,6 @@ def prepare():
os.makedirs(magic_consts.FUEL_CACHE)
subprocess.call(["yum", "-y", "install"] + magic_consts.PACKAGES)
subprocess.call(["pip", "install", "wheel"])
octane_fuelclient = os.path.join(magic_consts.CWD, '..',
'octane_fuelclient')
subprocess.call(["pip", "install", "-U", octane_fuelclient])
# From patch_all_containers
apply_patches()
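
For context, a minimal sketch of what the new revert switch amounts to: the same patch files are fed to patch(1) with -R (reverse a previously applied patch) instead of -N (apply forward, skipping hunks that already look applied). The helper name and paths below are illustrative, not the real magic_consts values.

import subprocess

def apply_one_patch(patch_path, target_dir, revert=False):
    # -N applies the patch forward and ignores already-applied hunks;
    # -R reverses a patch that was applied earlier.
    direction = "-R" if revert else "-N"
    with open(patch_path) as patch:
        subprocess.check_call(["patch", direction, "-p3"],
                              stdin=patch, cwd=target_dir)

# apply_one_patch("patches/puppet/001/patch", "/etc/puppet/modules")               # apply
# apply_one_patch("patches/puppet/001/patch", "/etc/puppet/modules", revert=True)  # roll back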

View File

@ -9,27 +9,28 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from octane.commands.upgrade_db import get_controllers
from octane.commands.upgrade_db import parse_crm_status
from octane.helpers import network
from octane import magic_consts
from octane.util import env as env_util
from octane.util import maintenance
from octane.util import ssh
def start_corosync_services(env):
controllers = list(get_controllers(env))
controllers = list(env_util.get_controllers(env))
for node in controllers:
status_out, _ = ssh.call(['crm', 'status'], stdout=ssh.PIPE, node=node)
for service in parse_crm_status(status_out):
for service in maintenance.parse_crm_status(status_out):
ssh.call(['crm', 'resource', 'start', service], node=node)
def start_upstart_services(env):
controllers = list(get_controllers(env))
controllers = list(env_util.get_controllers(env))
for node in controllers:
sftp = ssh.sftp(node)
try:
@ -44,7 +45,7 @@ def start_upstart_services(env):
def disconnect_networks(env):
controllers = list(get_controllers(env))
controllers = list(env_util.get_controllers(env))
for node in controllers:
deployment_info = env.get_default_facts('deployment',
nodes=[node.data['id']])
@ -53,17 +54,20 @@ def disconnect_networks(env):
def connect_to_networks(env):
controllers = list(get_controllers(env))
controllers = list(env_util.get_controllers(env))
backup_path = os.path.join(magic_consts.FUEL_CACHE,
'deployment_{0}.orig'
.format(env.id))
for node in controllers:
deployment_info = env.read_deployment_info('deployment',
magic_consts.FUEL_CACHE)
backup_path)
for info in deployment_info:
if info['role'] in ('primary-controller', 'controller'):
network.create_patch_ports(node, info)
def update_neutron_config(env):
controllers = list(get_controllers(env))
controllers = list(env_util.get_controllers(env))
tenant_file = '%s/env-%s-service-tenant-id' % (magic_consts.FUEL_CACHE,
str(env.id))
with open(tenant_file) as f:

View File

@ -11,123 +11,20 @@
# under the License.
import os.path
import re
import shutil
import time
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from fuelclient.objects import node as node_obj
from octane import magic_consts
from octane.util import env as env_util
from octane.util import maintenance
from octane.util import ssh
def get_controllers(env):
found = False
for node in node_obj.Node.get_all():
if node.data['cluster'] != env.data['id']:
continue
if ('controller' in node.data['roles'] or
'controller' in node.data['pending_roles']):
yield node
found = True
if not found:
raise Exception("Can't find controller node in env %s" %
env.data['id'])
def delete_fuel_resources(seed_env):
node = next(get_controllers(seed_env))
sftp = ssh.sftp(node)
sftp.put(
os.path.join(magic_consts.CWD, "helpers/delete_fuel_resources.py"),
"/tmp/delete_fuel_resources.py",
)
ssh.call(
["sh", "-c", ". /root/openrc; python /tmp/delete_fuel_resources.py"],
node=node,
)
def disable_apis(env):
controllers = list(get_controllers(env))
maintenance_line = 'backend maintenance'
stats_socket_re = re.compile('stats\s+socket\s+/var/lib/haproxy/stats'
'(?!.*level admin)')
mode_tcp_re = re.compile('mode\s+tcp')
use_backend_line = ' use_backend maintenance if TRUE'
for node in controllers:
sftp = ssh.sftp(node)
sftp.chdir('/etc/haproxy')
with ssh.update_file(sftp, 'haproxy.cfg') as (old, new):
found_maint_line = False
for line in old:
if maintenance_line in line:
found_maint_line = True
line = stats_socket_re.sub(r'\g<0> level admin', line)
new.write(line)
if not found_maint_line:
new.write(maintenance_line)
sftp.chdir('/etc/haproxy/conf.d')
for f in sftp.listdir():
with ssh.update_file(sftp, f) as (old, new):
contents = old.read()
if not mode_tcp_re.search(contents):
raise ssh.DontUpdateException
new.write(contents)
if not contents.endswith('\n'):
new.write('\n')
new.write(use_backend_line)
ssh.call(['crm', 'resource', 'restart', 'p_haproxy'], node=node)
_default_exclude_services = ('mysql', 'haproxy', 'p_dns', 'p_ntp')
def parse_crm_status(status_out, exclude=_default_exclude_services):
for match in re.finditer(r"clone.*\[(.*)\]", status_out):
name = match.group(1)
if any(service in name for service in exclude):
continue
yield name
def stop_corosync_services(env):
controllers = list(get_controllers(env))
for node in controllers:
status_out, _ = ssh.call(['crm', 'status'], stdout=ssh.PIPE, node=node)
for service in parse_crm_status(status_out):
ssh.call(['crm', 'resource', 'stop', service], node=node)
def stop_upstart_services(env):
controllers = list(get_controllers(env))
service_re = re.compile("^((?:%s)[^\s]*).*start/running" %
("|".join(magic_consts.OS_SERVICES),),
re.MULTILINE)
for node in controllers:
sftp = ssh.sftp(node)
try:
svc_file = sftp.open('/root/services_list')
except IOError:
with sftp.open('/root/services_list.tmp', 'w') as svc_file:
initctl_out, _ = ssh.call(['initctl', 'list'],
stdout=ssh.PIPE, node=node)
to_stop = []
for match in service_re.finditer(initctl_out):
service = match.group(1)
to_stop.append(service)
svc_file.write(service + '\n')
sftp.rename('/root/services_list.tmp', '/root/services_list')
else:
with svc_file:
to_stop = svc_file.read().splitlines()
for service in to_stop:
ssh.call(['stop', service], node=node)
def mysqldump_from_env(env):
node = next(get_controllers(env))
node = env_util.get_one_controller(env)
local_fname = os.path.join(magic_consts.FUEL_CACHE, 'dbs.original.sql.gz')
with ssh.popen(['sh', '-c', 'mysqldump --add-drop-database'
' --lock-all-tables --databases %s | gzip' %
@ -144,7 +41,7 @@ def mysqldump_from_env(env):
def mysqldump_restore_to_env(env, fname):
node = next(get_controllers(env))
node = env_util.get_one_controller(env)
with open(fname, 'rb') as local_file:
with ssh.popen(['sh', '-c', 'zcat | mysql'],
stdin=ssh.PIPE, node=node) as proc:
@ -152,7 +49,7 @@ def mysqldump_restore_to_env(env, fname):
def db_sync(env):
node = next(get_controllers(env))
node = env_util.get_one_controller(env)
ssh.call(['keystone-manage', 'db_sync'], node=node, parse_levels=True)
ssh.call(['nova-manage', 'db', 'sync'], node=node, parse_levels=True)
ssh.call(['heat-manage', 'db_sync'], node=node, parse_levels=True)
@ -165,12 +62,12 @@ def db_sync(env):
def upgrade_db(orig_id, seed_id):
orig_env = environment_obj.Environment(orig_id)
seed_env = environment_obj.Environment(seed_id)
delete_fuel_resources(seed_env)
env_util.delete_fuel_resources(seed_env)
# Wait for Neutron to reconfigure networks
time.sleep(7) # FIXME: Use more deterministic way
disable_apis(orig_env)
stop_corosync_services(seed_env)
stop_upstart_services(seed_env)
maintenance.disable_apis(orig_env)
maintenance.stop_corosync_services(seed_env)
maintenance.stop_upstart_services(seed_env)
fname = mysqldump_from_env(orig_env)
mysqldump_restore_to_env(seed_env, fname)
db_sync(seed_env)

View File

@ -12,19 +12,16 @@
from __future__ import print_function
import json
import logging
import uuid
from octane.commands.upgrade_db import get_controllers
from octane import magic_consts
from octane.util import ssh
from octane.util import subprocess
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from fuelclient.objects import release as release_obj
from octane import magic_consts
from octane.util import env as env_util
from octane.util import ssh
LOG = logging.getLogger(__name__)
@ -57,27 +54,12 @@ def set_cobbler_provision(env_id):
def upgrade_env(env_id):
target_release = find_deployable_release("Ubuntu")
LOG.info("Cloning env %s for release %s",
env_id, target_release.data['name'])
res, _ = subprocess.call(
["fuel2", "env", "clone", "-f", "json",
str(env_id), uuid.uuid4().hex, str(target_release.data['id'])],
stdout=subprocess.PIPE,
)
for kv in json.loads(res):
if kv['Field'] == 'id':
seed_id = kv['Value']
break
else:
raise Exception("Couldn't find new environment ID in fuel CLI output:"
"\n%s" % res)
return seed_id
return env_util.clone_env(env_id, target_release)
def write_service_tenant_id(env_id):
env = environment_obj.Environment(env_id)
node = get_controllers(env).next()
node = env_util.get_one_controller(env)
tenant_id, _ = ssh.call(["bash", "-c", ". /root/openrc;",
"keystone tenant-list | ",
"awk -F\| '\$2 ~ /id/{print \$3}' | tr -d \ "],

View File

@ -12,54 +12,21 @@
import logging
import os
import time
import yaml
from octane.helpers import tasks as tasks_helpers
from octane.helpers import transformations
from octane import magic_consts
from octane.util import ssh
from octane.util import subprocess
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from fuelclient.objects import node as node_obj
from octane.helpers import tasks as tasks_helpers
from octane.helpers import transformations
from octane import magic_consts
from octane.util import env as env_util
from octane.util import ssh
LOG = logging.getLogger(__name__)
def parse_tenant_get(output, field):
for line in output.splitlines()[3:-1]:
parts = line.split()
if parts[1] == field:
return parts[3]
raise Exception(
"Field {0} not found in output:\n{1}".format(field, output))
def get_service_tenant_id(node):
fname = os.path.join(
magic_consts.FUEL_CACHE,
"env-{0}-service-tenant-id".format(node.data['cluster']),
)
if os.path.exists(fname):
with open(fname) as f:
return f.readline()
tenant_out, _ = ssh.call(
[
'sh', '-c',
'. /root/openrc; keystone tenant-get services',
],
node=node,
stdout=ssh.PIPE,
)
tenant_id = parse_tenant_get(tenant_out, 'id')
with open(fname, 'w') as f:
f.write(tenant_id)
return tenant_id
class UpgradeHandler(object):
def __init__(self, node, env, isolated):
self.node = node
@ -86,11 +53,11 @@ class ControllerUpgrade(UpgradeHandler):
self.gateway = None
def preupgrade(self):
self.service_tenant_id = get_service_tenant_id(self.node)
self.service_tenant_id = env_util.get_service_tenant_id(
self.env, self.node)
def predeploy(self):
deployment_info = self.env.get_default_facts(
'deployment', nodes=[self.node.data['id']])
deployment_info = self.env.get_default_facts('deployment')
if self.isolated:
# From backup_deployment_info
backup_path = os.path.join(
@ -101,6 +68,8 @@ class ControllerUpgrade(UpgradeHandler):
os.makedirs(backup_path)
# Roughly taken from Environment.write_facts_to_dir
for info in deployment_info:
if info['uid'] != self.node.id:
continue
fname = os.path.join(
backup_path,
"{0}_{1}.yaml".format(info['role'], info['uid']),
@ -108,6 +77,8 @@ class ControllerUpgrade(UpgradeHandler):
with open(fname, 'w') as f:
yaml.dump(info, f, default_flow_style=False)
for info in deployment_info:
if info['uid'] != self.node.id:
continue
if self.isolated:
gw = get_admin_gateway(self.env)
transformations.remove_physical_ports(info)
@ -174,24 +145,6 @@ def call_role_upgrade_handlers(handlers, method):
method, type(handler).__name__)
def wait_for_node(node, status, timeout=60 * 60, check_freq=60):
node_id = node.data['id']
LOG.debug("Waiting for node %s to transition to status '%s'",
node_id, status)
started_at = time.time() # TODO: use monotonic timer
while True:
data = node.get_fresh_data()
if data['status'] == 'error':
raise Exception("Node %s fell into error status" % (node_id,))
if data['online'] and data['status'] == status:
LOG.info("Node %s transitioned to status '%s'", node_id, status)
return
if time.time() - started_at >= timeout:
raise Exception("Timeout waiting for node %s to transition to "
"status '%s'" % (node_id, status))
time.sleep(check_freq)
def upgrade_node(env_id, node_ids, isolated=False):
# From check_deployment_status
env = environment_obj.Environment(env_id)
@ -222,20 +175,9 @@ def upgrade_node(env_id, node_ids, isolated=False):
call_role_upgrade_handlers(role_handlers, 'preupgrade')
call_role_upgrade_handlers(role_handlers, 'prepare')
for node in nodes: # TODO: create wait_for_nodes method here
subprocess.call(
["fuel2", "env", "move", "node", str(node_id), str(env_id)])
for node in nodes: # TODO: create wait_for_nodes method here
wait_for_node(node, "provisioned")
env_util.move_nodes(env, nodes)
call_role_upgrade_handlers(role_handlers, 'predeploy')
env.install_selected_nodes('deploy', nodes)
for node in nodes: # TODO: create wait_for_nodes method here
wait_for_node(node, "ready")
env_util.deploy_nodes(env, nodes)
call_role_upgrade_handlers(role_handlers, 'postdeploy')

View File

@ -1,3 +1,15 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuelclient.commands import base
from fuelclient.commands import environment as env_commands
from fuelclient.common import data_utils
@ -31,34 +43,3 @@ class EnvClone(env_commands.EnvMixIn, base.BaseShowCommand):
)
new_env = data_utils.get_display_data_single(self.columns, new_env)
return (self.columns, new_env)
class EnvRelocateNode(env_commands.EnvMixIn, base.BaseCommand):
"""Update node assignment."""
def get_parser(self, prog_name):
parser = super(EnvRelocateNode, self).get_parser(prog_name)
parser.add_argument('node_id',
type=int,
help='ID of the node to upgrade.')
parser.add_argument('env_id',
type=str,
help='ID of the environment.')
return parser
def take_action(self, parsed_args):
# TODO(akscram): While the clone procedure is not a part of
# fuelclient.objects.Environment the connection
# will be called directly.
self.client._entity_wrapper.connection.post_request(
"clusters/{0}/upgrade/assign".format(parsed_args.env_id),
{
'node_id': parsed_args.node_id,
}
)
msg = ('Node {node_id} successfully relocated to the environment'
' {env_id}.\n'.format(
node_id=parsed_args.node_id,
env_id=parsed_args.env_id,
))
self.app.stdout.write(msg)

View File

@ -0,0 +1,45 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuelclient.commands import base
from fuelclient.commands import environment as env_commands
class EnvMoveNode(env_commands.EnvMixIn, base.BaseCommand):
"""Update node assignment."""
def get_parser(self, prog_name):
parser = super(EnvMoveNode, self).get_parser(prog_name)
parser.add_argument('node_id',
type=int,
help='ID of the node to upgrade.')
parser.add_argument('env_id',
type=str,
help='ID of the environment.')
return parser
def take_action(self, parsed_args):
# TODO(akscram): While the clone procedure is not a part of
# fuelclient.objects.Environment the connection
# will be called directly.
self.client._entity_wrapper.connection.post_request(
"clusters/{0}/upgrade/assign".format(parsed_args.env_id),
{
'node_id': parsed_args.node_id,
}
)
msg = ('Node {node_id} successfully relocated to the environment'
' {env_id}.\n'.format(
node_id=parsed_args.node_id,
env_id=parsed_args.env_id,
))
self.app.stdout.write(msg)

View File

@ -16,8 +16,8 @@ import subprocess
from octane import magic_consts
from octane.util import ssh
from octane.commands.upgrade_db import get_controllers
from octane.helpers import transformations as ts
from octane.util import env as env_util
LOG = logging.getLogger(__name__)
@ -185,7 +185,7 @@ def delete_tunnels_from_node(node, bridge):
def delete_overlay_network(env, bridge):
nodes = list(get_controllers(env))
nodes = list(env_util.get_controllers(env))
for node in nodes:
delete_tunnels_from_node(node, bridge)

View File

@ -3,7 +3,7 @@
pycmd() {
if ! python -c 'import octane'; then
yum install -y python-paramiko
pip install -e "$CWD/.."
pip install --no-index -e "$CWD/.."
fi
local opts=""
if shopt -qo xtrace; then

octane/tests/test_env.py (new file, 29 lines)
View File

@ -0,0 +1,29 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from octane.util import env as env_util
def test_parse_tenant_get():
res = env_util.parse_tenant_get(TENANT_GET_SAMPLE, 'id')
assert res == 'e26c8079d61f46c48f9a6d606631ee5e'
TENANT_GET_SAMPLE = """
+-------------+-----------------------------------+
| Property | Value |
+-------------+-----------------------------------+
| description | Tenant for the openstack services |
| enabled | True |
| id | e26c8079d61f46c48f9a6d606631ee5e |
| name | services |
+-------------+-----------------------------------+
"""[1:]

View File

@ -0,0 +1,68 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from octane.util import maintenance
def test_parse_crm_status():
res = list(maintenance.parse_crm_status(CRM_STATUS_SAMPLE))
assert res == CRM_STATUS_PARSE_RESULT
CRM_STATUS_SAMPLE = """
Last updated: Fri Jul 31 15:02:15 2015
Last change: Thu Jul 30 14:56:04 2015
Stack: corosync
Current DC: node-1 (1) - partition with quorum
Version: 1.1.12-561c4cf
1 Nodes configured
16 Resources configured
Online: [ node-1 ]
Clone Set: clone_p_vrouter [p_vrouter]
Started: [ node-1 ]
vip__management (ocf::fuel:ns_IPaddr2): Started node-1
vip__public_vrouter (ocf::fuel:ns_IPaddr2): Started node-1
vip__management_vrouter (ocf::fuel:ns_IPaddr2): Started node-1
vip__public (ocf::fuel:ns_IPaddr2): Started node-1
Master/Slave Set: master_p_conntrackd [p_conntrackd]
Masters: [ node-1 ]
Clone Set: clone_p_haproxy [p_haproxy]
Started: [ node-1 ]
Clone Set: clone_p_dns [p_dns]
Started: [ node-1 ]
Clone Set: clone_p_mysql [p_mysql]
Started: [ node-1 ]
Master/Slave Set: master_p_rabbitmq-server [p_rabbitmq-server]
Masters: [ node-1 ]
Clone Set: clone_p_heat-engine [p_heat-engine]
Started: [ node-1 ]
Clone Set: clone_p_neutron-plugin-openvswitch-agent [p_neutron-plugin-openvswitch-agent]
Started: [ node-1 ]
Clone Set: clone_p_neutron-dhcp-agent [p_neutron-dhcp-agent]
Started: [ node-1 ]
Clone Set: clone_p_neutron-metadata-agent [p_neutron-metadata-agent]
Started: [ node-1 ]
Clone Set: clone_p_neutron-l3-agent [p_neutron-l3-agent]
Started: [ node-1 ]
Clone Set: clone_p_ntp [p_ntp]
Started: [ node-1 ]
"""[1:] # noqa
CRM_STATUS_PARSE_RESULT = [
"p_vrouter",
"p_heat-engine",
"p_neutron-plugin-openvswitch-agent",
"p_neutron-dhcp-agent",
"p_neutron-metadata-agent",
"p_neutron-l3-agent",
]

View File

@ -20,8 +20,8 @@ def test_prepare_parser(mocker, octane_app):
def test_revert_parser(mocker, octane_app):
m = mocker.patch('octane.commands.prepare.apply_patches')
mock_apply = mocker.patch('octane.commands.prepare.apply_patches')
octane_app.run(["revert-prepare"])
assert not octane_app.stdout.getvalue()
assert not octane_app.stderr.getvalue()
m.assert_called_once_with(revert=True)
mock_apply.assert_called_once_with(revert=True)

View File

@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from octane.commands import upgrade_db
def test_parser(mocker, octane_app):
m = mocker.patch('octane.commands.upgrade_db.upgrade_db')
@ -19,59 +17,3 @@ def test_parser(mocker, octane_app):
assert not octane_app.stdout.getvalue()
assert not octane_app.stderr.getvalue()
m.assert_called_once_with(1, 2)
def test_parse_crm_status():
res = list(upgrade_db.parse_crm_status(CRM_STATUS_SAMPLE))
assert res == CRM_STATUS_PARSE_RESULT
CRM_STATUS_SAMPLE = """
Last updated: Fri Jul 31 15:02:15 2015
Last change: Thu Jul 30 14:56:04 2015
Stack: corosync
Current DC: node-1 (1) - partition with quorum
Version: 1.1.12-561c4cf
1 Nodes configured
16 Resources configured
Online: [ node-1 ]
Clone Set: clone_p_vrouter [p_vrouter]
Started: [ node-1 ]
vip__management (ocf::fuel:ns_IPaddr2): Started node-1
vip__public_vrouter (ocf::fuel:ns_IPaddr2): Started node-1
vip__management_vrouter (ocf::fuel:ns_IPaddr2): Started node-1
vip__public (ocf::fuel:ns_IPaddr2): Started node-1
Master/Slave Set: master_p_conntrackd [p_conntrackd]
Masters: [ node-1 ]
Clone Set: clone_p_haproxy [p_haproxy]
Started: [ node-1 ]
Clone Set: clone_p_dns [p_dns]
Started: [ node-1 ]
Clone Set: clone_p_mysql [p_mysql]
Started: [ node-1 ]
Master/Slave Set: master_p_rabbitmq-server [p_rabbitmq-server]
Masters: [ node-1 ]
Clone Set: clone_p_heat-engine [p_heat-engine]
Started: [ node-1 ]
Clone Set: clone_p_neutron-plugin-openvswitch-agent [p_neutron-plugin-openvswitch-agent]
Started: [ node-1 ]
Clone Set: clone_p_neutron-dhcp-agent [p_neutron-dhcp-agent]
Started: [ node-1 ]
Clone Set: clone_p_neutron-metadata-agent [p_neutron-metadata-agent]
Started: [ node-1 ]
Clone Set: clone_p_neutron-l3-agent [p_neutron-l3-agent]
Started: [ node-1 ]
Clone Set: clone_p_ntp [p_ntp]
Started: [ node-1 ]
"""[1:] # noqa
CRM_STATUS_PARSE_RESULT = [
"p_vrouter",
"p_heat-engine",
"p_neutron-plugin-openvswitch-agent",
"p_neutron-dhcp-agent",
"p_neutron-metadata-agent",
"p_neutron-l3-agent",
]

View File

@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from octane.commands import upgrade_node
def test_parser(mocker, octane_app):
m = mocker.patch('octane.commands.upgrade_node.upgrade_node')
@ -19,19 +17,3 @@ def test_parser(mocker, octane_app):
assert not octane_app.stdout.getvalue()
assert not octane_app.stderr.getvalue()
m.assert_called_once_with(1, [2, 3], isolated=True)
def test_parse_tenant_get():
res = upgrade_node.parse_tenant_get(TENANT_GET_SAMPLE, 'id')
assert res == 'e26c8079d61f46c48f9a6d606631ee5e'
TENANT_GET_SAMPLE = """
+-------------+-----------------------------------+
| Property | Value |
+-------------+-----------------------------------+
| description | Tenant for the openstack services |
| enabled | True |
| id | e26c8079d61f46c48f9a6d606631ee5e |
| name | services |
+-------------+-----------------------------------+
"""[1:]

octane/util/env.py (new file, 157 lines)
View File

@ -0,0 +1,157 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import os.path
import time
import uuid
from fuelclient.objects import node as node_obj
from octane import magic_consts
from octane.util import ssh
from octane.util import subprocess
LOG = logging.getLogger(__name__)
def get_controllers(env):
found = False
for node in node_obj.Node.get_all():
if node.data['cluster'] != env.data['id']:
continue
if ('controller' in node.data['roles'] or
'controller' in node.data['pending_roles']):
yield node
found = True
if not found:
raise Exception("Can't find controller node in env %s" %
env.data['id'])
def get_one_controller(env):
return next(get_controllers(env))
def clone_env(env_id, release):
LOG.info("Cloning env %s for release %s", env_id, release.data['name'])
res, _ = subprocess.call(
["fuel2", "env", "clone", "-f", "json",
str(env_id), uuid.uuid4().hex, str(release.data['id'])],
stdout=subprocess.PIPE,
)
for kv in json.loads(res):
if kv['Field'] == 'id':
seed_id = kv['Value']
break
else:
raise Exception("Couldn't find new environment ID in fuel CLI output:"
"\n%s" % res)
return seed_id
def delete_fuel_resources(env):
node = get_one_controller(env)
sftp = ssh.sftp(node)
sftp.put(
os.path.join(magic_consts.CWD, "helpers/delete_fuel_resources.py"),
"/tmp/delete_fuel_resources.py",
)
ssh.call(
["sh", "-c", ". /root/openrc; python /tmp/delete_fuel_resources.py"],
node=node,
)
def parse_tenant_get(output, field):
for line in output.splitlines()[3:-1]:
parts = line.split()
if parts[1] == field:
return parts[3]
raise Exception(
"Field {0} not found in output:\n{1}".format(field, output))
def get_service_tenant_id(env, node=None):
env_id = env.data['id']
fname = os.path.join(
magic_consts.FUEL_CACHE,
"env-{0}-service-tenant-id".format(env_id),
)
if os.path.exists(fname):
with open(fname) as f:
return f.readline()
if node is None:
node = get_one_controller(env)
tenant_out, _ = ssh.call(
[
'sh', '-c',
'. /root/openrc; keystone tenant-get services',
],
node=node,
stdout=ssh.PIPE,
)
tenant_id = parse_tenant_get(tenant_out, 'id')
with open(fname, 'w') as f:
f.write(tenant_id)
return tenant_id
def wait_for_node(node, status, timeout=60 * 60, check_freq=60):
node_id = node.data['id']
LOG.debug("Waiting for node %s to transition to status '%s'",
node_id, status)
started_at = time.time() # TODO: use monotonic timer
while True:
data = node.get_fresh_data()
if data['status'] == 'error':
raise Exception("Node %s fell into error status" % (node_id,))
if data['online'] and data['status'] == status:
LOG.info("Node %s transitioned to status '%s'", node_id, status)
return
if time.time() - started_at >= timeout:
raise Exception("Timeout waiting for node %s to transition to "
"status '%s'" % (node_id, status))
time.sleep(check_freq)
def wait_for_nodes(nodes, status, timeout=60 * 60, check_freq=60):
for node in nodes: # TODO: do this smarter way
wait_for_node(node, status, timeout, check_freq)
def move_nodes(env, nodes):
env_id = env.data['id']
for node in nodes:
node_id = node.data['id']
subprocess.call(
["fuel2", "env", "move", "node", str(node_id), str(env_id)])
wait_for_nodes(nodes, "provisioned")
def provision_nodes(env, nodes):
env.install_selected_nodes('provision', nodes)
wait_for_nodes(nodes, "provisioned")
def deploy_nodes(env, nodes):
env.install_selected_nodes('deploy', nodes)
wait_for_nodes(nodes, "ready")
def deploy_changes(env, nodes):
env.deploy_changes()
wait_for_nodes(nodes, "ready")
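
A hedged usage sketch of how the helpers in this new module compose; the function below is illustrative (the real wiring lives in install_node and upgrade_node above) and has to run on the Fuel master.

from fuelclient.objects import environment as environment_obj
from octane.util import env as env_util

def upgrade_controllers_sketch(orig_id, seed_release, node_ids):
    orig_env = environment_obj.Environment(orig_id)
    nodes = [n for n in env_util.get_controllers(orig_env)
             if n.data['id'] in node_ids]
    seed_id = env_util.clone_env(orig_id, seed_release)   # wraps `fuel2 env clone -f json`
    seed_env = environment_obj.Environment(seed_id)
    env_util.move_nodes(seed_env, nodes)    # `fuel2 env move node ...`, waits for "provisioned"
    env_util.deploy_nodes(seed_env, nodes)  # install_selected_nodes('deploy'), waits for "ready"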

View File

@ -0,0 +1,93 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from octane import magic_consts
from octane.util import env as env_util
from octane.util import ssh
def disable_apis(env):
controllers = list(env_util.get_controllers(env))
maintenance_line = 'backend maintenance'
stats_socket_re = re.compile('stats\s+socket\s+/var/lib/haproxy/stats'
'(?!.*level admin)')
mode_tcp_re = re.compile('mode\s+tcp')
use_backend_line = ' use_backend maintenance if TRUE'
for node in controllers:
sftp = ssh.sftp(node)
sftp.chdir('/etc/haproxy')
with ssh.update_file(sftp, 'haproxy.cfg') as (old, new):
found_maint_line = False
for line in old:
if maintenance_line in line:
found_maint_line = True
line = stats_socket_re.sub(r'\g<0> level admin', line)
new.write(line)
if not found_maint_line:
new.write(maintenance_line)
sftp.chdir('/etc/haproxy/conf.d')
for f in sftp.listdir():
with ssh.update_file(sftp, f) as (old, new):
contents = old.read()
if not mode_tcp_re.search(contents):
raise ssh.DontUpdateException
new.write(contents)
if not contents.endswith('\n'):
new.write('\n')
new.write(use_backend_line)
ssh.call(['crm', 'resource', 'restart', 'p_haproxy'], node=node)
_default_exclude_services = ('mysql', 'haproxy', 'p_dns', 'p_ntp')
def parse_crm_status(status_out, exclude=_default_exclude_services):
for match in re.finditer(r"clone.*\[(.*)\]", status_out):
name = match.group(1)
if any(service in name for service in exclude):
continue
yield name
def stop_corosync_services(env):
controllers = list(env_util.get_controllers(env))
for node in controllers:
status_out, _ = ssh.call(['crm', 'status'], stdout=ssh.PIPE, node=node)
for service in parse_crm_status(status_out):
ssh.call(['crm', 'resource', 'stop', service], node=node)
def stop_upstart_services(env):
controllers = list(env_util.get_controllers(env))
service_re = re.compile("^((?:%s)[^\s]*).*start/running" %
("|".join(magic_consts.OS_SERVICES),),
re.MULTILINE)
for node in controllers:
sftp = ssh.sftp(node)
try:
svc_file = sftp.open('/root/services_list')
except IOError:
with sftp.open('/root/services_list.tmp', 'w') as svc_file:
initctl_out, _ = ssh.call(['initctl', 'list'],
stdout=ssh.PIPE, node=node)
to_stop = []
for match in service_re.finditer(initctl_out):
service = match.group(1)
to_stop.append(service)
svc_file.write(service + '\n')
sftp.rename('/root/services_list.tmp', '/root/services_list')
else:
with svc_file:
to_stop = svc_file.read().splitlines()
for service in to_stop:
ssh.call(['stop', service], node=node)
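
A small, self-contained illustration (the config line is made up) of the haproxy.cfg rewrite performed by disable_apis above: the negative lookahead keeps the substitution idempotent, so a stats socket line that already carries "level admin" is left untouched on a second run.

import re

stats_socket_re = re.compile(r'stats\s+socket\s+/var/lib/haproxy/stats'
                             r'(?!.*level admin)')

line = "  stats socket /var/lib/haproxy/stats\n"
line = stats_socket_re.sub(r'\g<0> level admin', line)
assert line == "  stats socket /var/lib/haproxy/stats level admin\n"
assert stats_socket_re.sub(r'\g<0> level admin', line) == line  # second pass is a no-op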

View File

@ -1,15 +0,0 @@
from setuptools import find_packages
from setuptools import setup
setup(name="octane_fuelclient",
version="0.0.0",
packages=find_packages(),
zip_safe=False,
entry_points={
'fuelclient': [
'env_clone = octaneclient.commands:EnvClone',
'env_move_node = octaneclient.commands:EnvRelocateNode',
]
}
)

View File

@ -1,16 +0,0 @@
#!/bin/bash -ex
host=${1:-"cz5545-fuel"}
branch=${2:-$(git rev-parse --abbrev-ref HEAD)}
remote="$(git remote -v | awk "/$host/ && /fetch/{print \$2}")"
location="${remote#ssh://$host}"
git push --force "$host" "$branch"
ssh $host \
"set -ex;" \
"cd ${location};" \
"git reset --hard $branch;" \
"git clean -x -d -f;" \
"pip install -U ${location}/octane_fuelclient;"

View File

@ -1,40 +0,0 @@
[tox]
minversion = 1.6
skipsdist = True
envlist = py26,py27,pep8
[testenv]
basepython = python2
usedevelop = True
install_command = pip install -U {opts} {packages}
setenv = VIRTUAL_ENV={envdir}
deps = pytest
commands =
py.test {posargs:octaneclient/test}
[tox:jenkins]
downloadcache = ~/cache/pip
[testenv:pep8]
deps = hacking==0.7
usedevelop = False
commands =
flake8 {posargs:octaneclient}
[testenv:cover]
deps = pytest-cov
commands =
py.test --cov-report html --cov-report term-missing --cov octaneclient {posargs:octaneclient/test}
[testenv:venv]
commands = {posargs:}
[flake8]
ignore = H234,H302,H802
exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,tools,__init__.py,docs
show-pep8 = True
show-source = True
count = True
[hacking]
import_exceptions = testtools.matchers

View File

@ -4,7 +4,7 @@
pbr>=0.6,!=0.7,<1.0
# Cap versions of libs pulled by python-keystoneclient
stevedore>=1.1.0,<1.7.0
stevedore<1.7.0,>=1.1.0
oslo.config<2.0.0 # the last version with namespace package
# python-fuelclient needs these versions, python-keystoneclient pulls newer ones

View File

@ -36,3 +36,6 @@ octane =
install-node = octane.commands.install_node:InstallNodeCommand
upgrade-control = octane.commands.upgrade_controlplane:UpgradeControlPlaneCommand
update-plugin-settings = octane.commands.update_plugin_settings:UpdatePluginSettingsCommand
fuelclient =
env_clone = octane.fuelclient.clone_env:EnvClone
env_move_node = octane.fuelclient.move_node:EnvMoveNode
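
These two fuelclient entry points are what expose the `env clone` and `env move node` subcommands that octane.util.env invokes via subprocess. A minimal sketch, assuming the standard setuptools entry-point discovery that the cliff-based fuel2 CLI relies on, to verify the registration after `pip install -e .`:

import pkg_resources

for ep in pkg_resources.iter_entry_points('fuelclient'):
    print('{0} -> {1}:{2}'.format(ep.name, ep.module_name, ep.attrs[0]))
# Expected to include, among fuelclient's own commands:
#   env_clone -> octane.fuelclient.clone_env:EnvClone
#   env_move_node -> octane.fuelclient.move_node:EnvMoveNode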