From df306dfc51bb522fe52d04f8f938fb251012fe9f Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Tue, 12 Sep 2017 15:39:01 -0600 Subject: [PATCH] Retire Packaging Deb project repos This commit is part of a series to retire the Packaging Deb project. Step 2 is to remove all content from the project repos, replacing it with a README notification where to find ongoing work, and how to recover the repo if needed at some future point (as in https://docs.openstack.org/infra/manual/drivers.html#retiring-a-project). Change-Id: I467ab5607b11d3fe72f7939f67ee891a3c331801 --- .gitignore | 10 - .gitreview | 4 - .testr.conf | 4 - CONTRIBUTING.rst | 16 - LICENSE | 176 --- MANIFEST.in | 7 - README | 14 + README.rst | 38 - bin/cfn-create-aws-symlinks | 87 -- bin/cfn-get-metadata | 85 -- bin/cfn-hup | 108 -- bin/cfn-init | 71 -- bin/cfn-push-stats | 286 ----- bin/cfn-signal | 118 -- doc/.gitignore | 2 - doc/Makefile | 153 --- doc/README.rst | 23 - doc/source/cfn-create-aws-symlinks.rst | 34 - doc/source/cfn-get-metadata.rst | 55 - doc/source/cfn-hup.rst | 34 - doc/source/cfn-init.rst | 46 - doc/source/cfn-push-stats.rst | 98 -- doc/source/cfn-signal.rst | 42 - doc/source/conf.py | 193 --- doc/source/index.rst | 17 - heat_cfntools/__init__.py | 0 heat_cfntools/cfntools/__init__.py | 0 heat_cfntools/cfntools/cfn_helper.py | 1533 ------------------------ heat_cfntools/tests/__init__.py | 0 heat_cfntools/tests/test_cfn_helper.py | 1419 ---------------------- heat_cfntools/tests/test_cfn_hup.py | 91 -- requirements.txt | 4 - setup.cfg | 43 - setup.py | 22 - test-requirements.txt | 9 - tools/lintstack.py | 198 --- tools/lintstack.sh | 59 - tox.ini | 35 - 38 files changed, 14 insertions(+), 5120 deletions(-) delete mode 100644 .gitignore delete mode 100644 .gitreview delete mode 100644 .testr.conf delete mode 100644 CONTRIBUTING.rst delete mode 100644 LICENSE delete mode 100644 MANIFEST.in create mode 100644 README delete mode 100644 README.rst delete mode 100755 bin/cfn-create-aws-symlinks delete mode 100755 bin/cfn-get-metadata delete mode 100755 bin/cfn-hup delete mode 100755 bin/cfn-init delete mode 100755 bin/cfn-push-stats delete mode 100755 bin/cfn-signal delete mode 100644 doc/.gitignore delete mode 100644 doc/Makefile delete mode 100644 doc/README.rst delete mode 100644 doc/source/cfn-create-aws-symlinks.rst delete mode 100644 doc/source/cfn-get-metadata.rst delete mode 100644 doc/source/cfn-hup.rst delete mode 100644 doc/source/cfn-init.rst delete mode 100644 doc/source/cfn-push-stats.rst delete mode 100644 doc/source/cfn-signal.rst delete mode 100644 doc/source/conf.py delete mode 100644 doc/source/index.rst delete mode 100644 heat_cfntools/__init__.py delete mode 100644 heat_cfntools/cfntools/__init__.py delete mode 100644 heat_cfntools/cfntools/cfn_helper.py delete mode 100644 heat_cfntools/tests/__init__.py delete mode 100644 heat_cfntools/tests/test_cfn_helper.py delete mode 100644 heat_cfntools/tests/test_cfn_hup.py delete mode 100644 requirements.txt delete mode 100644 setup.cfg delete mode 100755 setup.py delete mode 100644 test-requirements.txt delete mode 100755 tools/lintstack.py delete mode 100755 tools/lintstack.sh delete mode 100644 tox.ini diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 65e0127..0000000 --- a/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -*.pyc -*.swp -build -dist -heat_cfntools.egg-info/ -.testrepository/ -subunit.log -.tox -AUTHORS -ChangeLog diff --git a/.gitreview b/.gitreview deleted file mode 100644 index ef1a054..0000000 --- a/.gitreview +++ /dev/null 
@@ -1,4 +0,0 @@ -[gerrit] -host=review.openstack.org -port=29418 -project=openstack/heat-cfntools.git diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index 8528c86..0000000 --- a/.testr.conf +++ /dev/null @@ -1,4 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=60 ${PYTHON:-python} -m subunit.run discover -t ./ ./heat_cfntools/tests $LISTOPT $IDOPTION -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index 8cc4989..0000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,16 +0,0 @@ -If you would like to contribute to the development of OpenStack, -you must follow the steps in this page: - - http://docs.openstack.org/infra/manual/developers.html - -Once those steps have been completed, changes to OpenStack -should be submitted for review via the Gerrit tool, following -the workflow documented at: - - http://docs.openstack.org/infra/manual/developers.html#development-workflow - -Pull requests submitted through GitHub will be ignored. - -Bugs should be filed on Launchpad, not GitHub: - - https://bugs.launchpad.net/heat diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 68c771a..0000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 3368e4b..0000000 --- a/MANIFEST.in +++ /dev/null @@ -1,7 +0,0 @@ -include CONTRIBUTING.rst -include MANIFEST.in -include README.rst -include AUTHORS LICENSE -include ChangeLog -graft doc -graft tools diff --git a/README b/README new file mode 100644 index 0000000..8fcd2b2 --- /dev/null +++ b/README @@ -0,0 +1,14 @@ +This project is no longer maintained. + +The contents of this repository are still available in the Git +source code management system. To see the contents of this +repository before it reached its end of life, please check out the +previous commit with "git checkout HEAD^1". + +For ongoing work on maintaining OpenStack packages in the Debian +distribution, please see the Debian OpenStack packaging team at +https://wiki.debian.org/OpenStack/. + +For any further questions, please email +openstack-dev@lists.openstack.org or join #openstack-dev on +Freenode. diff --git a/README.rst b/README.rst deleted file mode 100644 index dfe9c50..0000000 --- a/README.rst +++ /dev/null @@ -1,38 +0,0 @@ -======================== -Team and repository tags -======================== - -.. image:: http://governance.openstack.org/badges/heat-cfntools.svg - :target: http://governance.openstack.org/reference/tags/index.html - -.. Change things from this point on - -========================= -Heat CloudFormation Tools -========================= - -There are several bootstrap methods for cloudformations: - -1. Create image with application ready to go -2. Use cloud-init to run a startup script passed as userdata to the nova - server create -3. Use the CloudFormation instance helper scripts - -This package contains files required for choice #3. 
- -cfn-init - - Reads the AWS::CloudFormation::Init for the instance resource, - installs packages, and starts services -cfn-signal - - Waits for an application to be ready before continuing, ie: - supporting the WaitCondition feature -cfn-hup - - Handle updates from the UpdateStack CloudFormation API call - -* Free software: Apache license -* Source: http://git.openstack.org/cgit/openstack/heat-cfntools -* Bugs: http://bugs.launchpad.net/heat-cfntools - -Related projects ----------------- -* http://wiki.openstack.org/Heat diff --git a/bin/cfn-create-aws-symlinks b/bin/cfn-create-aws-symlinks deleted file mode 100755 index bd5e6d7..0000000 --- a/bin/cfn-create-aws-symlinks +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Creates symlinks for the cfn-* scripts in this directory to /opt/aws/bin -""" -import argparse -import glob -import os -import os.path - - -def create_symlink(source_file, target_file, override=False): - if os.path.exists(target_file): - if (override): - os.remove(target_file) - else: - print('%s already exists, will not replace with symlink' - % target_file) - return - print('%s -> %s' % (source_file, target_file)) - os.symlink(source_file, target_file) - - -def check_dirs(source_dir, target_dir): - print('%s -> %s' % (source_dir, target_dir)) - - if source_dir == target_dir: - print('Source and target are the same %s' % target_dir) - return False - - if not os.path.exists(target_dir): - try: - os.makedirs(target_dir) - except OSError as exc: - print('Could not create target directory %s: %s' - % (target_dir, exc)) - return False - return True - - -def create_symlinks(source_dir, target_dir, glob_pattern, override): - source_files = glob.glob(os.path.join(source_dir, glob_pattern)) - for source_file in source_files: - target_file = os.path.join(target_dir, os.path.basename(source_file)) - create_symlink(source_file, target_file, override=override) - -if __name__ == '__main__': - description = 'Creates symlinks for the cfn-* scripts to /opt/aws/bin' - parser = argparse.ArgumentParser(description=description) - parser.add_argument( - '-t', '--target', - dest="target_dir", - help="Target directory to create symlinks", - default='/opt/aws/bin', - required=False) - parser.add_argument( - '-s', '--source', - dest="source_dir", - help="Source directory to create symlinks from. 
" - "Defaults to the directory where this script is", - default='/usr/bin', - required=False) - parser.add_argument( - '-f', '--force', - dest="force", - action='store_true', - help="If specified, will create symlinks even if " - "there is already a target file", - required=False) - args = parser.parse_args() - - if not check_dirs(args.source_dir, args.target_dir): - exit(1) - - create_symlinks(args.source_dir, args.target_dir, 'cfn-*', args.force) diff --git a/bin/cfn-get-metadata b/bin/cfn-get-metadata deleted file mode 100755 index 93bd57d..0000000 --- a/bin/cfn-get-metadata +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Implements cfn-get-metadata CloudFormation functionality -""" -import argparse -import logging - - -from heat_cfntools.cfntools import cfn_helper - -description = " " -parser = argparse.ArgumentParser(description=description) -parser.add_argument('-s', '--stack', - dest="stack_name", - help="A Heat stack name", - required=True) -parser.add_argument('-r', '--resource', - dest="logical_resource_id", - help="A Heat logical resource ID", - required=True) -parser.add_argument('--access-key', - dest="access_key", - help="A Keystone access key", - required=False) -parser.add_argument('--secret-key', - dest="secret_key", - help="A Keystone secret key", - required=False) -parser.add_argument('--region', - dest="region", - help="Openstack region", - required=False) -parser.add_argument('--credential-file', - dest="credential_file", - help="credential-file", - required=False) -parser.add_argument('-u', '--url', - dest="url", - help="service url", - required=False) -parser.add_argument('-k', '--key', - dest="key", - help="key", - required=False) -args = parser.parse_args() - -if not args.stack_name: - print('The Stack name must not be empty.') - exit(1) - -if not args.logical_resource_id: - print('The Resource ID must not be empty') - exit(1) - -log_format = '%(levelname)s [%(asctime)s] %(message)s' -logging.basicConfig(format=log_format, level=logging.DEBUG) - -LOG = logging.getLogger('cfntools') -log_file_name = "/var/log/cfn-get-metadata.log" -file_handler = logging.FileHandler(log_file_name) -file_handler.setFormatter(logging.Formatter(log_format)) -LOG.addHandler(file_handler) - -metadata = cfn_helper.Metadata(args.stack_name, - args.logical_resource_id, - access_key=args.access_key, - secret_key=args.secret_key, - region=args.region, - credentials_file=args.credential_file) -metadata.retrieve() -LOG.debug(str(metadata)) -metadata.display(args.key) diff --git a/bin/cfn-hup b/bin/cfn-hup deleted file mode 100755 index 690eca4..0000000 --- a/bin/cfn-hup +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Implements cfn-hup CloudFormation functionality -""" -import argparse -import logging -import os -import os.path - - -from heat_cfntools.cfntools import cfn_helper - -description = " " -parser = argparse.ArgumentParser(description=description) -parser.add_argument('-c', '--config', - dest="config_dir", - help="Hook Config Directory", - required=False, - default='/etc/cfn/hooks.d') -parser.add_argument('-f', '--no-daemon', - dest="no_daemon", - action="store_true", - help="Do not run as a daemon", - required=False) -parser.add_argument('-v', '--verbose', - action="store_true", - dest="verbose", - help="Verbose logging", - required=False) -args = parser.parse_args() - -# Setup logging -log_format = '%(levelname)s [%(asctime)s] %(message)s' -log_file_name = "/var/log/cfn-hup.log" -log_level = logging.INFO -if args.verbose: - log_level = logging.DEBUG -logging.basicConfig(filename=log_file_name, - format=log_format, - level=log_level) - -LOG = logging.getLogger('cfntools') - -main_conf_path = '/etc/cfn/cfn-hup.conf' -try: - main_config_file = open(main_conf_path) -except IOError as exc: - LOG.error('Could not open main configuration at %s' % main_conf_path) - exit(1) - -config_files = [] -hooks_conf_path = '/etc/cfn/hooks.conf' -if os.path.exists(hooks_conf_path): - try: - config_files.append(open(hooks_conf_path)) - except IOError as exc: - LOG.exception(exc) - -if args.config_dir and os.path.exists(args.config_dir): - try: - for f in os.listdir(args.config_dir): - config_files.append(open(os.path.join(args.config_dir, f))) - - except OSError as exc: - LOG.exception(exc) - -if not config_files: - LOG.error('No hook files found at %s or %s' % (hooks_conf_path, - args.config_dir)) - exit(1) - -try: - mainconfig = cfn_helper.HupConfig([main_config_file] + config_files) -except Exception as ex: - LOG.error('Cannot load configuration: %s' % str(ex)) - exit(1) - -if not mainconfig.unique_resources_get(): - LOG.error('No hooks were found. Add some to %s or %s' % (hooks_conf_path, - args.config_dir)) - exit(1) - - -for r in mainconfig.unique_resources_get(): - LOG.debug('Checking resource %s' % r) - metadata = cfn_helper.Metadata(mainconfig.stack, - r, - credentials_file=mainconfig.credential_file, - region=mainconfig.region) - metadata.retrieve() - try: - metadata.cfn_hup(mainconfig.hooks) - except Exception as e: - LOG.exception("Error processing metadata") - exit(1) diff --git a/bin/cfn-init b/bin/cfn-init deleted file mode 100755 index 99b369e..0000000 --- a/bin/cfn-init +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/python -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Implements cfn-init CloudFormation functionality -""" -import argparse -import logging - - -from heat_cfntools.cfntools import cfn_helper - -description = " " -parser = argparse.ArgumentParser(description=description) -parser.add_argument('-s', '--stack', - dest="stack_name", - help="A Heat stack name", - required=False) -parser.add_argument('-r', '--resource', - dest="logical_resource_id", - help="A Heat logical resource ID", - required=False) -parser.add_argument('--access-key', - dest="access_key", - help="A Keystone access key", - required=False) -parser.add_argument('--secret-key', - dest="secret_key", - help="A Keystone secret key", - required=False) -parser.add_argument('--region', - dest="region", - help="Openstack region", - required=False) -parser.add_argument('-c', '--configsets', - dest="configsets", - help="An optional list of configSets (default: default)", - required=False) -args = parser.parse_args() - -log_format = '%(levelname)s [%(asctime)s] %(message)s' -log_file_name = "/var/log/cfn-init.log" -logging.basicConfig(filename=log_file_name, - format=log_format, - level=logging.DEBUG) - -LOG = logging.getLogger('cfntools') - -metadata = cfn_helper.Metadata(args.stack_name, - args.logical_resource_id, - access_key=args.access_key, - secret_key=args.secret_key, - region=args.region, - configsets=args.configsets) -metadata.retrieve() -try: - metadata.cfn_init() -except Exception as e: - LOG.exception("Error processing metadata") - exit(1) diff --git a/bin/cfn-push-stats b/bin/cfn-push-stats deleted file mode 100755 index d1171ee..0000000 --- a/bin/cfn-push-stats +++ /dev/null @@ -1,286 +0,0 @@ -#!/usr/bin/env python -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Implements cfn-push-stats CloudFormation functionality -""" -import argparse -import logging -import os -import subprocess - -# Override BOTO_CONFIG, which makes boto look only at the specified -# config file, instead of the default locations -os.environ['BOTO_CONFIG'] = '/var/lib/heat-cfntools/cfn-boto-cfg' -from boto.ec2 import cloudwatch - - -log_format = '%(levelname)s [%(asctime)s] %(message)s' -log_file_name = "/var/log/cfn-push-stats.log" -logging.basicConfig(filename=log_file_name, - format=log_format) -LOG = logging.getLogger('cfntools') - -try: - import psutil -except ImportError: - LOG.warning("psutil not available. 
If you want process and memory " - "statistics, you need to install it.") - -from heat_cfntools.cfntools import cfn_helper - -KILO = 1024 -MEGA = 1048576 -GIGA = 1073741824 -unit_map = {'bytes': 1, - 'kilobytes': KILO, - 'megabytes': MEGA, - 'gigabytes': GIGA} - -description = " " -parser = argparse.ArgumentParser(description=description) -parser.add_argument('-v', '--verbose', action="store_true", - help="Verbose logging", required=False) -parser.add_argument('--credential-file', dest="credential_file", - help="credential-file", required=False, - default='/etc/cfn/cfn-credentials') -parser.add_argument('--service-failure', required=False, action="store_true", - help='Reports a service failure.') -parser.add_argument('--mem-util', required=False, action="store_true", - help='Reports memory utilization in percentages.') -parser.add_argument('--mem-used', required=False, action="store_true", - help='Reports memory used (excluding cache/buffers) ' - 'in megabytes.') -parser.add_argument('--mem-avail', required=False, action="store_true", - help='Reports available memory (including cache/buffers) ' - 'in megabytes.') -parser.add_argument('--swap-util', required=False, action="store_true", - help='Reports swap utilization in percentages.') -parser.add_argument('--swap-used', required=False, action="store_true", - help='Reports allocated swap space in megabytes.') -parser.add_argument('--disk-space-util', required=False, action="store_true", - help='Reports disk space utilization in percentages.') -parser.add_argument('--disk-space-used', required=False, action="store_true", - help='Reports allocated disk space in gigabytes.') -parser.add_argument('--disk-space-avail', required=False, action="store_true", - help='Reports available disk space in gigabytes.') -parser.add_argument('--memory-units', required=False, default='megabytes', - help='Specifies units for memory metrics.') -parser.add_argument('--disk-units', required=False, default='megabytes', - help='Specifies units for disk metrics.') -parser.add_argument('--disk-path', required=False, default='/', - help='Selects the disk by the path on which to report.') -parser.add_argument('--cpu-util', required=False, action="store_true", - help='Reports cpu utilization in percentages.') -parser.add_argument('--haproxy', required=False, action='store_true', - help='Reports HAProxy loadbalancer usage.') -parser.add_argument('--haproxy-latency', required=False, action='store_true', - help='Reports HAProxy latency') -parser.add_argument('--heartbeat', required=False, action='store_true', - help='Sends a Heartbeat.') -parser.add_argument('--watch', required=False, - help='the name of the watch to post to.') -parser.add_argument('--metric', required=False, - help='name of the metric to post to.') -parser.add_argument('--units', required=False, - help='name of the units to be used for the specified' - 'metric') -parser.add_argument('--value', required=False, - help='value to post to the specified metric') -args = parser.parse_args() - -LOG.debug('cfn-push-stats called %s ' % (str(args))) - -credentials = cfn_helper.parse_creds_file(args.credential_file) - -namespace = 'system/linux' -data = {} - -# Logging -# ======= -if args.verbose: - LOG.setLevel(logging.DEBUG) - -# Generic user-specified metric -# ============================= -if args.metric and args.units and args.value: - data[args.metric] = { - 'Value': args.value, - 'Units': args.units} - -# service failure -# =============== -if args.service_failure: - data['ServiceFailure'] = { - 'Value': 1, - 
'Units': 'Counter'} - -# heartbeat -# ======== -if args.heartbeat: - data['Heartbeat'] = { - 'Value': 1, - 'Units': 'Counter'} - -# memory space -# ============ -if args.mem_util or args.mem_used or args.mem_avail: - mem = psutil.phymem_usage() -if args.mem_util: - data['MemoryUtilization'] = { - 'Value': mem.percent, - 'Units': 'Percent'} -if args.mem_used: - data['MemoryUsed'] = { - 'Value': mem.used / unit_map[args.memory_units], - 'Units': args.memory_units} -if args.mem_avail: - data['MemoryAvailable'] = { - 'Value': mem.free / unit_map[args.memory_units], - 'Units': args.memory_units} - -# swap space -# ========== -if args.swap_util or args.swap_used: - swap = psutil.virtmem_usage() -if args.swap_util: - data['SwapUtilization'] = { - 'Value': swap.percent, - 'Units': 'Percent'} -if args.swap_used: - data['SwapUsed'] = { - 'Value': swap.used / unit_map[args.memory_units], - 'Units': args.memory_units} - -# disk space -# ========== -if args.disk_space_util or args.disk_space_used or args.disk_space_avail: - disk = psutil.disk_usage(args.disk_path) -if args.disk_space_util: - data['DiskSpaceUtilization'] = { - 'Value': disk.percent, - 'Units': 'Percent'} -if args.disk_space_used: - data['DiskSpaceUsed'] = { - 'Value': disk.used / unit_map[args.disk_units], - 'Units': args.disk_units} -if args.disk_space_avail: - data['DiskSpaceAvailable'] = { - 'Value': disk.free / unit_map[args.disk_units], - 'Units': args.disk_units} - -# cpu utilization -# =============== -if args.cpu_util: - # blocks for 1 second. - cpu_percent = psutil.cpu_percent(interval=1) - data['CPUUtilization'] = { - 'Value': cpu_percent, - 'Units': 'Percent'} - - -# HAProxy -# ======= -def parse_haproxy_unix_socket(res, latency_only=False): - # http://docs.amazonwebservices.com/ElasticLoadBalancing/latest - # /DeveloperGuide/US_MonitoringLoadBalancerWithCW.html - - type_map = {'FRONTEND': '0', 'BACKEND': '1', 'SERVER': '2', 'SOCKET': '3'} - num_map = {'status': 17, 'svname': 1, 'check_duration': 38, 'type': 32, - 'req_tot': 48, 'hrsp_2xx': 40, 'hrsp_3xx': 41, 'hrsp_4xx': 42, - 'hrsp_5xx': 43} - - def add_stat(key, value, unit='Counter'): - res[key] = {'Value': value, - 'Units': unit} - - echo = subprocess.Popen(['echo', 'show stat'], - stdout=subprocess.PIPE) - socat = subprocess.Popen(['socat', 'stdio', '/tmp/.haproxy-stats'], - stdin=echo.stdout, - stdout=subprocess.PIPE) - end_pipe = socat.stdout - raw = [l.strip('\n').split(',') - for l in end_pipe if l[0] != '#' and len(l) > 2] - latency = 0 - up_count = 0 - down_count = 0 - for f in raw: - if latency_only is False: - if f[num_map['type']] == type_map['FRONTEND']: - add_stat('RequestCount', f[num_map['req_tot']]) - add_stat('HTTPCode_ELB_4XX', f[num_map['hrsp_4xx']]) - add_stat('HTTPCode_ELB_5XX', f[num_map['hrsp_5xx']]) - elif f[num_map['type']] == type_map['BACKEND']: - add_stat('HTTPCode_Backend_2XX', f[num_map['hrsp_2xx']]) - add_stat('HTTPCode_Backend_3XX', f[num_map['hrsp_3xx']]) - add_stat('HTTPCode_Backend_4XX', f[num_map['hrsp_4xx']]) - add_stat('HTTPCode_Backend_5XX', f[num_map['hrsp_5xx']]) - else: - if f[num_map['status']] == 'UP': - up_count = up_count + 1 - else: - down_count = down_count + 1 - if f[num_map['check_duration']] != '': - latency = max(float(f[num_map['check_duration']]), latency) - - # note: haproxy's check_duration is in ms, but Latency is in seconds - add_stat('Latency', str(latency / 1000), unit='Seconds') - if latency_only is False: - add_stat('HealthyHostCount', str(up_count)) - add_stat('UnHealthyHostCount', str(down_count)) - - -def 
send_stats(info): - - # Create boto connection, need the hard-coded port/path as boto - # can't read these from config values in BOTO_CONFIG - # FIXME : currently only http due to is_secure=False - client = cloudwatch.CloudWatchConnection( - aws_access_key_id=credentials['AWSAccessKeyId'], - aws_secret_access_key=credentials['AWSSecretKey'], - is_secure=False, port=8003, path="/v1", debug=0) - - # Then we send the metric datapoints passed in "info", note this could - # contain multiple keys as the options parsed above are not exclusive - # The alarm name is passed as a dimension so the metric datapoint can - # be associated with the alarm/watch in the engine - metadata = cfn_helper.Metadata('not-used', None) - metric_dims = metadata.get_tags() - if args.watch: - metric_dims['AlarmName'] = args.watch - for key in info: - LOG.info("Sending metric %s, Units %s, Value %s" % - (key, info[key]['Units'], info[key]['Value'])) - client.put_metric_data(namespace=namespace, - name=key, - value=info[key]['Value'], - timestamp=None, # means use "now" in the engine - unit=info[key]['Units'], - dimensions=metric_dims, - statistics=None) - - -if args.haproxy: - namespace = 'AWS/ELB' - lb_data = {} - parse_haproxy_unix_socket(lb_data) - send_stats(lb_data) -elif args.haproxy_latency: - namespace = 'AWS/ELB' - lb_data = {} - parse_haproxy_unix_socket(lb_data, latency_only=True) - send_stats(lb_data) -else: - send_stats(data) diff --git a/bin/cfn-signal b/bin/cfn-signal deleted file mode 100755 index 7fce58e..0000000 --- a/bin/cfn-signal +++ /dev/null @@ -1,118 +0,0 @@ -#!/usr/bin/env python -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Implements cfn-signal CloudFormation functionality -""" -import argparse -import logging -import sys - - -from heat_cfntools.cfntools import cfn_helper - - -description = " " -parser = argparse.ArgumentParser(description=description) -parser.add_argument('-s', '--success', - dest="success", - help="signal status to report", - default='true', - required=False) -parser.add_argument('-r', '--reason', - dest="reason", - help="The reason for the failure", - default="Configuration Complete", - required=False) -parser.add_argument('-d', '--data', - dest="data", - default="Application has completed configuration.", - help="The data to send", - required=False) -parser.add_argument('-i', '--id', - dest="unique_id", - help="the unique id to send back to the WaitCondition", - default=None, - required=False) -parser.add_argument('-e', '--exit-code', - dest="exit_code", - help="The exit code from a process to interpret", - default=None, - required=False) -parser.add_argument('--exit', - dest="exit", - help="DEPRECATED! 
Use -e or --exit-code instead.", - default=None, - required=False) -parser.add_argument('url', - help='the url to post to') -parser.add_argument('-k', '--insecure', - help="This will make insecure https request to cfn-api.", - action='store_true') -args = parser.parse_args() - -log_format = '%(levelname)s [%(asctime)s] %(message)s' -log_file_name = "/var/log/cfn-signal.log" -logging.basicConfig(filename=log_file_name, - format=log_format, - level=logging.DEBUG) - -LOG = logging.getLogger('cfntools') - -LOG.debug('cfn-signal called %s ' % (str(args))) -if args.exit: - LOG.warning('--exit DEPRECATED! Use -e or --exit-code instead.') -status = 'FAILURE' -exit_code = args.exit_code or args.exit -if exit_code: - # "exit_code" takes precedence over "success". - if exit_code == '0': - status = 'SUCCESS' -else: - if args.success == 'true': - status = 'SUCCESS' - -unique_id = args.unique_id -if unique_id is None: - LOG.debug('No id passed from the command line') - md = cfn_helper.Metadata('not-used', None) - unique_id = md.get_instance_id() - if unique_id is None: - LOG.error('Could not get the instance id from metadata!') - import socket - unique_id = socket.getfqdn() -LOG.debug('id: %s' % (unique_id)) - -body = { - "Status": status, - "Reason": args.reason, - "UniqueId": unique_id, - "Data": args.data -} -data = cfn_helper.json.dumps(body) - -cmd = ['curl'] -if args.insecure: - cmd.append('--insecure') -cmd.extend([ - '-X', 'PUT', - '-H', 'Content-Type:', - '--data-binary', data, - args.url -]) - -command = cfn_helper.CommandRunner(cmd).run() -if command.status != 0: - LOG.error(command.stderr) -sys.exit(command.status) diff --git a/doc/.gitignore b/doc/.gitignore deleted file mode 100644 index 6438f1c..0000000 --- a/doc/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -target/ -build/ diff --git a/doc/Makefile b/doc/Makefile deleted file mode 100644 index 5e5f8ff..0000000 --- a/doc/Makefile +++ /dev/null @@ -1,153 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = build - -# Internal variables. 
-PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source - -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - -rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Heat.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Heat.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/Heat" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Heat" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 
- @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." diff --git a/doc/README.rst b/doc/README.rst deleted file mode 100644 index a8c1e34..0000000 --- a/doc/README.rst +++ /dev/null @@ -1,23 +0,0 @@ -====================== -Building the man pages -====================== - -Dependencies -============ - -Sphinx_ - You'll need sphinx (the python one) and if you are - using the virtualenv you'll need to install it in the virtualenv - specifically so that it can load the cinder modules. - - :: - - sudo yum install python-sphinx - sudo pip-python install sphinxcontrib-httpdomain - -Use `make` -========== - -To build the man pages: - - make man diff --git a/doc/source/cfn-create-aws-symlinks.rst b/doc/source/cfn-create-aws-symlinks.rst deleted file mode 100644 index 308ef3b..0000000 --- a/doc/source/cfn-create-aws-symlinks.rst +++ /dev/null @@ -1,34 +0,0 @@ -======================= -cfn-create-aws-symlinks -======================= - -.. program:: cfn-create-aws-symlinks - -SYNOPSIS -======== - -``cfn-create-aws-symlinks`` - -DESCRIPTION -=========== -Creates symlinks for the cfn-* scripts in this directory to /opt/aws/bin - - -OPTIONS -======= -.. cmdoption:: -t, --target - - Target directory to create symlinks, defaults to /opt/aws/bin - -.. cmdoption:: -s, --source - - Source directory to create symlinks from. Defaults to the directory where this script is - -.. 
cmdoption:: -f, --force - - If specified, will create symlinks even if there is already a target file - - -BUGS -==== -Heat bugs are managed through Launchpad \ No newline at end of file diff --git a/doc/source/cfn-get-metadata.rst b/doc/source/cfn-get-metadata.rst deleted file mode 100644 index 9752d1f..0000000 --- a/doc/source/cfn-get-metadata.rst +++ /dev/null @@ -1,55 +0,0 @@ -================ -cfn-get-metadata -================ - -.. program:: cfn-get-metadata - -SYNOPSIS -======== - -``cfn-get-metadata`` - -DESCRIPTION -=========== -Implements cfn-get-metadata CloudFormation functionality - - -OPTIONS -======= -.. cmdoption:: -s --stack - - A Heat stack name - -.. cmdoption:: -r --resource - - A Heat logical resource ID - -.. cmdoption:: --access-key - - A Keystone access key - -.. cmdoption:: --secret-key - - A Keystone secret key - -.. cmdoption:: --region - - Openstack region - -.. cmdoption:: --credential-file - - credential-file - -.. cmdoption:: -u --url - - service url - -.. cmdoption:: -k --key - - key - - - -BUGS -==== -Heat bugs are managed through Launchpad \ No newline at end of file diff --git a/doc/source/cfn-hup.rst b/doc/source/cfn-hup.rst deleted file mode 100644 index 5f338d2..0000000 --- a/doc/source/cfn-hup.rst +++ /dev/null @@ -1,34 +0,0 @@ -======= -cfn-hup -======= - -.. program:: cfn-hup - -SYNOPSIS -======== - -``cfn-hup`` - -DESCRIPTION -=========== -Implements cfn-hup CloudFormation functionality - - -OPTIONS -======= -.. cmdoption:: -c, --config - - Hook Config Directory, defaults to /etc/cfn/hooks.d - -.. cmdoption:: -f, --no-daemon - - Do not run as a daemon - -.. cmdoption:: -v, --verbose - - Verbose logging - - -BUGS -==== -Heat bugs are managed through Launchpad diff --git a/doc/source/cfn-init.rst b/doc/source/cfn-init.rst deleted file mode 100644 index a336559..0000000 --- a/doc/source/cfn-init.rst +++ /dev/null @@ -1,46 +0,0 @@ -======== -cfn-init -======== - -.. program:: cfn-init - -SYNOPSIS -======== - -``cfn-init`` - -DESCRIPTION -=========== -Implements cfn-init CloudFormation functionality - - -OPTIONS -======= -.. cmdoption:: -s, --stack - - A Heat stack name - -.. cmdoption:: -r, --resource - - A Heat logical resource ID - -.. cmdoption:: --access-key - - A Keystone access key - -.. cmdoption:: --secret-key - - A Keystone secret key - -.. cmdoption:: --region - - Openstack region - -.. cmdoption:: -c, --configsets - - An optional list of configSets (default: default) - - -BUGS -==== -Heat bugs are managed through Launchpad \ No newline at end of file diff --git a/doc/source/cfn-push-stats.rst b/doc/source/cfn-push-stats.rst deleted file mode 100644 index 5b872c5..0000000 --- a/doc/source/cfn-push-stats.rst +++ /dev/null @@ -1,98 +0,0 @@ -============== -cfn-push-stats -============== - -.. program:: cfn-push-stats - -SYNOPSIS -======== - -``cfn-push-stats`` - -DESCRIPTION -=========== -Implements cfn-push-stats CloudFormation functionality - - -OPTIONS -======= -.. cmdoption:: -v, --verbose - - Verbose logging - -.. cmdoption:: --credential-file - - credential-file - -.. cmdoption:: --service-failure - - Reports a service failure. - -.. cmdoption:: --mem-util - - Reports memory utilization in percentages. - -.. cmdoption:: --mem-used - - Reports memory used (excluding cache and buffers) in megabytes. - -.. cmdoption:: --mem-avail - - Reports available memory (including cache and buffers) in megabytes. - -.. cmdoption:: --swap-util - - Reports swap utilization in percentages. - -..
cmdoption:: --swap-used - - Reports allocated swap space in megabytes. - -.. cmdoption:: --disk-space-util - - Reports disk space utilization in percentages. - -.. cmdoption:: --disk-space-used - - Reports allocated disk space in gigabytes. - -.. cmdoption:: --disk-space-avail - - Reports available disk space in gigabytes. - -.. cmdoption:: --memory-units - - Specifies units for memory metrics. - -.. cmdoption:: --disk-units - - Specifies units for disk metrics. - -.. cmdoption:: --disk-path - - Selects the disk by the path on which to report. - -.. cmdoption:: --cpu-util - - Reports cpu utilization in percentages. - -.. cmdoption:: --haproxy - - Reports HAProxy loadbalancer usage. - -.. cmdoption:: --haproxy-latency - - Reports HAProxy latency - -.. cmdoption:: --heartbeat - - Sends a Heartbeat. - -.. cmdoption:: --watch - - the name of the watch to post to. - - -BUGS -==== -Heat bugs are managed through Launchpad \ No newline at end of file diff --git a/doc/source/cfn-signal.rst b/doc/source/cfn-signal.rst deleted file mode 100644 index 327ac35..0000000 --- a/doc/source/cfn-signal.rst +++ /dev/null @@ -1,42 +0,0 @@ -========== -cfn-signal -========== - -.. program:: cfn-signal - -SYNOPSIS -======== - -``cfn-signal`` - -DESCRIPTION -=========== -Implements cfn-signal CloudFormation functionality - - -OPTIONS -======= -.. cmdoption:: -s, --success - - signal status to report - -.. cmdoption:: -r, --reason - - The reason for the failure - -.. cmdoption:: --data - - The data to send - -.. cmdoption:: -i, --id - - the unique id to send back to the WaitCondition - -.. cmdoption:: -e, --exit - - The exit code from a process to interpret - - -BUGS -==== -Heat bugs are managed through Launchpad \ No newline at end of file diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index 567b444..0000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,193 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# heat-cfntools documentation build configuration file, created by -# sphinx-quickstart on Thu Jul 20 09:19:39 2017. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -# import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) - - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones.
-extensions = ['sphinx.ext.autodoc', - 'openstackdocstheme'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# -# source_suffix = ['.rst', '.md'] -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = 'heat-cfntools' -copyright = 'OpenStack Foundation' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '' -# The full version, including alpha/beta/rc tags. -release = '' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = [] - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# If true, `todo` and `todoList` produce output, else they produce nothing. -# todo_include_todos = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# -# html_theme_options = {} - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# Custom sidebar templates, must be a dictionary that maps document names -# to template names. -# -# This is required for the alabaster theme -# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars -# html_sidebars = {} - -# -- Options for openstackdocstheme -------------------------------------- -repository_name = 'openstack/heat-cfntools' -bug_project = 'heat-cfntools' -bug_tag = '' - -# -- Options for HTMLHelp output ------------------------------------------ - -# Output file base name for HTML help builder. -htmlhelp_basename = 'heat-cfntoolsdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # - # 'preamble': '', - - # Latex figure (float) alignment - # - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). 
-latex_documents = [ - (master_doc, 'heat-cfntools.tex', 'heat-cfntools Documentation', - 'OpenStack Foundation', 'manual'), -] - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'heat-cfntools', 'heat-cfntools Documentation', - ['Heat Developers'], 1), - ('cfn-create-aws-symlinks', 'cfn-create-aws-symlinks', - u'Creates symlinks for the cfn-* scripts in this directory to /opt/aws/bin', - [u'Heat Developers'], 1), - ('cfn-get-metadata', 'cfn-get-metadata', - u'Implements cfn-get-metadata CloudFormation functionality', - [u'Heat Developers'], 1), - ('cfn-hup', 'cfn-hup', - u'Implements cfn-hup CloudFormation functionality', - [u'Heat Developers'], 1), - ('cfn-init', 'cfn-init', - u'Implements cfn-init CloudFormation functionality', - [u'Heat Developers'], 1), - ('cfn-push-stats', 'cfn-push-stats', - u'Implements cfn-push-stats CloudFormation functionality', - [u'Heat Developers'], 1), - ('cfn-signal', 'cfn-signal', - u'Implements cfn-signal CloudFormation functionality', - [u'Heat Developers'], 1), -] - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'heat-cfntools', 'heat-cfntools Documentation', - 'Heat Developers', 'heat-cfntools', 'One line description of project.', - 'Miscellaneous'), -] diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 index 54479b1..0000000 --- a/doc/source/index.rst +++ /dev/null @@ -1,17 +0,0 @@ -===================================== -Man pages for Heat cfntools utilities -===================================== - -------------- -Heat cfntools -------------- - -.. toctree:: - :maxdepth: 1 - - cfn-create-aws-symlinks - cfn-get-metadata - cfn-hup - cfn-init - cfn-push-stats - cfn-signal diff --git a/heat_cfntools/__init__.py b/heat_cfntools/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/heat_cfntools/cfntools/__init__.py b/heat_cfntools/cfntools/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/heat_cfntools/cfntools/cfn_helper.py b/heat_cfntools/cfntools/cfn_helper.py deleted file mode 100644 index f2b9f90..0000000 --- a/heat_cfntools/cfntools/cfn_helper.py +++ /dev/null @@ -1,1533 +0,0 @@ - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
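Each man_pages entry above names an .rst source under doc/source. A small sanity-check sketch, assuming it is run from the repository root; the single entry shown is copied from the list above:

    import os

    man_pages = [
        ('cfn-init', 'cfn-init',
         'Implements cfn-init CloudFormation functionality',
         ['Heat Developers'], 1),
    ]

    # Verify that every listed man page has a matching source file.
    for source, name, description, authors, section in man_pages:
        rst = os.path.join('doc', 'source', source + '.rst')
        status = 'ok' if os.path.exists(rst) else 'MISSING'
        print('%s(%d): %s [%s]' % (name, section, rst, status))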
- -""" -Implements cfn metadata handling - -Not implemented yet: - * command line args - - placeholders are ignored -""" -import atexit -import contextlib -import errno -import functools -import grp -import json -import logging -import os -import os.path -import pwd -try: - import rpmUtils.miscutils as rpmutils - import rpmUtils.updates as rpmupdates - rpmutils_present = True -except ImportError: - rpmutils_present = False -import re -import shutil -import six -import six.moves.configparser as ConfigParser -import subprocess -import tempfile - - -# Override BOTO_CONFIG, which makes boto look only at the specified -# config file, instead of the default locations -os.environ['BOTO_CONFIG'] = '/var/lib/heat-cfntools/cfn-boto-cfg' -from boto import cloudformation - - -LOG = logging.getLogger(__name__) - - -def to_boolean(b): - val = b.lower().strip() if isinstance(b, six.string_types) else b - return val in [True, 'true', 'yes', '1', 1] - - -def parse_creds_file(path='/etc/cfn/cfn-credentials'): - '''Parse the cfn credentials file. - - Default location is as specified, and it is expected to contain - exactly two keys "AWSAccessKeyId" and "AWSSecretKey) - The two keys are returned a dict (if found) - ''' - creds = {'AWSAccessKeyId': None, 'AWSSecretKey': None} - for line in open(path): - for key in creds: - match = re.match("^%s *= *(.*)$" % key, line) - if match: - creds[key] = match.group(1) - return creds - - -class HupConfig(object): - def __init__(self, fp_list): - self.config = ConfigParser.SafeConfigParser() - for fp in fp_list: - self.config.readfp(fp) - - self.load_main_section() - - self.hooks = [] - for s in self.config.sections(): - if s != 'main': - self.hooks.append(Hook( - s, - self.config.get(s, 'triggers'), - self.config.get(s, 'path'), - self.config.get(s, 'runas'), - self.config.get(s, 'action'))) - - def load_main_section(self): - # required values - self.stack = self.config.get('main', 'stack') - self.credential_file = self.config.get('main', 'credential-file') - try: - with open(self.credential_file) as f: - self.credentials = f.read() - except Exception: - raise Exception("invalid credentials file %s" % - self.credential_file) - - # optional values - try: - self.region = self.config.get('main', 'region') - except ConfigParser.NoOptionError: - self.region = 'nova' - - try: - self.interval = self.config.getint('main', 'interval') - except ConfigParser.NoOptionError: - self.interval = 10 - - def __str__(self): - return '{stack: %s, credential_file: %s, region: %s, interval:%d}' % \ - (self.stack, self.credential_file, self.region, self.interval) - - def unique_resources_get(self): - resources = [] - for h in self.hooks: - r = h.resource_name_get() - if r not in resources: - resources.append(h.resource_name_get()) - return resources - - -class Hook(object): - def __init__(self, name, triggers, path, runas, action): - self.name = name - self.triggers = triggers - self.path = path - self.runas = runas - self.action = action - - def resource_name_get(self): - sp = self.path.split('.') - return sp[1] - - def event(self, ev_name, ev_object, ev_resource): - if self.resource_name_get() == ev_resource and \ - ev_name in self.triggers: - CommandRunner(self.action, shell=True).run(user=self.runas) - else: - LOG.debug('event: {%s, %s, %s} did not match %s' % - (ev_name, ev_object, ev_resource, self.__str__())) - - def __str__(self): - return '{%s, %s, %s, %s, %s}' % \ - (self.name, - self.triggers, - self.path, - self.runas, - self.action) - - -class 
ControlledPrivilegesFailureException(Exception): - pass - - -@contextlib.contextmanager -def controlled_privileges(user): - orig_euid = None - try: - real = pwd.getpwnam(user) - if os.geteuid() != real.pw_uid: - orig_euid = os.geteuid() - os.seteuid(real.pw_uid) - LOG.debug("Privileges set for user %s" % user) - except Exception as e: - raise ControlledPrivilegesFailureException(e) - - try: - yield - finally: - if orig_euid is not None: - try: - os.seteuid(orig_euid) - LOG.debug("Original privileges restored.") - except Exception as e: - LOG.error("Error restoring privileges %s" % e) - - -class CommandRunner(object): - """Helper class to run a command and store the output.""" - - def __init__(self, command, shell=False, nextcommand=None): - self._command = command - self._shell = shell - self._next = nextcommand - self._stdout = None - self._stderr = None - self._status = None - - def __str__(self): - s = "CommandRunner:" - s += "\n\tcommand: %s" % self._command - if self._status: - s += "\n\tstatus: %s" % self.status - if self._stdout: - s += "\n\tstdout: %s" % self.stdout - if self._stderr: - s += "\n\tstderr: %s" % self.stderr - return s - - def run(self, user='root', cwd=None, env=None): - """Run the Command and return the output. - - Returns: - self - """ - LOG.debug("Running command: %s" % self._command) - - cmd = self._command - shell = self._shell - - # Ensure commands that are given as string are run on shell - assert isinstance(cmd, six.string_types) is bool(shell) - - try: - with controlled_privileges(user): - subproc = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, cwd=cwd, - env=env, shell=shell) - output = subproc.communicate() - self._status = subproc.returncode - self._stdout = output[0] - self._stderr = output[1] - except ControlledPrivilegesFailureException as e: - LOG.error("Error setting privileges for user '%s': %s" - % (user, e)) - self._status = 126 - self._stderr = six.text_type(e) - - if self._status: - LOG.debug("Return code of %d after executing: '%s'\n" - "stdout: '%s'\n" - "stderr: '%s'" % (self._status, cmd, self._stdout, - self._stderr)) - - if self._next: - self._next.run() - return self - - @property - def stdout(self): - return self._stdout - - @property - def stderr(self): - return self._stderr - - @property - def status(self): - return self._status - - -class RpmHelper(object): - - if rpmutils_present: - _rpm_util = rpmupdates.Updates([], []) - - @classmethod - def compare_rpm_versions(cls, v1, v2): - """Compare two RPM version strings. - - Arguments: - v1 -- a version string - v2 -- a version string - - Returns: - 0 -- the versions are equal - 1 -- v1 is greater - -1 -- v2 is greater - """ - if v1 and v2: - return rpmutils.compareVerOnly(v1, v2) - elif v1: - return 1 - elif v2: - return -1 - else: - return 0 - - @classmethod - def newest_rpm_version(cls, versions): - """Returns the highest (newest) version from a list of versions. - - Arguments: - versions -- A list of version strings - e.g., ['2.0', '2.2', '2.2-1.fc16', '2.2.22-1.fc16'] - """ - if versions: - if isinstance(versions, six.string_types): - return versions - versions = sorted(versions, rpmutils.compareVerOnly, - reverse=True) - return versions[0] - else: - return None - - @classmethod - def rpm_package_version(cls, pkg): - """Returns the version of an installed RPM. 
- - Arguments: - pkg -- A package name - """ - cmd = "rpm -q --queryformat '%{VERSION}-%{RELEASE}' %s" % pkg - command = CommandRunner(cmd).run() - return command.stdout - - @classmethod - def rpm_package_installed(cls, pkg): - """Indicates whether pkg is in rpm database. - - Arguments: - pkg -- A package name (with optional version and release spec). - e.g., httpd - e.g., httpd-2.2.22 - e.g., httpd-2.2.22-1.fc16 - """ - cmd = ['rpm', '-q', pkg] - command = CommandRunner(cmd).run() - return command.status == 0 - - @classmethod - def yum_package_available(cls, pkg): - """Indicates whether pkg is available via yum. - - Arguments: - pkg -- A package name (with optional version and release spec). - e.g., httpd - e.g., httpd-2.2.22 - e.g., httpd-2.2.22-1.fc16 - """ - cmd = ['yum', '-y', '--showduplicates', 'list', 'available', pkg] - command = CommandRunner(cmd).run() - return command.status == 0 - - @classmethod - def dnf_package_available(cls, pkg): - """Indicates whether pkg is available via dnf. - - Arguments: - pkg -- A package name (with optional version and release spec). - e.g., httpd - e.g., httpd-2.2.22 - e.g., httpd-2.2.22-1.fc21 - """ - cmd = ['dnf', '-y', '--showduplicates', 'list', 'available', pkg] - command = CommandRunner(cmd).run() - return command.status == 0 - - @classmethod - def zypper_package_available(cls, pkg): - """Indicates whether pkg is available via zypper. - - Arguments: - pkg -- A package name (with optional version and release spec). - e.g., httpd - e.g., httpd-2.2.22 - e.g., httpd-2.2.22-1.fc16 - """ - cmd = ['zypper', '-n', '--no-refresh', 'search', pkg] - command = CommandRunner(cmd).run() - return command.status == 0 - - @classmethod - def install(cls, packages, rpms=True, zypper=False, dnf=False): - """Installs (or upgrades) packages via RPM, yum, dnf, or zypper. - - Arguments: - packages -- a list of packages to install - rpms -- if True: - * use RPM to install the packages - * packages must be a list of URLs to retrieve RPMs - if False: - * use Yum to install packages - * packages is a list of: - - pkg name (httpd), or - - pkg name with version spec (httpd-2.2.22), or - - pkg name with version-release spec - (httpd-2.2.22-1.fc16) - zypper -- if True: - * overrides use of yum, use zypper instead - dnf -- if True: - * overrides use of yum, use dnf instead - * packages must be in same format as yum pkg list - """ - if rpms: - cmd = ['rpm', '-U', '--force', '--nosignature'] - elif zypper: - cmd = ['zypper', '-n', 'install'] - elif dnf: - # use dnf --best to upgrade outdated-but-installed packages - cmd = ['dnf', '-y', '--best', 'install'] - else: - cmd = ['yum', '-y', 'install'] - cmd.extend(packages) - LOG.info("Installing packages: %s" % cmd) - command = CommandRunner(cmd).run() - if command.status: - LOG.warning("Failed to install packages: %s" % cmd) - - @classmethod - def downgrade(cls, packages, rpms=True, zypper=False, dnf=False): - """Downgrades a set of packages via RPM, yum, dnf, or zypper. 
- - Arguments: - packages -- a list of packages to downgrade - rpms -- if True: - * use RPM to downgrade (replace) the packages - * packages must be a list of URLs to retrieve the RPMs - if False: - * use Yum to downgrade packages - * packages is a list of: - - pkg name with version spec (httpd-2.2.22), or - - pkg name with version-release spec - (httpd-2.2.22-1.fc16) - zypper -- if True: - * overrides use of yum, use zypper instead - dnf -- if True: - * Use dnf instead of RPM/yum - """ - if rpms: - cls.install(packages) - elif zypper: - cmd = ['zypper', '-n', 'install', '--oldpackage'] - cmd.extend(packages) - LOG.info("Downgrading packages: %s", cmd) - command = CommandRunner(cmd).run() - if command.status: - LOG.warning("Failed to downgrade packages: %s" % cmd) - elif dnf: - cmd = ['dnf', '-y', 'downgrade'] - cmd.extend(packages) - LOG.info("Downgrading packages: %s", cmd) - command = CommandRunner(cmd).run() - if command.status: - LOG.warning("Failed to downgrade packages: %s" % cmd) - else: - cmd = ['yum', '-y', 'downgrade'] - cmd.extend(packages) - LOG.info("Downgrading packages: %s" % cmd) - command = CommandRunner(cmd).run() - if command.status: - LOG.warning("Failed to downgrade packages: %s" % cmd) - - -class PackagesHandler(object): - _packages = {} - - _package_order = ["dpkg", "rpm", "apt", "yum", "dnf"] - - @staticmethod - def _pkgsort(pkg1, pkg2): - order = PackagesHandler._package_order - p1_name = pkg1[0] - p2_name = pkg2[0] - # cmp() was removed in Python 3, so compare explicitly - def _cmp(a, b): - return (a > b) - (a < b) - if p1_name in order and p2_name in order: - return _cmp(order.index(p1_name), order.index(p2_name)) - elif p1_name in order: - return -1 - elif p2_name in order: - return 1 - else: - return _cmp(p1_name.lower(), p2_name.lower()) - - def __init__(self, packages): - self._packages = packages - - def _handle_gem_packages(self, packages): - """very basic support for gems.""" - # TODO(asalkeld) support versions - # -b == local & remote install - # -y == install deps - opts = ['-b', '-y'] - for pkg_name, versions in packages.items(): - if len(versions) > 0: - cmd = ['gem', 'install'] + opts - cmd.extend(['--version', versions[0], pkg_name]) - CommandRunner(cmd).run() - else: - cmd = ['gem', 'install'] + opts - cmd.append(pkg_name) - CommandRunner(cmd).run() - - def _handle_python_packages(self, packages): - """very basic support for easy_install.""" - # TODO(asalkeld) support versions - for pkg_name, versions in packages.items(): - cmd = ['easy_install', pkg_name] - CommandRunner(cmd).run() - - def _handle_zypper_packages(self, packages): - """Handle installation, upgrade, or downgrade of packages via zypper.
- - Arguments: - packages -- a package entries map of the form: - "pkg_name" : "version", - "pkg_name" : ["v1", "v2"], - "pkg_name" : [] - - For each package entry: - * if no version is supplied and the package is already installed, do - nothing - * if no version is supplied and the package is _not_ already - installed, install it - * if a version string is supplied, and the package is already - installed, determine whether to downgrade or upgrade (or do nothing - if version matches installed package) - * if a version array is supplied, choose the highest version from the - array and follow same logic for version string above - """ - # collect pkgs for batch processing at end - installs = [] - downgrades = [] - for pkg_name, versions in packages.items(): - ver = RpmHelper.newest_rpm_version(versions) - pkg = "%s-%s" % (pkg_name, ver) if ver else pkg_name - if RpmHelper.rpm_package_installed(pkg): - # FIXME: print non-error, but skipping pkg - pass - elif not RpmHelper.zypper_package_available(pkg): - LOG.warning( - "Skipping package '%s' - unavailable via zypper", pkg) - elif not ver: - installs.append(pkg) - else: - current_ver = RpmHelper.rpm_package_version(pkg) - rc = RpmHelper.compare_rpm_versions(current_ver, ver) - if rc < 0: - installs.append(pkg) - elif rc > 0: - downgrades.append(pkg) - if installs: - RpmHelper.install(installs, rpms=False, zypper=True) - if downgrades: - RpmHelper.downgrade(downgrades, zypper=True) - - def _handle_dnf_packages(self, packages): - """Handle installation, upgrade, or downgrade of packages via dnf. - - Arguments: - packages -- a package entries map of the form: - "pkg_name" : "version", - "pkg_name" : ["v1", "v2"], - "pkg_name" : [] - - For each package entry: - * if no version is supplied and the package is already installed, do - nothing - * if no version is supplied and the package is _not_ already - installed, install it - * if a version string is supplied, and the package is already - installed, determine whether to downgrade or upgrade (or do nothing - if version matches installed package) - * if a version array is supplied, choose the highest version from the - array and follow same logic for version string above - """ - # collect pkgs for batch processing at end - installs = [] - downgrades = [] - for pkg_name, versions in packages.items(): - ver = RpmHelper.newest_rpm_version(versions) - pkg = "%s-%s" % (pkg_name, ver) if ver else pkg_name - if RpmHelper.rpm_package_installed(pkg): - # FIXME: print non-error, but skipping pkg - pass - elif not RpmHelper.dnf_package_available(pkg): - LOG.warning( - "Skipping package '%s'. Not available via dnf" % pkg) - elif not ver: - installs.append(pkg) - else: - current_ver = RpmHelper.rpm_package_version(pkg) - rc = RpmHelper.compare_rpm_versions(current_ver, ver) - if rc < 0: - installs.append(pkg) - elif rc > 0: - downgrades.append(pkg) - if installs: - RpmHelper.install(installs, rpms=False, dnf=True) - if downgrades: - RpmHelper.downgrade(downgrades, rpms=False, dnf=True) - - def _handle_yum_packages(self, packages): - """Handle installation, upgrade, or downgrade of packages via yum.
- - Arguments: - packages -- a package entries map of the form: - "pkg_name" : "version", - "pkg_name" : ["v1", "v2"], - "pkg_name" : [] - - For each package entry: - * if no version is supplied and the package is already installed, do - nothing - * if no version is supplied and the package is _not_ already - installed, install it - * if a version string is supplied, and the package is already - installed, determine whether to downgrade or upgrade (or do nothing - if version matches installed package) - * if a version array is supplied, choose the highest version from the - array and follow same logic for version string above - """ - - cmd = CommandRunner(['which', 'yum']).run() - if cmd.status == 1: - # yum not available, use DNF if available - self._handle_dnf_packages(packages) - return - elif cmd.status == 127: - # `which` command not found - LOG.info("`which` not found. Using yum without checking if dnf " - "is available") - - # collect pkgs for batch processing at end - installs = [] - downgrades = [] - for pkg_name, versions in packages.items(): - ver = RpmHelper.newest_rpm_version(versions) - pkg = "%s-%s" % (pkg_name, ver) if ver else pkg_name - if RpmHelper.rpm_package_installed(pkg): - # FIXME:print non-error, but skipping pkg - pass - elif not RpmHelper.yum_package_available(pkg): - LOG.warning( - "Skipping package '%s'. Not available via yum" % pkg) - elif not ver: - installs.append(pkg) - else: - current_ver = RpmHelper.rpm_package_version(pkg) - rc = RpmHelper.compare_rpm_versions(current_ver, ver) - if rc < 0: - installs.append(pkg) - elif rc > 0: - downgrades.append(pkg) - if installs: - RpmHelper.install(installs, rpms=False) - if downgrades: - RpmHelper.downgrade(downgrades) - - def _handle_rpm_packages(self, packages): - """Handle installation, upgrade, or downgrade of packages via rpm. - - Arguments: - packages -- a package entries map of the form: - "pkg_name" : "url" - - For each package entry: - * if the EXACT package is already installed, skip it - * if a different version of the package is installed, overwrite it - * if the package isn't installed, install it - """ - #FIXME: handle rpm installs - pass - - def _handle_apt_packages(self, packages): - """very basic support for apt.""" - # TODO(asalkeld) support versions - pkg_list = list(packages) - - env = {'DEBIAN_FRONTEND': 'noninteractive'} - cmd = ['apt-get', '-y', 'install'] + pkg_list - CommandRunner(cmd).run(env=env) - - # map of function pointers to handle different package managers - _package_handlers = {"yum": _handle_yum_packages, - "dnf": _handle_dnf_packages, - "zypper": _handle_zypper_packages, - "rpm": _handle_rpm_packages, - "apt": _handle_apt_packages, - "rubygems": _handle_gem_packages, - "python": _handle_python_packages} - - def _package_handler(self, manager_name): - handler = None - if manager_name in self._package_handlers: - handler = self._package_handlers[manager_name] - return handler - - def apply_packages(self): - """Install, upgrade, or downgrade packages listed. 
- - Each package is a dict containing package name and a list of versions - Install order: - * dpkg - * rpm - * apt - * yum - * dnf - """ - if not self._packages: - return - try: - packages = sorted( - self._packages.items(), cmp=PackagesHandler._pkgsort) - except TypeError: - # On Python 3, we have to use key instead of cmp - # This could also work on Python 2.7, but not on 2.6 - packages = sorted( - self._packages.items(), - key=functools.cmp_to_key(PackagesHandler._pkgsort)) - - for manager, package_entries in packages: - handler = self._package_handler(manager) - if not handler: - LOG.warning("Skipping invalid package type: %s" % manager) - else: - handler(self, package_entries) - - -class FilesHandler(object): - def __init__(self, files): - self._files = files - - def apply_files(self): - if not self._files: - return - for fdest, meta in self._files.items(): - dest = fdest.encode() - try: - os.makedirs(os.path.dirname(dest)) - except OSError as e: - if e.errno == errno.EEXIST: - LOG.debug(str(e)) - else: - LOG.exception(e) - - if 'content' in meta: - if isinstance(meta['content'], six.string_types): - f = open(dest, 'w+') - f.write(meta['content']) - f.close() - else: - f = open(dest, 'w+') - f.write(json.dumps(meta['content'], indent=4) - .encode('UTF-8')) - f.close() - elif 'source' in meta: - CommandRunner(['curl', '-o', dest, meta['source']]).run() - else: - LOG.error('%s %s' % (dest, str(meta))) - continue - - uid = -1 - gid = -1 - if 'owner' in meta: - try: - user_info = pwd.getpwnam(meta['owner']) - uid = user_info[2] - except KeyError: - pass - - if 'group' in meta: - try: - group_info = grp.getgrnam(meta['group']) - gid = group_info[2] - except KeyError: - pass - - os.chown(dest, uid, gid) - if 'mode' in meta: - os.chmod(dest, int(meta['mode'], 8)) - - -class SourcesHandler(object): - '''tar, tar+gzip,tar+bz2 and zip.''' - _sources = {} - - def __init__(self, sources): - self._sources = sources - - def _url_to_tmp_filename(self, url): - tempdir = tempfile.mkdtemp() - atexit.register(lambda: shutil.rmtree(tempdir, True)) - name = os.path.basename(url) - return os.path.join(tempdir, name) - - def _splitext(self, path): - (r, ext) = os.path.splitext(path) - return (r, ext.lower()) - - def _github_ball_type(self, url): - ext = "" - if url.endswith('/'): - url = url[0:-1] - sp = url.split('/') - if len(sp) > 2: - http = sp[0].startswith('http') - github = sp[2].endswith('github.com') - btype = sp[-2] - if http and github: - if 'zipball' == btype: - ext = '.zip' - elif 'tarball' == btype: - ext = '.tgz' - return ext - - def _source_type(self, url): - (r, ext) = self._splitext(url) - if ext == '.gz': - (r, ext2) = self._splitext(r) - if ext2 == '.tar': - ext = '.tgz' - elif ext == '.bz2': - (r, ext2) = self._splitext(r) - if ext2 == '.tar': - ext = '.tbz2' - elif ext == "": - ext = self._github_ball_type(url) - - return ext - - def _apply_source_cmd(self, dest, url): - cmd = "" - basename = os.path.basename(url) - stype = self._source_type(url) - if stype == '.tgz': - cmd = "curl -s '%s' | gunzip | tar -xvf -" % url - elif stype == '.tbz2': - cmd = "curl -s '%s' | bunzip2 | tar -xvf -" % url - elif stype == '.zip': - tmp = self._url_to_tmp_filename(url) - cmd = "curl -s -o '%s' '%s' && unzip -o '%s'" % (tmp, url, tmp) - elif stype == '.tar': - cmd = "curl -s '%s' | tar -xvf -" % url - elif stype == '.gz': - (r, ext) = self._splitext(basename) - cmd = "curl -s '%s' | gunzip > '%s'" % (url, r) - elif stype == '.bz2': - (r, ext) = self._splitext(basename) - cmd = "curl -s '%s' | 
bunzip2 > '%s'" % (url, r) - - if cmd != '': - cmd = "mkdir -p '%s'; cd '%s'; %s" % (dest, dest, cmd) - - return cmd - - def _apply_source(self, dest, url): - cmd = self._apply_source_cmd(dest, url) - #FIXME bug 1498298 - if cmd != '': - runner = CommandRunner(cmd, shell=True) - runner.run() - - def apply_sources(self): - if not self._sources: - return - for dest, url in self._sources.items(): - self._apply_source(dest, url) - - -class ServicesHandler(object): - _services = {} - - def __init__(self, services, resource=None, hooks=None): - self._services = services - self.resource = resource - self.hooks = hooks - - def _handle_sysv_command(self, service, command): - if os.path.exists("/bin/systemctl"): - service_exe = "/bin/systemctl" - service = '%s.service' % service - service_start = [service_exe, 'start', service] - service_status = [service_exe, 'status', service] - service_stop = [service_exe, 'stop', service] - elif os.path.exists("/sbin/service"): - service_exe = "/sbin/service" - service_start = [service_exe, service, 'start'] - service_status = [service_exe, service, 'status'] - service_stop = [service_exe, service, 'stop'] - else: - service_exe = "/usr/sbin/service" - service_start = [service_exe, service, 'start'] - service_status = [service_exe, service, 'status'] - service_stop = [service_exe, service, 'stop'] - - if os.path.exists("/bin/systemctl"): - enable_exe = "/bin/systemctl" - enable_on = [enable_exe, 'enable', service] - enable_off = [enable_exe, 'disable', service] - elif os.path.exists("/sbin/chkconfig"): - enable_exe = "/sbin/chkconfig" - enable_on = [enable_exe, service, 'on'] - enable_off = [enable_exe, service, 'off'] - - else: - enable_exe = "/usr/sbin/update-rc.d" - enable_on = [enable_exe, service, 'enable'] - enable_off = [enable_exe, service, 'disable'] - - cmd = None - if "enable" == command: - cmd = enable_on - elif "disable" == command: - cmd = enable_off - elif "start" == command: - cmd = service_start - elif "stop" == command: - cmd = service_stop - elif "status" == command: - cmd = service_status - - if cmd is not None: - command = CommandRunner(cmd) - command.run() - return command - else: - LOG.error("Unknown sysv command %s" % command) - - def _initialize_service(self, handler, service, properties): - if "enabled" in properties: - enable = to_boolean(properties["enabled"]) - if enable: - LOG.info("Enabling service %s" % service) - handler(self, service, "enable") - else: - LOG.info("Disabling service %s" % service) - handler(self, service, "disable") - - if "ensureRunning" in properties: - ensure_running = to_boolean(properties["ensureRunning"]) - command = handler(self, service, "status") - running = command.status == 0 - if ensure_running and not running: - LOG.info("Starting service %s" % service) - handler(self, service, "start") - elif not ensure_running and running: - LOG.info("Stopping service %s" % service) - handler(self, service, "stop") - - def _monitor_service(self, handler, service, properties): - if "ensureRunning" in properties: - ensure_running = to_boolean(properties["ensureRunning"]) - command = handler(self, service, "status") - running = command.status == 0 - if ensure_running and not running: - LOG.warning("Restarting service %s" % service) - start_cmd = handler(self, service, "start") - if start_cmd.status != 0: - LOG.warning('Service %s did not start. 
STDERR: %s' % - (service, start_cmd.stderr)) - for h in self.hooks: - h.event('service.restarted', service, self.resource) - - def _monitor_services(self, handler, services): - for service, properties in services.items(): - self._monitor_service(handler, service, properties) - - def _initialize_services(self, handler, services): - for service, properties in services.items(): - self._initialize_service(handler, service, properties) - - # map of function pointers to various service handlers - _service_handlers = { - "sysvinit": _handle_sysv_command, - "systemd": _handle_sysv_command - } - - def _service_handler(self, manager_name): - handler = None - if manager_name in self._service_handlers: - handler = self._service_handlers[manager_name] - return handler - - def apply_services(self): - """Starts, stops, enables, disables services.""" - if not self._services: - return - for manager, service_entries in self._services.items(): - handler = self._service_handler(manager) - if not handler: - LOG.warning("Skipping invalid service type: %s" % manager) - else: - self._initialize_services(handler, service_entries) - - def monitor_services(self): - """Restarts failed services, and runs hooks.""" - if not self._services: - return - for manager, service_entries in self._services.items(): - handler = self._service_handler(manager) - if not handler: - LOG.warning("Skipping invalid service type: %s" % manager) - else: - self._monitor_services(handler, service_entries) - - -class ConfigsetsHandler(object): - - def __init__(self, configsets, selectedsets): - self.configsets = configsets - self.selectedsets = selectedsets - - def expand_sets(self, lst, executionlist): - for elem in lst: - if isinstance(elem, dict): - # keys()/values() return views on Python 3, so copy to lists - dictkeys = list(elem.keys()) - if len(dictkeys) != 1 or dictkeys.pop() != 'ConfigSet': - raise Exception('Invalid ConfigSets metadata') - dictkey = list(elem.values()).pop() - try: - self.expand_sets(self.configsets[dictkey], executionlist) - except KeyError: - raise Exception("Undefined ConfigSet '%s' referenced" - % dictkey) - else: - executionlist.append(elem) - - def get_configsets(self): - """Returns a list of Configsets to execute in template.""" - if not self.configsets: - if self.selectedsets: - raise Exception('Template has no configSets') - return - if not self.selectedsets: - if 'default' not in self.configsets: - raise Exception('Template has no default configSet, must' - ' specify') - self.selectedsets = 'default' - - selectedlist = [x.strip() for x in self.selectedsets.split(',')] - executionlist = [] - for item in selectedlist: - if item not in self.configsets: - raise Exception("Requested configSet '%s' not in configSets" - " section" % item) - self.expand_sets(self.configsets[item], executionlist) - if not executionlist: - raise Exception( - "Requested configSet %s empty?" % self.selectedsets) - - return executionlist - - -def metadata_server_port( - datafile='/var/lib/heat-cfntools/cfn-metadata-server'): - """Return the metadata server port.
- - Reads the :NNNN from the end of the URL in cfn-metadata-server - """ - try: - f = open(datafile) - server_url = f.read().strip() - f.close() - except IOError: - return None - - if len(server_url) < 1: - return None - - if server_url[-1] == '/': - server_url = server_url[:-1] - - try: - return int(server_url.split(':')[-1]) - except ValueError: - return None - - -class CommandsHandlerRunError(Exception): - pass - - -class CommandsHandler(object): - - def __init__(self, commands): - self.commands = commands - - def apply_commands(self): - """Execute commands on the instance in alphabetical order by name.""" - if not self.commands: - return - for command_label in sorted(self.commands): - LOG.debug("%s is being processed" % command_label) - self._initialize_command(command_label, - self.commands[command_label]) - - def _initialize_command(self, command_label, properties): - command_status = None - cwd = None - env = properties.get("env", None) - - if "cwd" in properties: - cwd = os.path.expanduser(properties["cwd"]) - if not os.path.exists(cwd): - LOG.error("%s has failed. " % command_label + - "%s path does not exist" % cwd) - return - - if "test" in properties: - test = CommandRunner(properties["test"], shell=True) - test_status = test.run('root', cwd, env).status - if test_status != 0: - LOG.info("%s test returns false, skipping command" - % command_label) - return - else: - LOG.debug("%s test returns true, proceeding" % command_label) - - if "command" in properties: - try: - command = properties["command"] - shell = isinstance(command, six.string_types) - command = CommandRunner(command, shell=shell) - command.run('root', cwd, env) - command_status = command.status - except OSError as e: - if e.errno == errno.EEXIST: - LOG.debug(str(e)) - else: - LOG.exception(e) - else: - LOG.error("%s has failed. " % command_label - + "'command' property missing") - return - - if command_status == 0: - LOG.info("%s has been successfully executed" % command_label) - else: - if "ignoreErrors" in properties and \ - to_boolean(properties["ignoreErrors"]): - LOG.info("%s has failed (status=%d). Explicit ignoring" - % (command_label, command_status)) - else: - raise CommandsHandlerRunError("%s has failed." 
% command_label) - - -class GroupsHandler(object): - - def __init__(self, groups): - self.groups = groups - - def apply_groups(self): - """Create Linux/UNIX groups and assign group IDs.""" - if not self.groups: - return - for group, properties in self.groups.items(): - LOG.debug("%s group is being created" % group) - self._initialize_group(group, properties) - - def _initialize_group(self, group, properties): - gid = properties.get("gid", None) - cmd = ['groupadd', group] - if gid is not None: - cmd.extend(['--gid', str(gid)]) - - command = CommandRunner(cmd) - command.run() - command_status = command.status - - if command_status == 0: - LOG.info("%s has been successfully created" % group) - elif command_status == 9: - LOG.error("An error occurred creating %s group : " % - group + "group name not unique") - elif command_status == 4: - LOG.error("An error occurred creating %s group : " % - group + "GID not unique") - elif command_status == 3: - LOG.error("An error occurred creating %s group : " % - group + "GID not valid") - elif command_status == 2: - LOG.error("An error occurred creating %s group : " % - group + "Invalid syntax") - else: - LOG.error("An error occurred creating %s group" % group) - - -class UsersHandler(object): - - def __init__(self, users): - self.users = users - - def apply_users(self): - """Create Linux/UNIX users and assign user IDs, groups and homedir.""" - if not self.users: - return - for user, properties in self.users.items(): - LOG.debug("%s user is being created" % user) - self._initialize_user(user, properties) - - def _initialize_user(self, user, properties): - uid = properties.get("uid", None) - homeDir = properties.get("homeDir", None) - - cmd = ['useradd', user] - - if uid is not None: - cmd.extend(['--uid', six.text_type(uid)]) - - if homeDir is not None: - cmd.extend(['--home', six.text_type(homeDir)]) - - if "groups" in properties: - groups = ','.join(properties["groups"]) - cmd.extend(['--groups', groups]) - - #Users are created as non-interactive system users with a shell - #of /sbin/nologin. This is by design and cannot be modified. 
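# For a user entry such as, e.g.,
#   {"uid": 1001, "homeDir": "/home/wordpress", "groups": ["apache"]}
# the argument list assembled here works out to roughly:
#   useradd wordpress --uid 1001 --home /home/wordpress \
#           --groups apache --shell /sbin/nologin
# and CommandRunner executes it with root privileges.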
- cmd.extend(['--shell', '/sbin/nologin']) - - command = CommandRunner(cmd) - command.run() - command_status = command.status - - if command_status == 0: - LOG.info("%s has been successfully created" % user) - elif command_status == 9: - LOG.error("An error occurred creating %s user : " % - user + "user name not unique") - elif command_status == 6: - LOG.error("An error occurred creating %s user : " % - user + "group does not exist") - elif command_status == 4: - LOG.error("An error occurred creating %s user : " % - user + "UID not unique") - elif command_status == 3: - LOG.error("An error occurred creating %s user : " % - user + "Invalid argument") - elif command_status == 2: - LOG.error("An error occurred creating %s user : " % - user + "Invalid syntax") - else: - LOG.error("An error occurred creating %s user" % user) - - -class MetadataServerConnectionError(Exception): - pass - - -class Metadata(object): - _metadata = None - _init_key = "AWS::CloudFormation::Init" - DEFAULT_PORT = 8000 - - def __init__(self, stack, resource, access_key=None, - secret_key=None, credentials_file=None, region=None, - configsets=None): - - self.stack = stack - self.resource = resource - self.access_key = access_key - self.secret_key = secret_key - self.region = region - self.credentials_file = credentials_file - self.access_key = access_key - self.secret_key = secret_key - self.configsets = configsets - - # TODO(asalkeld) is this metadata for the local resource? - self._is_local_metadata = True - self._metadata = None - self._has_changed = False - - def remote_metadata(self): - """Connect to the metadata server and retrieve the metadata.""" - - if self.credentials_file: - credentials = parse_creds_file(self.credentials_file) - access_key = credentials['AWSAccessKeyId'] - secret_key = credentials['AWSSecretKey'] - elif self.access_key and self.secret_key: - access_key = self.access_key - secret_key = self.secret_key - else: - raise MetadataServerConnectionError("No credentials!") - - port = metadata_server_port() or self.DEFAULT_PORT - - client = cloudformation.CloudFormationConnection( - aws_access_key_id=access_key, - aws_secret_access_key=secret_key, - is_secure=False, port=port, - path="/v1", debug=0) - - res = client.describe_stack_resource(self.stack, self.resource) - # Note pending upstream patch will make this response a - # boto.cloudformation.stack.StackResourceDetail object - # which aligns better with all the existing calls - # see https://github.com/boto/boto/pull/857 - resource_detail = res['DescribeStackResourceResponse'][ - 'DescribeStackResourceResult']['StackResourceDetail'] - return resource_detail['Metadata'] - - def get_nova_meta(self, - cache_path='/var/lib/heat-cfntools/nova_meta.json'): - """Get nova's meta_data.json and cache it. - - Since this is called repeatedly return the cached metadata, - if we have it. 
- """ - - url = 'http://169.254.169.254/openstack/2012-08-10/meta_data.json' - if not os.path.exists(cache_path): - cmd = ['curl', '-o', cache_path, url] - CommandRunner(cmd).run() - try: - with open(cache_path) as fd: - try: - return json.load(fd) - except ValueError: - pass - except IOError: - pass - return None - - def get_instance_id(self): - """Get the unique identifier for this server.""" - instance_id = None - md = self.get_nova_meta() - if md is not None: - instance_id = md.get('uuid') - return instance_id - - def get_tags(self): - """Get the tags for this server.""" - tags = {} - md = self.get_nova_meta() - if md is not None: - tags.update(md.get('meta', {})) - tags['InstanceId'] = md['uuid'] - return tags - - def retrieve( - self, - meta_str=None, - default_path='/var/lib/heat-cfntools/cfn-init-data', - last_path='/var/cache/heat-cfntools/last_metadata'): - """Read the metadata from the given filename or from the remote server. - - Returns: - True -- success - False -- error - """ - if self.resource is not None: - res_last_path = last_path + '_' + self.resource - else: - res_last_path = last_path - - if meta_str: - self._data = meta_str - else: - try: - self._data = self.remote_metadata() - except MetadataServerConnectionError as ex: - LOG.warning( - "Unable to retrieve remote metadata : %s" % str(ex)) - - # If reading remote metadata fails, we fall-back on local files - # in order to get the most up-to-date version, we try: - # /var/cache/heat-cfntools/last_metadata, followed by - # /var/lib/heat-cfntools/cfn-init-data - # This should allow us to do the right thing both during the - # first cfn-init run (when we only have cfn-init-data), and - # in the event of a temporary interruption to connectivity - # affecting cfn-hup, in which case we want to use the locally - # cached metadata or the logic below could re-run a stale - # cfn-init-data - fd = None - for filepath in [res_last_path, last_path, default_path]: - try: - fd = open(filepath) - except IOError: - LOG.warning("Unable to open local metadata : %s" % - filepath) - continue - else: - LOG.info("Opened local metadata %s" % filepath) - break - - if fd: - self._data = fd.read() - fd.close() - else: - LOG.error("Unable to read any valid metadata!") - return - - if isinstance(self._data, str): - self._metadata = json.loads(self._data) - else: - self._metadata = self._data - - last_data = "" - for metadata_file in [res_last_path, last_path]: - try: - with open(metadata_file) as lm: - try: - last_data = json.load(lm) - except ValueError: - pass - lm.close() - except IOError: - LOG.warning("Unable to open local metadata : %s" % - metadata_file) - continue - - if self._metadata != last_data: - self._has_changed = True - - # if cache dir does not exist try to create it - cache_dir = os.path.dirname(last_path) - if not os.path.isdir(cache_dir): - try: - os.makedirs(cache_dir, mode=0o700) - except IOError as e: - LOG.warning('could not create metadata cache dir %s [%s]' % - (cache_dir, e)) - return - # save current metadata to file - tmp_dir = os.path.dirname(last_path) - with tempfile.NamedTemporaryFile(dir=tmp_dir, - mode='wb', - delete=False) as cf: - os.chmod(cf.name, 0o600) - cf.write(json.dumps(self._metadata).encode('UTF-8')) - os.rename(cf.name, last_path) - cf.close() - if res_last_path != last_path: - shutil.copy(last_path, res_last_path) - - return True - - def __str__(self): - return json.dumps(self._metadata) - - def display(self, key=None): - """Print the metadata to the standard output stream. 
By default the - full metadata is displayed but the output can be limited to a specific - key with the argument. - - Arguments: - key -- the metadata's key to display, nested keys can be specified, - separated by the dot character. - e.g., "foo.bar" - If the key contains a dot, it should be surrounded by single - quotes - e.g., "foo.'bar.1'" - """ - if self._metadata is None: - return - - if key is None: - print(str(self)) - return - - value = None - md = self._metadata - while True: - key_match = re.match(r'^(?:(?:\'([^\']+)\')|([^\.]+))(?:\.|$)', - key) - if not key_match: - break - - k = key_match.group(1) or key_match.group(2) - if isinstance(md, dict) and k in md: - key = key.replace(key_match.group(), '') - value = md = md[k] - else: - break - - if key != '': - value = None - - if value is not None: - print(json.dumps(value)) - - return - - def _is_valid_metadata(self): - """Should find the AWS::CloudFormation::Init json key.""" - is_valid = self._metadata and \ - self._init_key in self._metadata and \ - self._metadata[self._init_key] - if is_valid: - self._metadata = self._metadata[self._init_key] - return is_valid - - def _process_config(self, config="config"): - """Parse and process a config section. - - * packages - * sources - * groups - * users - * files - * commands - * services - """ - - try: - self._config = self._metadata[config] - except KeyError: - raise Exception("Could not find '%s' set in template, may need to" - " specify another set." % config) - PackagesHandler(self._config.get("packages")).apply_packages() - SourcesHandler(self._config.get("sources")).apply_sources() - GroupsHandler(self._config.get("groups")).apply_groups() - UsersHandler(self._config.get("users")).apply_users() - FilesHandler(self._config.get("files")).apply_files() - CommandsHandler(self._config.get("commands")).apply_commands() - ServicesHandler(self._config.get("services")).apply_services() - - def cfn_init(self): - """Process the resource metadata.""" - if not self._is_valid_metadata(): - raise Exception("Invalid metadata") - else: - executionlist = ConfigsetsHandler(self._metadata.get("configSets"), - self.configsets).get_configsets() - if not executionlist: - self._process_config() - else: - for item in executionlist: - self._process_config(item) - - def cfn_hup(self, hooks): - """Process the resource metadata.""" - if not self._is_valid_metadata(): - LOG.debug( - 'Metadata does not contain a %s section' % self._init_key) - - if self._is_local_metadata: - self._config = self._metadata.get("config", {}) - s = self._config.get("services") - sh = ServicesHandler(s, resource=self.resource, hooks=hooks) - sh.monitor_services() - - if self._has_changed: - for h in hooks: - h.event('post.update', self.resource, self.resource) diff --git a/heat_cfntools/tests/__init__.py b/heat_cfntools/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/heat_cfntools/tests/test_cfn_helper.py b/heat_cfntools/tests/test_cfn_helper.py deleted file mode 100644 index 83bed76..0000000 --- a/heat_cfntools/tests/test_cfn_helper.py +++ /dev/null @@ -1,1419 +0,0 @@ -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import boto.cloudformation as cfn -import fixtures -import json -import mock -import os -import tempfile -import testtools -import testtools.matchers as ttm - -from heat_cfntools.cfntools import cfn_helper - - -def popen_root_calls(calls, shell=False): - kwargs = {'env': None, 'cwd': None, 'stderr': -1, 'stdout': -1, - 'shell': shell} - return [ - mock.call(call, **kwargs) - for call in calls - ] - - -class FakePOpen(): - def __init__(self, stdout='', stderr='', returncode=0): - self.returncode = returncode - self.stdout = stdout - self.stderr = stderr - - def communicate(self): - return (self.stdout, self.stderr) - - def wait(self): - pass - - -@mock.patch.object(cfn_helper.pwd, 'getpwnam') -@mock.patch.object(cfn_helper.os, 'seteuid') -@mock.patch.object(cfn_helper.os, 'geteuid') -class TestCommandRunner(testtools.TestCase): - - def test_command_runner(self, mock_geteuid, mock_seteuid, mock_getpwnam): - def returns(*args, **kwargs): - if args[0][0] == '/bin/command1': - return FakePOpen('All good') - elif args[0][0] == '/bin/command2': - return FakePOpen('Doing something', 'error', -1) - else: - raise Exception('This should never happen') - - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.side_effect = returns - cmd2 = cfn_helper.CommandRunner(['/bin/command2']) - cmd1 = cfn_helper.CommandRunner(['/bin/command1'], - nextcommand=cmd2) - cmd1.run('root') - self.assertEqual( - 'CommandRunner:\n\tcommand: [\'/bin/command1\']\n\tstdout: ' - 'All good', - str(cmd1)) - self.assertEqual( - 'CommandRunner:\n\tcommand: [\'/bin/command2\']\n\tstatus: ' - '-1\n\tstdout: Doing something\n\tstderr: error', - str(cmd2)) - calls = popen_root_calls([['/bin/command1'], ['/bin/command2']]) - mock_popen.assert_has_calls(calls) - - def test_privileges_are_lowered_for_non_root_user(self, mock_geteuid, - mock_seteuid, - mock_getpwnam): - pw_entry = mock.Mock() - pw_entry.pw_uid = 1001 - mock_getpwnam.return_value = pw_entry - mock_geteuid.return_value = 0 - calls = [mock.call(1001), mock.call(0)] - with mock.patch('subprocess.Popen') as mock_popen: - command = ['/bin/command', '--option=value', 'arg1', 'arg2'] - cmd = cfn_helper.CommandRunner(command) - cmd.run(user='nonroot') - self.assertTrue(mock_geteuid.called) - mock_getpwnam.assert_called_once_with('nonroot') - mock_seteuid.assert_has_calls(calls) - self.assertTrue(mock_popen.called) - - def test_run_returns_when_cannot_set_privileges(self, mock_geteuid, - mock_seteuid, - mock_getpwnam): - msg = '[Error 1] Permission Denied' - mock_seteuid.side_effect = Exception(msg) - with mock.patch('subprocess.Popen') as mock_popen: - command = ['/bin/command2'] - cmd = cfn_helper.CommandRunner(command) - cmd.run(user='nonroot') - self.assertTrue(mock_getpwnam.called) - self.assertTrue(mock_seteuid.called) - self.assertFalse(mock_popen.called) - self.assertEqual(126, cmd.status) - self.assertEqual(msg, cmd.stderr) - - def test_privileges_are_restored_for_command_failure(self, mock_geteuid, - mock_seteuid, - mock_getpwnam): - pw_entry = mock.Mock() - pw_entry.pw_uid = 1001 - mock_getpwnam.return_value = pw_entry - mock_geteuid.return_value 
= 0 - calls = [mock.call(1001), mock.call(0)] - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.side_effect = ValueError('Something wrong') - command = ['/bin/command', '--option=value', 'arg1', 'arg2'] - cmd = cfn_helper.CommandRunner(command) - self.assertRaises(ValueError, cmd.run, user='nonroot') - self.assertTrue(mock_geteuid.called) - mock_getpwnam.assert_called_once_with('nonroot') - mock_seteuid.assert_has_calls(calls) - self.assertTrue(mock_popen.called) - - -@mock.patch.object(cfn_helper, 'controlled_privileges') -class TestPackages(testtools.TestCase): - - def test_yum_install(self, mock_cp): - - def returns(*args, **kwargs): - if args[0][0] == 'rpm' and args[0][1] == '-q': - return FakePOpen(returncode=1) - else: - return FakePOpen(returncode=0) - - calls = [['which', 'yum']] - for pack in ('httpd', 'wordpress', 'mysql-server'): - calls.append(['rpm', '-q', pack]) - calls.append(['yum', '-y', '--showduplicates', 'list', - 'available', pack]) - calls = popen_root_calls(calls) - - packages = { - "yum": { - "mysql-server": [], - "httpd": [], - "wordpress": [] - } - } - - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.side_effect = returns - cfn_helper.PackagesHandler(packages).apply_packages() - mock_popen.assert_has_calls(calls, any_order=True) - - def test_dnf_install_yum_unavailable(self, mock_cp): - - def returns(*args, **kwargs): - if ((args[0][0] == 'rpm' and args[0][1] == '-q') - or (args[0][0] == 'which' and args[0][1] == 'yum')): - return FakePOpen(returncode=1) - else: - return FakePOpen(returncode=0) - - calls = [['which', 'yum']] - for pack in ('httpd', 'wordpress', 'mysql-server'): - calls.append(['rpm', '-q', pack]) - calls.append(['dnf', '-y', '--showduplicates', 'list', - 'available', pack]) - calls = popen_root_calls(calls) - - packages = { - "yum": { - "mysql-server": [], - "httpd": [], - "wordpress": [] - } - } - - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.side_effect = returns - cfn_helper.PackagesHandler(packages).apply_packages() - mock_popen.assert_has_calls(calls, any_order=True) - - def test_dnf_install(self, mock_cp): - - def returns(*args, **kwargs): - if args[0][0] == 'rpm' and args[0][1] == '-q': - return FakePOpen(returncode=1) - else: - return FakePOpen(returncode=0) - - calls = [] - for pack in ('httpd', 'wordpress', 'mysql-server'): - calls.append(['rpm', '-q', pack]) - calls.append(['dnf', '-y', '--showduplicates', 'list', - 'available', pack]) - calls = popen_root_calls(calls) - - packages = { - "dnf": { - "mysql-server": [], - "httpd": [], - "wordpress": [] - } - } - - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.side_effect = returns - cfn_helper.PackagesHandler(packages).apply_packages() - mock_popen.assert_has_calls(calls, any_order=True) - - def test_zypper_install(self, mock_cp): - - def returns(*args, **kwargs): - if args[0][0].startswith('rpm') and args[0][1].startswith('-q'): - return FakePOpen(returncode=1) - else: - return FakePOpen(returncode=0) - - calls = [] - for pack in ('httpd', 'wordpress', 'mysql-server'): - calls.append(['rpm', '-q', pack]) - calls.append(['zypper', '-n', '--no-refresh', 'search', pack]) - calls = popen_root_calls(calls) - - packages = { - "zypper": { - "mysql-server": [], - "httpd": [], - "wordpress": [] - } - } - - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.side_effect = returns - cfn_helper.PackagesHandler(packages).apply_packages() - mock_popen.assert_has_calls(calls, any_order=True) - - def 
test_apt_install(self, mock_cp): - packages = { - "apt": { - "mysql-server": [], - "httpd": [], - "wordpress": [] - } - } - - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.return_value = FakePOpen(returncode=0) - cfn_helper.PackagesHandler(packages).apply_packages() - self.assertTrue(mock_popen.called) - - -@mock.patch.object(cfn_helper, 'controlled_privileges') -class TestServicesHandler(testtools.TestCase): - - def test_services_handler_systemd(self, mock_cp): - calls = [] - returns = [] - - # apply_services - calls.append(['/bin/systemctl', 'enable', 'httpd.service']) - returns.append(FakePOpen()) - calls.append(['/bin/systemctl', 'status', 'httpd.service']) - returns.append(FakePOpen(returncode=-1)) - calls.append(['/bin/systemctl', 'start', 'httpd.service']) - returns.append(FakePOpen()) - calls.append(['/bin/systemctl', 'enable', 'mysqld.service']) - returns.append(FakePOpen()) - calls.append(['/bin/systemctl', 'status', 'mysqld.service']) - returns.append(FakePOpen(returncode=-1)) - calls.append(['/bin/systemctl', 'start', 'mysqld.service']) - returns.append(FakePOpen()) - - # monitor_services not running - calls.append(['/bin/systemctl', 'status', 'httpd.service']) - returns.append(FakePOpen(returncode=-1)) - calls.append(['/bin/systemctl', 'start', 'httpd.service']) - returns.append(FakePOpen()) - - calls = popen_root_calls(calls) - - calls.extend(popen_root_calls(['/bin/services_restarted'], shell=True)) - returns.append(FakePOpen()) - - calls.extend(popen_root_calls([['/bin/systemctl', 'status', - 'mysqld.service']])) - returns.append(FakePOpen(returncode=-1)) - calls.extend(popen_root_calls([['/bin/systemctl', 'start', - 'mysqld.service']])) - returns.append(FakePOpen()) - - calls.extend(popen_root_calls(['/bin/services_restarted'], shell=True)) - returns.append(FakePOpen()) - - # monitor_services running - calls.extend(popen_root_calls([['/bin/systemctl', 'status', - 'httpd.service']])) - returns.append(FakePOpen()) - calls.extend(popen_root_calls([['/bin/systemctl', 'status', - 'mysqld.service']])) - returns.append(FakePOpen()) - - #calls = popen_root_calls(calls) - - services = { - "systemd": { - "mysqld": {"enabled": "true", "ensureRunning": "true"}, - "httpd": {"enabled": "true", "ensureRunning": "true"} - } - } - hooks = [ - cfn_helper.Hook( - 'hook1', - 'service.restarted', - 'Resources.resource1.Metadata', - 'root', - '/bin/services_restarted') - ] - - with mock.patch('os.path.exists') as mock_exists: - mock_exists.return_value = True - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.side_effect = returns - - sh = cfn_helper.ServicesHandler(services, 'resource1', hooks) - sh.apply_services() - # services not running - sh.monitor_services() - - # services running - sh.monitor_services() - mock_popen.assert_has_calls(calls, any_order=True) - mock_exists.assert_called_with('/bin/systemctl') - - def test_services_handler_systemd_disabled(self, mock_cp): - calls = [] - - # apply_services - calls.append(['/bin/systemctl', 'disable', 'httpd.service']) - calls.append(['/bin/systemctl', 'status', 'httpd.service']) - calls.append(['/bin/systemctl', 'stop', 'httpd.service']) - calls.append(['/bin/systemctl', 'disable', 'mysqld.service']) - calls.append(['/bin/systemctl', 'status', 'mysqld.service']) - calls.append(['/bin/systemctl', 'stop', 'mysqld.service']) - calls = popen_root_calls(calls) - - services = { - "systemd": { - "mysqld": {"enabled": "false", "ensureRunning": "false"}, - "httpd": {"enabled": "false", "ensureRunning": "false"} - } - } 
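# Each Hook below is (name, triggers, path, runas, action); the path
# 'Resources.resource1.Metadata' is what ties the hook to 'resource1'
# (Hook.resource_name_get() returns the second dotted component), so a
# 'service.restarted' event for that resource runs /bin/services_restarted
# as root.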
- hooks = [ - cfn_helper.Hook( - 'hook1', - 'service.restarted', - 'Resources.resource1.Metadata', - 'root', - '/bin/services_restarted') - ] - with mock.patch('os.path.exists') as mock_exists: - mock_exists.return_value = True - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.return_value = FakePOpen() - sh = cfn_helper.ServicesHandler(services, 'resource1', hooks) - sh.apply_services() - mock_popen.assert_has_calls(calls, any_order=True) - mock_exists.assert_called_with('/bin/systemctl') - - def test_services_handler_sysv_service_chkconfig(self, mock_cp): - - def exists(*args, **kwargs): - return args[0] != '/bin/systemctl' - - calls = [] - returns = [] - - # apply_services - calls.append(['/sbin/chkconfig', 'httpd', 'on']) - returns.append(FakePOpen()) - calls.append(['/sbin/service', 'httpd', 'status']) - returns.append(FakePOpen(returncode=-1)) - calls.append(['/sbin/service', 'httpd', 'start']) - returns.append(FakePOpen()) - - # monitor_services not running - calls.append(['/sbin/service', 'httpd', 'status']) - returns.append(FakePOpen(returncode=-1)) - calls.append(['/sbin/service', 'httpd', 'start']) - returns.append(FakePOpen()) - - calls = popen_root_calls(calls) - - calls.extend(popen_root_calls(['/bin/services_restarted'], shell=True)) - returns.append(FakePOpen()) - - # monitor_services running - calls.extend(popen_root_calls([['/sbin/service', 'httpd', 'status']])) - returns.append(FakePOpen()) - - services = { - "sysvinit": { - "httpd": {"enabled": "true", "ensureRunning": "true"} - } - } - hooks = [ - cfn_helper.Hook( - 'hook1', - 'service.restarted', - 'Resources.resource1.Metadata', - 'root', - '/bin/services_restarted') - ] - - with mock.patch('os.path.exists') as mock_exists: - mock_exists.side_effect = exists - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.side_effect = returns - sh = cfn_helper.ServicesHandler(services, 'resource1', hooks) - sh.apply_services() - # services not running - sh.monitor_services() - - # services running - sh.monitor_services() - mock_popen.assert_has_calls(calls) - mock_exists.assert_any_call('/bin/systemctl') - mock_exists.assert_any_call('/sbin/service') - mock_exists.assert_any_call('/sbin/chkconfig') - - def test_services_handler_sysv_disabled_service_chkconfig(self, mock_cp): - def exists(*args, **kwargs): - return args[0] != '/bin/systemctl' - - calls = [] - - # apply_services - calls.append(['/sbin/chkconfig', 'httpd', 'off']) - calls.append(['/sbin/service', 'httpd', 'status']) - calls.append(['/sbin/service', 'httpd', 'stop']) - - calls = popen_root_calls(calls) - - services = { - "sysvinit": { - "httpd": {"enabled": "false", "ensureRunning": "false"} - } - } - hooks = [ - cfn_helper.Hook( - 'hook1', - 'service.restarted', - 'Resources.resource1.Metadata', - 'root', - '/bin/services_restarted') - ] - - with mock.patch('os.path.exists') as mock_exists: - mock_exists.side_effect = exists - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.return_value = FakePOpen() - sh = cfn_helper.ServicesHandler(services, 'resource1', hooks) - sh.apply_services() - mock_popen.assert_has_calls(calls) - mock_exists.assert_any_call('/bin/systemctl') - mock_exists.assert_any_call('/sbin/service') - mock_exists.assert_any_call('/sbin/chkconfig') - - def test_services_handler_sysv_systemctl(self, mock_cp): - calls = [] - returns = [] - - # apply_services - calls.append(['/bin/systemctl', 'enable', 'httpd.service']) - returns.append(FakePOpen()) - calls.append(['/bin/systemctl', 'status', 
'httpd.service']) - returns.append(FakePOpen(returncode=-1)) - calls.append(['/bin/systemctl', 'start', 'httpd.service']) - returns.append(FakePOpen()) - - # monitor_services not running - calls.append(['/bin/systemctl', 'status', 'httpd.service']) - returns.append(FakePOpen(returncode=-1)) - calls.append(['/bin/systemctl', 'start', 'httpd.service']) - returns.append(FakePOpen()) - - shell_calls = [] - shell_calls.append('/bin/services_restarted') - returns.append(FakePOpen()) - - calls = popen_root_calls(calls) - calls.extend(popen_root_calls(shell_calls, shell=True)) - - # monitor_services running - calls.extend(popen_root_calls([['/bin/systemctl', 'status', - 'httpd.service']])) - returns.append(FakePOpen()) - - services = { - "sysvinit": { - "httpd": {"enabled": "true", "ensureRunning": "true"} - } - } - hooks = [ - cfn_helper.Hook( - 'hook1', - 'service.restarted', - 'Resources.resource1.Metadata', - 'root', - '/bin/services_restarted') - ] - - with mock.patch('os.path.exists') as mock_exists: - mock_exists.return_value = True - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.side_effect = returns - sh = cfn_helper.ServicesHandler(services, 'resource1', hooks) - sh.apply_services() - # services not running - sh.monitor_services() - - # services running - sh.monitor_services() - mock_popen.assert_has_calls(calls) - mock_exists.assert_called_with('/bin/systemctl') - - def test_services_handler_sysv_disabled_systemctl(self, mock_cp): - calls = [] - - # apply_services - calls.append(['/bin/systemctl', 'disable', 'httpd.service']) - calls.append(['/bin/systemctl', 'status', 'httpd.service']) - calls.append(['/bin/systemctl', 'stop', 'httpd.service']) - - calls = popen_root_calls(calls) - - services = { - "sysvinit": { - "httpd": {"enabled": "false", "ensureRunning": "false"} - } - } - hooks = [ - cfn_helper.Hook( - 'hook1', - 'service.restarted', - 'Resources.resource1.Metadata', - 'root', - '/bin/services_restarted') - ] - - with mock.patch('os.path.exists') as mock_exists: - mock_exists.return_value = True - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.return_value = FakePOpen() - sh = cfn_helper.ServicesHandler(services, 'resource1', hooks) - sh.apply_services() - mock_popen.assert_has_calls(calls) - mock_exists.assert_called_with('/bin/systemctl') - - def test_services_handler_sysv_service_updaterc(self, mock_cp): - calls = [] - returns = [] - - # apply_services - calls.append(['/usr/sbin/update-rc.d', 'httpd', 'enable']) - returns.append(FakePOpen()) - calls.append(['/usr/sbin/service', 'httpd', 'status']) - returns.append(FakePOpen(returncode=-1)) - calls.append(['/usr/sbin/service', 'httpd', 'start']) - returns.append(FakePOpen()) - - # monitor_services not running - calls.append(['/usr/sbin/service', 'httpd', 'status']) - returns.append(FakePOpen(returncode=-1)) - calls.append(['/usr/sbin/service', 'httpd', 'start']) - returns.append(FakePOpen()) - - shell_calls = [] - shell_calls.append('/bin/services_restarted') - returns.append(FakePOpen()) - - calls = popen_root_calls(calls) - calls.extend(popen_root_calls(shell_calls, shell=True)) - - # monitor_services running - calls.extend(popen_root_calls([['/usr/sbin/service', 'httpd', - 'status']])) - returns.append(FakePOpen()) - - services = { - "sysvinit": { - "httpd": {"enabled": "true", "ensureRunning": "true"} - } - } - hooks = [ - cfn_helper.Hook( - 'hook1', - 'service.restarted', - 'Resources.resource1.Metadata', - 'root', - '/bin/services_restarted') - ] - - with mock.patch('os.path.exists') as 
mock_exists: - mock_exists.return_value = False - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.side_effect = returns - sh = cfn_helper.ServicesHandler(services, 'resource1', hooks) - sh.apply_services() - # services not running - sh.monitor_services() - - # services running - sh.monitor_services() - mock_popen.assert_has_calls(calls) - mock_exists.assert_any_call('/bin/systemctl') - mock_exists.assert_any_call('/sbin/service') - mock_exists.assert_any_call('/sbin/chkconfig') - - def test_services_handler_sysv_disabled_service_updaterc(self, mock_cp): - calls = [] - returns = [] - - # apply_services - calls.append(['/usr/sbin/update-rc.d', 'httpd', 'disable']) - returns.append(FakePOpen()) - calls.append(['/usr/sbin/service', 'httpd', 'status']) - returns.append(FakePOpen()) - calls.append(['/usr/sbin/service', 'httpd', 'stop']) - returns.append(FakePOpen()) - - calls = popen_root_calls(calls) - - services = { - "sysvinit": { - "httpd": {"enabled": "false", "ensureRunning": "false"} - } - } - hooks = [ - cfn_helper.Hook( - 'hook1', - 'service.restarted', - 'Resources.resource1.Metadata', - 'root', - '/bin/services_restarted') - ] - - with mock.patch('os.path.exists') as mock_exists: - mock_exists.return_value = False - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.side_effect = returns - sh = cfn_helper.ServicesHandler(services, 'resource1', hooks) - sh.apply_services() - mock_popen.assert_has_calls(calls) - mock_exists.assert_any_call('/bin/systemctl') - mock_exists.assert_any_call('/sbin/service') - mock_exists.assert_any_call('/sbin/chkconfig') - - -class TestHupConfig(testtools.TestCase): - - def test_load_main_section(self): - fcreds = tempfile.NamedTemporaryFile() - fcreds.write('AWSAccessKeyId=foo\nAWSSecretKey=bar\n'.encode('UTF-8')) - fcreds.flush() - - main_conf = tempfile.NamedTemporaryFile() - main_conf.write(('''[main] -stack=teststack -credential-file=%s''' % fcreds.name).encode('UTF-8')) - main_conf.flush() - mainconfig = cfn_helper.HupConfig([open(main_conf.name)]) - self.assertEqual( - '{stack: teststack, credential_file: %s, ' - 'region: nova, interval:10}' % fcreds.name, - str(mainconfig)) - main_conf.close() - - main_conf = tempfile.NamedTemporaryFile() - main_conf.write(('''[main] -stack=teststack -region=region1 -credential-file=%s-invalid -interval=120''' % fcreds.name).encode('UTF-8')) - main_conf.flush() - e = self.assertRaises(Exception, cfn_helper.HupConfig, - [open(main_conf.name)]) - self.assertIn('invalid credentials file', str(e)) - fcreds.close() - - @mock.patch.object(cfn_helper, 'controlled_privileges') - def test_hup_config(self, mock_cp): - hooks_conf = tempfile.NamedTemporaryFile() - - def write_hook_conf(f, name, triggers, path, action): - f.write(( - '[%s]\ntriggers=%s\npath=%s\naction=%s\nrunas=root\n\n' % ( - name, triggers, path, action)).encode('UTF-8')) - - write_hook_conf( - hooks_conf, - 'hook2', - 'service2.restarted', - 'Resources.resource2.Metadata', - '/bin/hook2') - write_hook_conf( - hooks_conf, - 'hook1', - 'service1.restarted', - 'Resources.resource1.Metadata', - '/bin/hook1') - write_hook_conf( - hooks_conf, - 'hook3', - 'service3.restarted', - 'Resources.resource3.Metadata', - '/bin/hook3') - write_hook_conf( - hooks_conf, - 'cfn-http-restarted', - 'service.restarted', - 'Resources.resource.Metadata', - '/bin/cfn-http-restarted') - hooks_conf.flush() - - fcreds = tempfile.NamedTemporaryFile() - fcreds.write('AWSAccessKeyId=foo\nAWSSecretKey=bar\n'.encode('UTF-8')) - fcreds.flush() - - main_conf = 
tempfile.NamedTemporaryFile() - main_conf.write(('''[main] -stack=teststack -credential-file=%s -region=region1 -interval=120''' % fcreds.name).encode('UTF-8')) - main_conf.flush() - - mainconfig = cfn_helper.HupConfig([ - open(main_conf.name), - open(hooks_conf.name)]) - unique_resources = mainconfig.unique_resources_get() - self.assertThat([ - 'resource', - 'resource1', - 'resource2', - 'resource3', - ], ttm.Equals(sorted(unique_resources))) - - hooks = sorted(mainconfig.hooks, - key=lambda hook: hook.resource_name_get()) - self.assertEqual(len(hooks), 4) - self.assertEqual( - '{cfn-http-restarted, service.restarted,' - ' Resources.resource.Metadata, root, /bin/cfn-http-restarted}', - str(hooks[0])) - self.assertEqual( - '{hook1, service1.restarted, Resources.resource1.Metadata,' - ' root, /bin/hook1}', str(hooks[1])) - self.assertEqual( - '{hook2, service2.restarted, Resources.resource2.Metadata,' - ' root, /bin/hook2}', str(hooks[2])) - self.assertEqual( - '{hook3, service3.restarted, Resources.resource3.Metadata,' - ' root, /bin/hook3}', str(hooks[3])) - - calls = [] - calls.extend(popen_root_calls(['/bin/cfn-http-restarted'], shell=True)) - calls.extend(popen_root_calls(['/bin/hook1'], shell=True)) - calls.extend(popen_root_calls(['/bin/hook2'], shell=True)) - calls.extend(popen_root_calls(['/bin/hook3'], shell=True)) - #calls = popen_root_calls(calls) - - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.return_value = FakePOpen('All good') - - for hook in hooks: - hook.event(hook.triggers, None, hook.resource_name_get()) - - hooks_conf.close() - fcreds.close() - main_conf.close() - mock_popen.assert_has_calls(calls) - - -class TestCfnHelper(testtools.TestCase): - - def _check_metadata_content(self, content, value): - with tempfile.NamedTemporaryFile() as metadata_info: - metadata_info.write(content.encode('UTF-8')) - metadata_info.flush() - port = cfn_helper.metadata_server_port(metadata_info.name) - self.assertEqual(value, port) - - def test_metadata_server_port(self): - self._check_metadata_content("http://172.20.42.42:8000\n", 8000) - - def test_metadata_server_port_https(self): - self._check_metadata_content("https://abc.foo.bar:6969\n", 6969) - - def test_metadata_server_port_noport(self): - self._check_metadata_content("http://172.20.42.42\n", None) - - def test_metadata_server_port_justip(self): - self._check_metadata_content("172.20.42.42", None) - - def test_metadata_server_port_weird(self): - self._check_metadata_content("::::", None) - self._check_metadata_content("beforecolons:aftercolons", None) - - def test_metadata_server_port_emptyfile(self): - self._check_metadata_content("\n", None) - self._check_metadata_content("", None) - - def test_metadata_server_nofile(self): - random_filename = self.getUniqueString() - self.assertIsNone(cfn_helper.metadata_server_port(random_filename)) - - def test_to_boolean(self): - self.assertTrue(cfn_helper.to_boolean(True)) - self.assertTrue(cfn_helper.to_boolean('true')) - self.assertTrue(cfn_helper.to_boolean('yes')) - self.assertTrue(cfn_helper.to_boolean('1')) - self.assertTrue(cfn_helper.to_boolean(1)) - - self.assertFalse(cfn_helper.to_boolean(False)) - self.assertFalse(cfn_helper.to_boolean('false')) - self.assertFalse(cfn_helper.to_boolean('no')) - self.assertFalse(cfn_helper.to_boolean('0')) - self.assertFalse(cfn_helper.to_boolean(0)) - self.assertFalse(cfn_helper.to_boolean(None)) - self.assertFalse(cfn_helper.to_boolean('fingle')) - - def test_parse_creds_file(self): - def parse_creds_test(file_contents, 
creds_match): - with tempfile.NamedTemporaryFile(mode='w') as fcreds: - fcreds.write(file_contents) - fcreds.flush() - creds = cfn_helper.parse_creds_file(fcreds.name) - self.assertThat(creds_match, ttm.Equals(creds)) - parse_creds_test( - 'AWSAccessKeyId=foo\nAWSSecretKey=bar\n', - {'AWSAccessKeyId': 'foo', 'AWSSecretKey': 'bar'} - ) - parse_creds_test( - 'AWSAccessKeyId =foo\nAWSSecretKey= bar\n', - {'AWSAccessKeyId': 'foo', 'AWSSecretKey': 'bar'} - ) - parse_creds_test( - 'AWSAccessKeyId = foo\nAWSSecretKey = bar\n', - {'AWSAccessKeyId': 'foo', 'AWSSecretKey': 'bar'} - ) - - -class TestMetadataRetrieve(testtools.TestCase): - - def setUp(self): - super(TestMetadataRetrieve, self).setUp() - self.tdir = self.useFixture(fixtures.TempDir()) - self.last_file = os.path.join(self.tdir.path, 'last_metadata') - - def test_metadata_retrieve_files(self): - - md_data = {"AWS::CloudFormation::Init": {"config": {"files": { - "/tmp/foo": {"content": "bar"}}}}} - md_str = json.dumps(md_data) - - md = cfn_helper.Metadata('teststack', None) - - with tempfile.NamedTemporaryFile(mode='w+') as default_file: - default_file.write(md_str) - default_file.flush() - self.assertThat(default_file.name, ttm.FileContains(md_str)) - - self.assertTrue( - md.retrieve(default_path=default_file.name, - last_path=self.last_file)) - - self.assertThat(self.last_file, ttm.FileContains(md_str)) - self.assertThat(md_data, ttm.Equals(md._metadata)) - - md = cfn_helper.Metadata('teststack', None) - self.assertTrue(md.retrieve(default_path=default_file.name, - last_path=self.last_file)) - self.assertThat(md_data, ttm.Equals(md._metadata)) - - def test_metadata_retrieve_none(self): - - md = cfn_helper.Metadata('teststack', None) - default_file = os.path.join(self.tdir.path, 'default_file') - - self.assertFalse(md.retrieve(default_path=default_file, - last_path=self.last_file)) - self.assertIsNone(md._metadata) - - displayed = self.useFixture(fixtures.StringStream('stdout')) - fake_stdout = displayed.stream - self.useFixture(fixtures.MonkeyPatch('sys.stdout', fake_stdout)) - md.display() - fake_stdout.flush() - self.assertEqual(displayed.getDetails()['stdout'].as_text(), "") - - def test_metadata_retrieve_passed(self): - - md_data = {"AWS::CloudFormation::Init": {"config": {"files": { - "/tmp/foo": {"content": "bar"}}}}} - md_str = json.dumps(md_data) - - md = cfn_helper.Metadata('teststack', None) - self.assertTrue(md.retrieve(meta_str=md_data, - last_path=self.last_file)) - self.assertThat(md_data, ttm.Equals(md._metadata)) - self.assertEqual(md_str, str(md)) - - displayed = self.useFixture(fixtures.StringStream('stdout')) - fake_stdout = displayed.stream - self.useFixture(fixtures.MonkeyPatch('sys.stdout', fake_stdout)) - md.display() - fake_stdout.flush() - self.assertEqual(displayed.getDetails()['stdout'].as_text(), - "{\"AWS::CloudFormation::Init\": {\"config\": {" - "\"files\": {\"/tmp/foo\": {\"content\": \"bar\"}" - "}}}}\n") - - def test_metadata_retrieve_by_key_passed(self): - - md_data = {"foo": {"bar": {"fred.1": "abcd"}}} - md_str = json.dumps(md_data) - - md = cfn_helper.Metadata('teststack', None) - self.assertTrue(md.retrieve(meta_str=md_data, - last_path=self.last_file)) - self.assertThat(md_data, ttm.Equals(md._metadata)) - self.assertEqual(md_str, str(md)) - - displayed = self.useFixture(fixtures.StringStream('stdout')) - fake_stdout = displayed.stream - self.useFixture(fixtures.MonkeyPatch('sys.stdout', fake_stdout)) - md.display("foo") - fake_stdout.flush() - 
self.assertEqual(displayed.getDetails()['stdout'].as_text(), - "{\"bar\": {\"fred.1\": \"abcd\"}}\n") - - def test_metadata_retrieve_by_nested_key_passed(self): - - md_data = {"foo": {"bar": {"fred.1": "abcd"}}} - md_str = json.dumps(md_data) - - md = cfn_helper.Metadata('teststack', None) - self.assertTrue(md.retrieve(meta_str=md_data, - last_path=self.last_file)) - self.assertThat(md_data, ttm.Equals(md._metadata)) - self.assertEqual(md_str, str(md)) - - displayed = self.useFixture(fixtures.StringStream('stdout')) - fake_stdout = displayed.stream - self.useFixture(fixtures.MonkeyPatch('sys.stdout', fake_stdout)) - md.display("foo.bar.'fred.1'") - fake_stdout.flush() - self.assertEqual(displayed.getDetails()['stdout'].as_text(), - '"abcd"\n') - - def test_metadata_retrieve_key_none(self): - - md_data = {"AWS::CloudFormation::Init": {"config": {"files": { - "/tmp/foo": {"content": "bar"}}}}} - md_str = json.dumps(md_data) - - md = cfn_helper.Metadata('teststack', None) - self.assertTrue(md.retrieve(meta_str=md_data, - last_path=self.last_file)) - self.assertThat(md_data, ttm.Equals(md._metadata)) - self.assertEqual(md_str, str(md)) - - displayed = self.useFixture(fixtures.StringStream('stdout')) - fake_stdout = displayed.stream - self.useFixture(fixtures.MonkeyPatch('sys.stdout', fake_stdout)) - md.display("no_key") - fake_stdout.flush() - self.assertEqual(displayed.getDetails()['stdout'].as_text(), "") - - def test_metadata_retrieve_by_nested_key_none(self): - - md_data = {"foo": {"bar": {"fred.1": "abcd"}}} - md_str = json.dumps(md_data) - - md = cfn_helper.Metadata('teststack', None) - self.assertTrue(md.retrieve(meta_str=md_data, - last_path=self.last_file)) - self.assertThat(md_data, ttm.Equals(md._metadata)) - self.assertEqual(md_str, str(md)) - - displayed = self.useFixture(fixtures.StringStream('stdout')) - fake_stdout = displayed.stream - self.useFixture(fixtures.MonkeyPatch('sys.stdout', fake_stdout)) - md.display("foo.fred") - fake_stdout.flush() - self.assertEqual(displayed.getDetails()['stdout'].as_text(), "") - - def test_metadata_retrieve_by_nested_key_none_with_matching_string(self): - - md_data = {"foo": "bar"} - md_str = json.dumps(md_data) - - md = cfn_helper.Metadata('teststack', None) - self.assertTrue(md.retrieve(meta_str=md_data, - last_path=self.last_file)) - self.assertThat(md_data, ttm.Equals(md._metadata)) - self.assertEqual(md_str, str(md)) - - displayed = self.useFixture(fixtures.StringStream('stdout')) - fake_stdout = displayed.stream - self.useFixture(fixtures.MonkeyPatch('sys.stdout', fake_stdout)) - md.display("foo.bar") - fake_stdout.flush() - self.assertEqual(displayed.getDetails()['stdout'].as_text(), "") - - def test_metadata_creates_cache(self): - temp_home = tempfile.mkdtemp() - - def cleanup_temp_home(thome): - os.unlink(os.path.join(thome, 'cache', 'last_metadata')) - os.rmdir(os.path.join(thome, 'cache')) - os.rmdir(os.path.join(thome)) - - self.addCleanup(cleanup_temp_home, temp_home) - - last_path = os.path.join(temp_home, 'cache', 'last_metadata') - md_data = {"AWS::CloudFormation::Init": {"config": {"files": { - "/tmp/foo": {"content": "bar"}}}}} - md_str = json.dumps(md_data) - md = cfn_helper.Metadata('teststack', None) - - self.assertFalse(os.path.exists(last_path), - "last_metadata file already exists") - self.assertTrue(md.retrieve(meta_str=md_str, last_path=last_path)) - self.assertTrue(os.path.exists(last_path), - "last_metadata file should exist") - # Ensure created dirs and file have right perms - 
self.assertTrue(os.stat(last_path).st_mode & 0o600 == 0o600) - self.assertTrue( - os.stat(os.path.dirname(last_path)).st_mode & 0o700 == 0o700) - - def test_is_valid_metadata(self): - md_data = {"AWS::CloudFormation::Init": {"config": {"files": { - "/tmp/foo": {"content": "bar"}}}}} - - md = cfn_helper.Metadata('teststack', None) - self.assertTrue( - md.retrieve(meta_str=md_data, last_path=self.last_file)) - - self.assertThat(md_data, ttm.Equals(md._metadata)) - self.assertTrue(md._is_valid_metadata()) - self.assertThat( - md_data['AWS::CloudFormation::Init'], ttm.Equals(md._metadata)) - - def test_remote_metadata(self): - md_data = {"AWS::CloudFormation::Init": {"config": {"files": { - "/tmp/foo": {"content": "bar"}}}}} - - with mock.patch.object( - cfn.CloudFormationConnection, 'describe_stack_resource' - ) as mock_dsr: - mock_dsr.return_value = { - 'DescribeStackResourceResponse': { - 'DescribeStackResourceResult': { - 'StackResourceDetail': {'Metadata': md_data}}}} - md = cfn_helper.Metadata( - 'teststack', - None, - access_key='foo', - secret_key='bar') - self.assertTrue(md.retrieve(last_path=self.last_file)) - self.assertThat(md_data, ttm.Equals(md._metadata)) - - with tempfile.NamedTemporaryFile(mode='w') as fcreds: - fcreds.write('AWSAccessKeyId=foo\nAWSSecretKey=bar\n') - fcreds.flush() - md = cfn_helper.Metadata( - 'teststack', None, credentials_file=fcreds.name) - self.assertTrue(md.retrieve(last_path=self.last_file)) - self.assertThat(md_data, ttm.Equals(md._metadata)) - - def test_nova_meta_with_cache(self): - meta_in = {"uuid": "f9431d18-d971-434d-9044-5b38f5b4646f", - "availability_zone": "nova", - "hostname": "as-wikidatabase-4ykioj3lgi57.novalocal", - "launch_index": 0, - "meta": {}, - "public_keys": {"heat_key": "ssh-rsa etc...\n"}, - "name": "as-WikiDatabase-4ykioj3lgi57"} - md_str = json.dumps(meta_in) - - md = cfn_helper.Metadata('teststack', None) - with tempfile.NamedTemporaryFile(mode='w+') as default_file: - default_file.write(md_str) - default_file.flush() - self.assertThat(default_file.name, ttm.FileContains(md_str)) - meta_out = md.get_nova_meta(cache_path=default_file.name) - - self.assertEqual(meta_in, meta_out) - - @mock.patch.object(cfn_helper, 'controlled_privileges') - def test_nova_meta_curl(self, mock_cp): - url = 'http://169.254.169.254/openstack/2012-08-10/meta_data.json' - temp_home = tempfile.mkdtemp() - cache_path = os.path.join(temp_home, 'meta_data.json') - - def cleanup_temp_home(thome): - os.unlink(cache_path) - os.rmdir(thome) - - self.addCleanup(cleanup_temp_home, temp_home) - - meta_in = {"uuid": "f9431d18-d971-434d-9044-5b38f5b4646f", - "availability_zone": "nova", - "hostname": "as-wikidatabase-4ykioj3lgi57.novalocal", - "launch_index": 0, - "meta": {"freddy": "is hungry"}, - "public_keys": {"heat_key": "ssh-rsa etc...\n"}, - "name": "as-WikiDatabase-4ykioj3lgi57"} - md_str = json.dumps(meta_in) - - def write_cache_file(*params, **kwargs): - with open(cache_path, 'w+') as cache_file: - cache_file.write(md_str) - cache_file.flush() - self.assertThat(cache_file.name, ttm.FileContains(md_str)) - return FakePOpen('Downloaded', '', 0) - - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.side_effect = write_cache_file - md = cfn_helper.Metadata('teststack', None) - meta_out = md.get_nova_meta(cache_path=cache_path) - self.assertEqual(meta_in, meta_out) - mock_popen.assert_has_calls( - popen_root_calls([['curl', '-o', cache_path, url]])) - - @mock.patch.object(cfn_helper, 'controlled_privileges') - def 
test_nova_meta_curl_corrupt(self, mock_cp): - url = 'http://169.254.169.254/openstack/2012-08-10/meta_data.json' - temp_home = tempfile.mkdtemp() - cache_path = os.path.join(temp_home, 'meta_data.json') - - def cleanup_temp_home(thome): - os.unlink(cache_path) - os.rmdir(thome) - - self.addCleanup(cleanup_temp_home, temp_home) - - md_str = "this { is not really json" - - def write_cache_file(*params, **kwargs): - with open(cache_path, 'w+') as cache_file: - cache_file.write(md_str) - cache_file.flush() - self.assertThat(cache_file.name, ttm.FileContains(md_str)) - return FakePOpen('Downloaded', '', 0) - - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.side_effect = write_cache_file - md = cfn_helper.Metadata('teststack', None) - meta_out = md.get_nova_meta(cache_path=cache_path) - self.assertIsNone(meta_out) - mock_popen.assert_has_calls( - popen_root_calls([['curl', '-o', cache_path, url]])) - - @mock.patch.object(cfn_helper, 'controlled_privileges') - def test_nova_meta_curl_failed(self, mock_cp): - url = 'http://169.254.169.254/openstack/2012-08-10/meta_data.json' - temp_home = tempfile.mkdtemp() - cache_path = os.path.join(temp_home, 'meta_data.json') - - def cleanup_temp_home(thome): - os.rmdir(thome) - - self.addCleanup(cleanup_temp_home, temp_home) - - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.return_value = FakePOpen('Failed', '', 1) - md = cfn_helper.Metadata('teststack', None) - meta_out = md.get_nova_meta(cache_path=cache_path) - self.assertIsNone(meta_out) - mock_popen.assert_has_calls( - popen_root_calls([['curl', '-o', cache_path, url]])) - - def test_get_tags(self): - fake_tags = {'foo': 'fee', - 'apple': 'red'} - md_data = {"uuid": "f9431d18-d971-434d-9044-5b38f5b4646f", - "availability_zone": "nova", - "hostname": "as-wikidatabase-4ykioj3lgi57.novalocal", - "launch_index": 0, - "meta": fake_tags, - "public_keys": {"heat_key": "ssh-rsa etc...\n"}, - "name": "as-WikiDatabase-4ykioj3lgi57"} - tags_expect = fake_tags - tags_expect['InstanceId'] = md_data['uuid'] - - md = cfn_helper.Metadata('teststack', None) - - with mock.patch.object(md, 'get_nova_meta') as mock_method: - mock_method.return_value = md_data - tags = md.get_tags() - mock_method.assert_called_once_with() - - self.assertEqual(tags_expect, tags) - - def test_get_instance_id(self): - uuid = "f9431d18-d971-434d-9044-5b38f5b4646f" - md_data = {"uuid": uuid, - "availability_zone": "nova", - "hostname": "as-wikidatabase-4ykioj3lgi57.novalocal", - "launch_index": 0, - "public_keys": {"heat_key": "ssh-rsa etc...\n"}, - "name": "as-WikiDatabase-4ykioj3lgi57"} - - md = cfn_helper.Metadata('teststack', None) - - with mock.patch.object(md, 'get_nova_meta') as mock_method: - mock_method.return_value = md_data - self.assertEqual(md.get_instance_id(), uuid) - mock_method.assert_called_once_with() - - -class TestCfnInit(testtools.TestCase): - - def setUp(self): - super(TestCfnInit, self).setUp() - self.tdir = self.useFixture(fixtures.TempDir()) - self.last_file = os.path.join(self.tdir.path, 'last_metadata') - - def test_cfn_init(self): - - with tempfile.NamedTemporaryFile(mode='w+') as foo_file: - md_data = {"AWS::CloudFormation::Init": {"config": {"files": { - foo_file.name: {"content": "bar"}}}}} - - md = cfn_helper.Metadata('teststack', None) - self.assertTrue( - md.retrieve(meta_str=md_data, last_path=self.last_file)) - md.cfn_init() - self.assertThat(foo_file.name, ttm.FileContains('bar')) - - @mock.patch.object(cfn_helper, 'controlled_privileges') - def 
test_cfn_init_with_ignore_errors_false(self, mock_cp): - md_data = {"AWS::CloudFormation::Init": {"config": {"commands": { - "00_foo": {"command": "/bin/command1", - "ignoreErrors": "false"}}}}} - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.return_value = FakePOpen('Doing something', 'error', -1) - md = cfn_helper.Metadata('teststack', None) - self.assertTrue( - md.retrieve(meta_str=md_data, last_path=self.last_file)) - self.assertRaises(cfn_helper.CommandsHandlerRunError, md.cfn_init) - mock_popen.assert_has_calls(popen_root_calls(['/bin/command1'], - shell=True)) - - @mock.patch.object(cfn_helper, 'controlled_privileges') - def test_cfn_init_with_ignore_errors_true(self, mock_cp): - calls = [] - returns = [] - calls.extend(popen_root_calls(['/bin/command1'], shell=True)) - returns.append(FakePOpen('Doing something', 'error', -1)) - calls.extend(popen_root_calls(['/bin/command2'], shell=True)) - returns.append(FakePOpen('All good')) - #calls = popen_root_calls(calls) - - md_data = {"AWS::CloudFormation::Init": {"config": {"commands": { - "00_foo": {"command": "/bin/command1", - "ignoreErrors": "true"}, - "01_bar": {"command": "/bin/command2", - "ignoreErrors": "false"} - }}}} - - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.side_effect = returns - md = cfn_helper.Metadata('teststack', None) - self.assertTrue( - md.retrieve(meta_str=md_data, last_path=self.last_file)) - md.cfn_init() - mock_popen.assert_has_calls(calls) - - @mock.patch.object(cfn_helper, 'controlled_privileges') - def test_cfn_init_runs_list_commands_without_shell(self, mock_cp): - calls = [] - returns = [] - # command supplied as list shouldn't run on shell - calls.extend(popen_root_calls([['/bin/command1', 'arg']], shell=False)) - returns.append(FakePOpen('Doing something')) - # command supplied as string should run on shell - calls.extend(popen_root_calls(['/bin/command2'], shell=True)) - returns.append(FakePOpen('All good')) - - md_data = {"AWS::CloudFormation::Init": {"config": {"commands": { - "00_foo": {"command": ["/bin/command1", "arg"]}, - "01_bar": {"command": "/bin/command2"} - }}}} - - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.side_effect = returns - md = cfn_helper.Metadata('teststack', None) - self.assertTrue( - md.retrieve(meta_str=md_data, last_path=self.last_file)) - md.cfn_init() - mock_popen.assert_has_calls(calls) - - -class TestSourcesHandler(testtools.TestCase): - def test_apply_sources_empty(self): - sh = cfn_helper.SourcesHandler({}) - sh.apply_sources() - - def _test_apply_sources(self, url, end_file): - dest = tempfile.mkdtemp() - self.addCleanup(os.rmdir, dest) - sources = {dest: url} - td = os.path.dirname(end_file) - er = "mkdir -p '%s'; cd '%s'; curl -s '%s' | gunzip | tar -xvf -" - calls = popen_root_calls([er % (dest, dest, url)], shell=True) - - with mock.patch.object(tempfile, 'mkdtemp') as mock_mkdtemp: - mock_mkdtemp.return_value = td - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.return_value = FakePOpen('Curl good') - sh = cfn_helper.SourcesHandler(sources) - sh.apply_sources() - mock_popen.assert_has_calls(calls) - mock_mkdtemp.assert_called_with() - - @mock.patch.object(cfn_helper, 'controlled_privileges') - def test_apply_sources_github(self, mock_cp): - url = "https://github.com/NoSuchProject/tarball/NoSuchTarball" - dest = tempfile.mkdtemp() - self.addCleanup(os.rmdir, dest) - sources = {dest: url} - er = "mkdir -p '%s'; cd '%s'; curl -s '%s' | gunzip | tar -xvf -" - calls = popen_root_calls([er % 
(dest, dest, url)], shell=True) - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.return_value = FakePOpen('Curl good') - sh = cfn_helper.SourcesHandler(sources) - sh.apply_sources() - mock_popen.assert_has_calls(calls) - - @mock.patch.object(cfn_helper, 'controlled_privileges') - def test_apply_sources_general(self, mock_cp): - url = "https://website.no.existe/a/b/c/file.tar.gz" - dest = tempfile.mkdtemp() - self.addCleanup(os.rmdir, dest) - sources = {dest: url} - er = "mkdir -p '%s'; cd '%s'; curl -s '%s' | gunzip | tar -xvf -" - calls = popen_root_calls([er % (dest, dest, url)], shell=True) - with mock.patch('subprocess.Popen') as mock_popen: - mock_popen.return_value = FakePOpen('Curl good') - sh = cfn_helper.SourcesHandler(sources) - sh.apply_sources() - mock_popen.assert_has_calls(calls) - - def test_apply_source_cmd(self): - sh = cfn_helper.SourcesHandler({}) - er = "mkdir -p '%s'; cd '%s'; curl -s '%s' | %s | tar -xvf -" - dest = '/tmp' - # test tgz - url = 'http://www.example.com/a.tgz' - cmd = sh._apply_source_cmd(dest, url) - self.assertEqual(er % (dest, dest, url, "gunzip"), cmd) - # test tar.gz - url = 'http://www.example.com/a.tar.gz' - cmd = sh._apply_source_cmd(dest, url) - self.assertEqual(er % (dest, dest, url, "gunzip"), cmd) - # test github - tarball 1 - url = 'https://github.com/openstack/heat-cfntools/tarball/master' - cmd = sh._apply_source_cmd(dest, url) - self.assertEqual(er % (dest, dest, url, "gunzip"), cmd) - # test github - tarball 2 - url = 'https://github.com/openstack/heat-cfntools/tarball/master/' - cmd = sh._apply_source_cmd(dest, url) - self.assertEqual(er % (dest, dest, url, "gunzip"), cmd) - # test tbz2 - url = 'http://www.example.com/a.tbz2' - cmd = sh._apply_source_cmd(dest, url) - self.assertEqual(er % (dest, dest, url, "bunzip2"), cmd) - # test tar.bz2 - url = 'http://www.example.com/a.tar.bz2' - cmd = sh._apply_source_cmd(dest, url) - self.assertEqual(er % (dest, dest, url, "bunzip2"), cmd) - # test zip - er = "mkdir -p '%s'; cd '%s'; curl -s -o '%s' '%s' && unzip -o '%s'" - url = 'http://www.example.com/a.zip' - d = "/tmp/tmp2I0yNK" - tmp = "%s/a.zip" % d - with mock.patch.object(tempfile, 'mkdtemp') as mock_mkdtemp: - mock_mkdtemp.return_value = d - - cmd = sh._apply_source_cmd(dest, url) - self.assertEqual(er % (dest, dest, tmp, url, tmp), cmd) - # test gz - er = "mkdir -p '%s'; cd '%s'; curl -s '%s' | %s > '%s'" - url = 'http://www.example.com/a.sh.gz' - cmd = sh._apply_source_cmd(dest, url) - self.assertEqual(er % (dest, dest, url, "gunzip", "a.sh"), cmd) - # test bz2 - url = 'http://www.example.com/a.sh.bz2' - cmd = sh._apply_source_cmd(dest, url) - self.assertEqual(er % (dest, dest, url, "bunzip2", "a.sh"), cmd) - # test other - url = 'http://www.example.com/a.sh' - cmd = sh._apply_source_cmd(dest, url) - self.assertEqual("", cmd) - mock_mkdtemp.assert_called_with() diff --git a/heat_cfntools/tests/test_cfn_hup.py b/heat_cfntools/tests/test_cfn_hup.py deleted file mode 100644 index d5cf026..0000000 --- a/heat_cfntools/tests/test_cfn_hup.py +++ /dev/null @@ -1,91 +0,0 @@ -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import fixtures -import mock -import tempfile -import testtools - -from heat_cfntools.cfntools import cfn_helper - - -class TestCfnHup(testtools.TestCase): - - def setUp(self): - super(TestCfnHup, self).setUp() - self.logger = self.useFixture(fixtures.FakeLogger()) - self.stack_name = self.getUniqueString() - self.resource = self.getUniqueString() - self.region = self.getUniqueString() - self.creds = tempfile.NamedTemporaryFile() - self.metadata = cfn_helper.Metadata(self.stack_name, - self.resource, - credentials_file=self.creds.name, - region=self.region) - self.init_content = self.getUniqueString() - self.init_temp = tempfile.NamedTemporaryFile() - self.service_name = self.getUniqueString() - self.init_section = {'AWS::CloudFormation::Init': { - 'config': { - 'services': { - 'sysvinit': { - self.service_name: { - 'enabled': True, - 'ensureRunning': True, - } - } - }, - 'files': { - self.init_temp.name: { - 'content': self.init_content - } - } - } - } - } - - def _mock_retrieve_metadata(self, desired_metadata): - with mock.patch.object( - cfn_helper.Metadata, 'remote_metadata') as mock_method: - mock_method.return_value = desired_metadata - with tempfile.NamedTemporaryFile() as last_md: - self.metadata.retrieve(last_path=last_md.name) - - def _test_cfn_hup_metadata(self, metadata): - - self._mock_retrieve_metadata(metadata) - FakeServicesHandler = mock.Mock() - FakeServicesHandler.monitor_services.return_value = None - self.useFixture( - fixtures.MonkeyPatch( - 'heat_cfntools.cfntools.cfn_helper.ServicesHandler', - FakeServicesHandler)) - - section = self.getUniqueString() - triggers = 'post.add,post.delete,post.update' - path = 'Resources.%s.Metadata' % self.resource - runas = 'root' - action = '/bin/sh -c "true"' - hook = cfn_helper.Hook(section, triggers, path, runas, action) - - with mock.patch.object(cfn_helper.Hook, 'event') as mock_method: - mock_method.return_value = None - self.metadata.cfn_hup([hook]) - - def test_cfn_hup_empty_metadata(self): - self._test_cfn_hup_metadata({}) - - def test_cfn_hup_cfn_init_metadata(self): - self._test_cfn_hup_metadata(self.init_section) diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index e595465..0000000 --- a/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -pbr>=0.6,!=0.7,<1.0 -boto>=2.12.0,!=2.13.0 -psutil>=1.1.1,<2.0.0 -six>=1.9.0 diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 64195d8..0000000 --- a/setup.cfg +++ /dev/null @@ -1,43 +0,0 @@ -[metadata] -name = heat-cfntools -summary = Tools required to be installed on Heat provisioned cloud instances -description-file = - README.rst -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = http://www.openstack.org/ -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - -[files] -packages = - heat_cfntools -scripts = - 
bin/cfn-create-aws-symlinks - bin/cfn-get-metadata - bin/cfn-hup - bin/cfn-init - bin/cfn-push-stats - bin/cfn-signal - -[global] -setup-hooks = - pbr.hooks.setup_hook - -[wheel] -universal = 1 - -[build_sphinx] -source-dir = doc/source -build-dir = doc/build -all_files = 1 - -[upload_sphinx] -upload-dir = doc/build/html diff --git a/setup.py b/setup.py deleted file mode 100755 index 70c2b3f..0000000 --- a/setup.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -setuptools.setup( - setup_requires=['pbr'], - pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index 9a2e1ea..0000000 --- a/test-requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -# Hacking already pins down pep8, pyflakes and flake8 -hacking>=0.8.0,<0.9 - -mock>=1.0 -discover -openstackdocstheme>=1.11.0 # Apache-2.0 -sphinx>=1.6.2 # BSD -testrepository>=0.0.18 -testtools>=0.9.34 diff --git a/tools/lintstack.py b/tools/lintstack.py deleted file mode 100755 index 5754637..0000000 --- a/tools/lintstack.py +++ /dev/null @@ -1,198 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2012, AT&T Labs, Yun Mao -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""pylint error checking.""" - -import cStringIO as StringIO -import json -import re -import sys - -from pylint import lint -from pylint.reporters import text - -# Note(maoy): E1103 is error code related to partial type inference -ignore_codes = ["E1103"] -# Note(maoy): the error message is the pattern of E0202. It should be ignored -# for nova.tests modules -ignore_messages = ["An attribute affected in nova.tests"] -# Note(maoy): we ignore all errors in openstack.common because it should be -# checked elsewhere. We also ignore nova.tests for now due to high false -# positive rate. -ignore_modules = ["nova/openstack/common/", "nova/tests/"] - -KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions" - - -class LintOutput(object): - - _cached_filename = None - _cached_content = None - - def __init__(self, filename, lineno, line_content, code, message, - lintoutput): - self.filename = filename - self.lineno = lineno - self.line_content = line_content - self.code = code - self.message = message - self.lintoutput = lintoutput - - @classmethod - def from_line(cls, line): - m = re.search(r"(\S+):(\d+): \[(\S+)(, \S+)?] 
(.*)", line) - matched = m.groups() - filename, lineno, code, message = (matched[0], int(matched[1]), - matched[2], matched[-1]) - if cls._cached_filename != filename: - with open(filename) as f: - cls._cached_content = list(f.readlines()) - cls._cached_filename = filename - line_content = cls._cached_content[lineno - 1].rstrip() - return cls(filename, lineno, line_content, code, message, - line.rstrip()) - - @classmethod - def from_msg_to_dict(cls, msg): - """From the output of pylint msg, to a dict, where each key - is a unique error identifier, value is a list of LintOutput - """ - result = {} - for line in msg.splitlines(): - obj = cls.from_line(line) - if obj.is_ignored(): - continue - key = obj.key() - if key not in result: - result[key] = [] - result[key].append(obj) - return result - - def is_ignored(self): - if self.code in ignore_codes: - return True - if any(self.filename.startswith(name) for name in ignore_modules): - return True - if any(msg in self.message for msg in ignore_messages): - return True - return False - - def key(self): - if self.code in ["E1101", "E1103"]: - # These two types of errors are like Foo class has no member bar. - # We discard the source code so that the error will be ignored - # next time another Foo.bar is encountered. - return self.message, "" - return self.message, self.line_content.strip() - - def json(self): - return json.dumps(self.__dict__) - - def review_str(self): - return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n" - "%(code)s: %(message)s" % self.__dict__) - - -class ErrorKeys(object): - - @classmethod - def print_json(cls, errors, output=sys.stdout): - print >>output, "# automatically generated by tools/lintstack.py" - for i in sorted(errors.keys()): - print >>output, json.dumps(i) - - @classmethod - def from_file(cls, filename): - keys = set() - for line in open(filename): - if line and line[0] != "#": - d = json.loads(line) - keys.add(tuple(d)) - return keys - - -def run_pylint(): - buff = StringIO.StringIO() - reporter = text.ParseableTextReporter(output=buff) - args = ["--include-ids=y", "-E", "nova"] - lint.Run(args, reporter=reporter, exit=False) - val = buff.getvalue() - buff.close() - return val - - -def generate_error_keys(msg=None): - print "Generating", KNOWN_PYLINT_EXCEPTIONS_FILE - if msg is None: - msg = run_pylint() - errors = LintOutput.from_msg_to_dict(msg) - with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as f: - ErrorKeys.print_json(errors, output=f) - - -def validate(newmsg=None): - print "Loading", KNOWN_PYLINT_EXCEPTIONS_FILE - known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE) - if newmsg is None: - print "Running pylint. Be patient..." - newmsg = run_pylint() - errors = LintOutput.from_msg_to_dict(newmsg) - - print "Unique errors reported by pylint: was %d, now %d." \ - % (len(known), len(errors)) - passed = True - for err_key, err_list in errors.items(): - for err in err_list: - if err_key not in known: - print err.lintoutput - print - passed = False - if passed: - print "Congrats! pylint check passed." - redundant = known - set(errors.keys()) - if redundant: - print "Extra credit: some known pylint exceptions disappeared." - for i in sorted(redundant): - print json.dumps(i) - print "Consider regenerating the exception file if you will." - else: - print("Please fix the errors above. 
If you believe they are false" - " positives, run 'tools/lintstack.py generate' to overwrite.") - sys.exit(1) - - -def usage(): - print """Usage: tools/lintstack.py [generate|validate] - To generate pylint_exceptions file: tools/lintstack.py generate - To validate the current commit: tools/lintstack.py - """ - - -def main(): - option = "validate" - if len(sys.argv) > 1: - option = sys.argv[1] - if option == "generate": - generate_error_keys() - elif option == "validate": - validate() - else: - usage() - - -if __name__ == "__main__": - main() diff --git a/tools/lintstack.sh b/tools/lintstack.sh deleted file mode 100755 index d8591d0..0000000 --- a/tools/lintstack.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) 2012-2013, AT&T Labs, Yun Mao -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Use lintstack.py to compare pylint errors. -# We run pylint twice, once on HEAD, once on the code before the latest -# commit for review. -set -e -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -# Get the current branch name. -GITHEAD=`git rev-parse --abbrev-ref HEAD` -if [[ "$GITHEAD" == "HEAD" ]]; then - # In detached head mode, get revision number instead - GITHEAD=`git rev-parse HEAD` - echo "Currently we are at commit $GITHEAD" -else - echo "Currently we are at branch $GITHEAD" -fi - -cp -f $TOOLS_DIR/lintstack.py $TOOLS_DIR/lintstack.head.py - -if git rev-parse HEAD^2 2>/dev/null; then - # The HEAD is a Merge commit. Here, the patch to review is - # HEAD^2, the master branch is at HEAD^1, and the patch was - # written based on HEAD^2~1. - PREV_COMMIT=`git rev-parse HEAD^2~1` - git checkout HEAD~1 - # The git merge is necessary for reviews with a series of patches. - # If not, this is a no-op so won't hurt either. - git merge $PREV_COMMIT -else - # The HEAD is not a merge commit. This won't happen on gerrit. - # Most likely you are running against your own patch locally. - # We assume the patch to examine is HEAD, and we compare it against - # HEAD~1 - git checkout HEAD~1 -fi - -# First generate tools/pylint_exceptions from HEAD~1 -$TOOLS_DIR/lintstack.head.py generate -# Then use that as a reference to compare against HEAD -git checkout $GITHEAD -$TOOLS_DIR/lintstack.head.py -echo "Check passed. 
FYI: the pylint exceptions are:" -cat $TOOLS_DIR/pylint_exceptions - diff --git a/tox.ini b/tox.ini deleted file mode 100644 index f974f36..0000000 --- a/tox.ini +++ /dev/null @@ -1,35 +0,0 @@ -[tox] -envlist = py34,py27,pep8 - -[testenv] -setenv = VIRTUAL_ENV={envdir} -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt -commands = python setup.py testr --slowest --testr-args='{posargs}' - -[testenv:pep8] -commands = flake8 - flake8 --filename=cfn-* bin - -[testenv:pylint] -setenv = VIRTUAL_ENV={envdir} -deps = -r{toxinidir}/requirements.txt - pylint==0.26.0 -commands = bash tools/lintstack.sh - -[testenv:cover] -commands = - python setup.py testr --coverage --testr-args='{posargs}' - -[testenv:venv] -commands = {posargs} - -[flake8] -show-source = true -exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,tools - -[testenv:docs] -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt - sphinxcontrib-httpdomain -commands = python setup.py build_sphinx
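
For reference, the removed tests above all share one pattern: subprocess.Popen is patched with a mock so no real command ever runs, the expected invocations are built with the module's popen_root_calls() helper, and a FakePOpen object stands in for the process handle. The sketch below is a minimal, self-contained reconstruction of that pattern for illustration only; this FakePOpen and popen_root_calls are simplified stand-ins written here, not the exact helpers deleted above (in particular, the call shape produced by popen_root_calls is an assumption — the real helper also accounted for the root/privilege wrapping used with controlled_privileges).

from unittest import mock  # the removed tests used the external 'mock' package
import subprocess


class FakePOpen(object):
    # Simplified stand-in for the FakePOpen helper the deleted module defined:
    # returncode drives the "running" / "not running" branches under test.
    def __init__(self, stdout='', stderr='', returncode=0):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr

    def communicate(self, input=None):
        return (self.stdout, self.stderr)


def popen_root_calls(commands, shell=False):
    # Assumed shape only: build the mock.call objects a test expects to see.
    return [mock.call(cmd, shell=shell) for cmd in commands]


def run_status(service):
    # Stand-in for code under test that shells out (e.g. a services handler).
    proc = subprocess.Popen(['/bin/systemctl', 'status', service], shell=False)
    proc.communicate()
    return proc.returncode


with mock.patch('subprocess.Popen') as mock_popen:
    mock_popen.return_value = FakePOpen(returncode=-1)  # simulate "not running"
    assert run_status('httpd.service') == -1
    mock_popen.assert_has_calls(
        popen_root_calls([['/bin/systemctl', 'status', 'httpd.service']]))

Run directly with python (or under pytest): the assertion passes without /bin/systemctl existing, which is why the deleted suite could exercise systemd, sysvinit, and curl code paths on any platform.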