summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRyan Beisner <ryan.beisner@canonical.com>2018-11-01 20:43:46 -0500
committerRyan Beisner <ryan.beisner@canonical.com>2019-02-27 10:31:57 +0100
commit51b76b3cb8e730203f818813581a3282a0dd3e98 (patch)
tree1497556263829f7479373b32ffae883a6452653d
parente625b1a8814ac2b9a5321e3ea994624999397f7f (diff)
Retire projectHEADmaster
Leave README around for those that follow. http://lists.openstack.org/pipermail/openstack-discuss/2019-February/003186.html http://lists.openstack.org/pipermail/openstack-discuss/2018-November/000057.html Change-Id: I02a34115c6c3d6e9c4b9152af0c96f2fe79866b9
Notes
Notes (review): Code-Review+2: Frode Nordahl <frode.nordahl@canonical.com> Workflow+1: Frode Nordahl <frode.nordahl@canonical.com> Verified+2: Zuul Submitted-by: Zuul Submitted-at: Wed, 27 Feb 2019 09:54:01 +0000 Reviewed-on: https://review.openstack.org/614907 Project: openstack/charm-odl-controller Branch: refs/heads/master
-rw-r--r--.gitignore6
-rw-r--r--.project17
-rw-r--r--.testr.conf8
-rw-r--r--.zuul.yaml3
-rw-r--r--LICENSE202
-rw-r--r--Makefile21
-rw-r--r--README.md46
-rw-r--r--actions/.keep3
-rw-r--r--charm-helpers-sync.yaml11
-rw-r--r--config.yaml40
-rw-r--r--copyright16
-rw-r--r--files/odl-controller.conf22
-rw-r--r--files/odl-controller.service12
-rw-r--r--hooks/charmhelpers/__init__.py97
-rw-r--r--hooks/charmhelpers/contrib/__init__.py13
-rw-r--r--hooks/charmhelpers/contrib/network/__init__.py13
-rw-r--r--hooks/charmhelpers/contrib/network/ip.py602
-rw-r--r--hooks/charmhelpers/contrib/openstack/__init__.py13
-rw-r--r--hooks/charmhelpers/contrib/openstack/alternatives.py44
-rw-r--r--hooks/charmhelpers/contrib/openstack/amulet/__init__.py13
-rw-r--r--hooks/charmhelpers/contrib/openstack/amulet/deployment.py357
-rw-r--r--hooks/charmhelpers/contrib/openstack/amulet/utils.py1515
-rw-r--r--hooks/charmhelpers/contrib/openstack/cert_utils.py227
-rw-r--r--hooks/charmhelpers/contrib/openstack/context.py1904
-rw-r--r--hooks/charmhelpers/contrib/openstack/exceptions.py21
-rw-r--r--hooks/charmhelpers/contrib/openstack/files/__init__.py16
-rwxr-xr-xhooks/charmhelpers/contrib/openstack/files/check_haproxy.sh34
-rwxr-xr-xhooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh30
-rw-r--r--hooks/charmhelpers/contrib/openstack/ha/__init__.py13
-rw-r--r--hooks/charmhelpers/contrib/openstack/ha/utils.py265
-rw-r--r--hooks/charmhelpers/contrib/openstack/ip.py196
-rw-r--r--hooks/charmhelpers/contrib/openstack/keystone.py178
-rw-r--r--hooks/charmhelpers/contrib/openstack/neutron.py354
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/__init__.py16
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/ceph.conf24
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/git.upstart17
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg77
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/memcached.conf53
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend29
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf29
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken12
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy10
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka20
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/section-oslo-cache6
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware5
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications11
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo22
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/section-zeromq14
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf91
-rw-r--r--hooks/charmhelpers/contrib/openstack/templating.py379
-rw-r--r--hooks/charmhelpers/contrib/openstack/utils.py1691
-rw-r--r--hooks/charmhelpers/contrib/openstack/vaultlocker.py126
-rw-r--r--hooks/charmhelpers/contrib/python/__init__.py13
-rw-r--r--hooks/charmhelpers/contrib/python/packages.py154
-rw-r--r--hooks/charmhelpers/contrib/storage/__init__.py13
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/__init__.py13
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/bcache.py74
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/ceph.py1472
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/loopback.py86
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/lvm.py182
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/utils.py85
-rw-r--r--hooks/charmhelpers/core/__init__.py13
-rw-r--r--hooks/charmhelpers/core/decorators.py55
-rw-r--r--hooks/charmhelpers/core/files.py43
-rw-r--r--hooks/charmhelpers/core/fstab.py132
-rw-r--r--hooks/charmhelpers/core/hookenv.py1299
-rw-r--r--hooks/charmhelpers/core/host.py1042
-rw-r--r--hooks/charmhelpers/core/host_factory/__init__.py0
-rw-r--r--hooks/charmhelpers/core/host_factory/centos.py72
-rw-r--r--hooks/charmhelpers/core/host_factory/ubuntu.py91
-rw-r--r--hooks/charmhelpers/core/hugepage.py69
-rw-r--r--hooks/charmhelpers/core/kernel.py72
-rw-r--r--hooks/charmhelpers/core/kernel_factory/__init__.py0
-rw-r--r--hooks/charmhelpers/core/kernel_factory/centos.py17
-rw-r--r--hooks/charmhelpers/core/kernel_factory/ubuntu.py13
-rw-r--r--hooks/charmhelpers/core/services/__init__.py16
-rw-r--r--hooks/charmhelpers/core/services/base.py362
-rw-r--r--hooks/charmhelpers/core/services/helpers.py290
-rw-r--r--hooks/charmhelpers/core/strutils.py129
-rw-r--r--hooks/charmhelpers/core/sysctl.py58
-rw-r--r--hooks/charmhelpers/core/templating.py93
-rw-r--r--hooks/charmhelpers/core/unitdata.py525
-rw-r--r--hooks/charmhelpers/fetch/__init__.py205
-rw-r--r--hooks/charmhelpers/fetch/archiveurl.py165
-rw-r--r--hooks/charmhelpers/fetch/bzrurl.py76
-rw-r--r--hooks/charmhelpers/fetch/centos.py171
-rw-r--r--hooks/charmhelpers/fetch/giturl.py69
-rw-r--r--hooks/charmhelpers/fetch/snap.py150
-rw-r--r--hooks/charmhelpers/fetch/ubuntu.py592
-rw-r--r--hooks/charmhelpers/osplatform.py25
-rw-r--r--hooks/charmhelpers/payload/__init__.py15
-rw-r--r--hooks/charmhelpers/payload/archive.py71
-rw-r--r--hooks/charmhelpers/payload/execd.py65
l---------hooks/config-changed1
l---------hooks/controller-api-relation-joined1
-rwxr-xr-xhooks/install20
l---------hooks/install.real1
-rwxr-xr-xhooks/odl_controller_hooks.py133
-rw-r--r--hooks/odl_controller_utils.py181
l---------hooks/ovsdb-manager-relation-joined1
l---------hooks/start1
l---------hooks/stop1
l---------hooks/upgrade-charm1
-rw-r--r--icon.svg394
-rw-r--r--lib/.keep3
-rw-r--r--metadata.yaml20
-rw-r--r--requirements.txt11
-rw-r--r--templates/settings.xml37
-rw-r--r--test-requirements.txt33
-rw-r--r--tests/README.md9
-rw-r--r--tests/basic_deployment.py541
-rwxr-xr-xtests/gate-basic-trusty-icehouse23
-rwxr-xr-xtests/gate-basic-trusty-mitaka25
-rwxr-xr-xtests/gate-basic-xenial-mitaka24
-rwxr-xr-xtests/gate-basic-xenial-ocata26
-rw-r--r--tests/gate-basic-xenial-pike26
-rw-r--r--tests/tests.yaml18
-rw-r--r--tox.ini85
-rw-r--r--unit_tests/__init__.py17
-rw-r--r--unit_tests/odl_outputs.py285
-rw-r--r--unit_tests/test_odl_controller_hooks.py144
-rw-r--r--unit_tests/test_odl_controller_utils.py120
-rw-r--r--unit_tests/test_utils.py138
123 files changed, 6 insertions, 19345 deletions
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 98435fd..0000000
--- a/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
1bin
2.testrepository
3.coverage
4.tox
5*.sw[nop]
6*.pyc
diff --git a/.project b/.project
deleted file mode 100644
index 435dd5a..0000000
--- a/.project
+++ /dev/null
@@ -1,17 +0,0 @@
1<?xml version="1.0" encoding="UTF-8"?>
2<projectDescription>
3 <name>odl-controller</name>
4 <comment></comment>
5 <projects>
6 </projects>
7 <buildSpec>
8 <buildCommand>
9 <name>org.python.pydev.PyDevBuilder</name>
10 <arguments>
11 </arguments>
12 </buildCommand>
13 </buildSpec>
14 <natures>
15 <nature>org.python.pydev.pythonNature</nature>
16 </natures>
17</projectDescription>
diff --git a/.testr.conf b/.testr.conf
deleted file mode 100644
index 801646b..0000000
--- a/.testr.conf
+++ /dev/null
@@ -1,8 +0,0 @@
1[DEFAULT]
2test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
3 OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
4 OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
5 ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION
6
7test_id_option=--load-list $IDFILE
8test_list_option=--list
diff --git a/.zuul.yaml b/.zuul.yaml
index aa9c508..e7c200a 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,4 +1,3 @@
1- project: 1- project:
2 templates: 2 templates:
3 - python-charm-jobs 3 - noop-jobs
4 - openstack-python35-jobs-nonvoting
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index d645695..0000000
--- a/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
1
2 Apache License
3 Version 2.0, January 2004
4 http://www.apache.org/licenses/
5
6 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
8 1. Definitions.
9
10 "License" shall mean the terms and conditions for use, reproduction,
11 and distribution as defined by Sections 1 through 9 of this document.
12
13 "Licensor" shall mean the copyright owner or entity authorized by
14 the copyright owner that is granting the License.
15
16 "Legal Entity" shall mean the union of the acting entity and all
17 other entities that control, are controlled by, or are under common
18 control with that entity. For the purposes of this definition,
19 "control" means (i) the power, direct or indirect, to cause the
20 direction or management of such entity, whether by contract or
21 otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 outstanding shares, or (iii) beneficial ownership of such entity.
23
24 "You" (or "Your") shall mean an individual or Legal Entity
25 exercising permissions granted by this License.
26
27 "Source" form shall mean the preferred form for making modifications,
28 including but not limited to software source code, documentation
29 source, and configuration files.
30
31 "Object" form shall mean any form resulting from mechanical
32 transformation or translation of a Source form, including but
33 not limited to compiled object code, generated documentation,
34 and conversions to other media types.
35
36 "Work" shall mean the work of authorship, whether in Source or
37 Object form, made available under the License, as indicated by a
38 copyright notice that is included in or attached to the work
39 (an example is provided in the Appendix below).
40
41 "Derivative Works" shall mean any work, whether in Source or Object
42 form, that is based on (or derived from) the Work and for which the
43 editorial revisions, annotations, elaborations, or other modifications
44 represent, as a whole, an original work of authorship. For the purposes
45 of this License, Derivative Works shall not include works that remain
46 separable from, or merely link (or bind by name) to the interfaces of,
47 the Work and Derivative Works thereof.
48
49 "Contribution" shall mean any work of authorship, including
50 the original version of the Work and any modifications or additions
51 to that Work or Derivative Works thereof, that is intentionally
52 submitted to Licensor for inclusion in the Work by the copyright owner
53 or by an individual or Legal Entity authorized to submit on behalf of
54 the copyright owner. For the purposes of this definition, "submitted"
55 means any form of electronic, verbal, or written communication sent
56 to the Licensor or its representatives, including but not limited to
57 communication on electronic mailing lists, source code control systems,
58 and issue tracking systems that are managed by, or on behalf of, the
59 Licensor for the purpose of discussing and improving the Work, but
60 excluding communication that is conspicuously marked or otherwise
61 designated in writing by the copyright owner as "Not a Contribution."
62
63 "Contributor" shall mean Licensor and any individual or Legal Entity
64 on behalf of whom a Contribution has been received by Licensor and
65 subsequently incorporated within the Work.
66
67 2. Grant of Copyright License. Subject to the terms and conditions of
68 this License, each Contributor hereby grants to You a perpetual,
69 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 copyright license to reproduce, prepare Derivative Works of,
71 publicly display, publicly perform, sublicense, and distribute the
72 Work and such Derivative Works in Source or Object form.
73
74 3. Grant of Patent License. Subject to the terms and conditions of
75 this License, each Contributor hereby grants to You a perpetual,
76 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 (except as stated in this section) patent license to make, have made,
78 use, offer to sell, sell, import, and otherwise transfer the Work,
79 where such license applies only to those patent claims licensable
80 by such Contributor that are necessarily infringed by their
81 Contribution(s) alone or by combination of their Contribution(s)
82 with the Work to which such Contribution(s) was submitted. If You
83 institute patent litigation against any entity (including a
84 cross-claim or counterclaim in a lawsuit) alleging that the Work
85 or a Contribution incorporated within the Work constitutes direct
86 or contributory patent infringement, then any patent licenses
87 granted to You under this License for that Work shall terminate
88 as of the date such litigation is filed.
89
90 4. Redistribution. You may reproduce and distribute copies of the
91 Work or Derivative Works thereof in any medium, with or without
92 modifications, and in Source or Object form, provided that You
93 meet the following conditions:
94
95 (a) You must give any other recipients of the Work or
96 Derivative Works a copy of this License; and
97
98 (b) You must cause any modified files to carry prominent notices
99 stating that You changed the files; and
100
101 (c) You must retain, in the Source form of any Derivative Works
102 that You distribute, all copyright, patent, trademark, and
103 attribution notices from the Source form of the Work,
104 excluding those notices that do not pertain to any part of
105 the Derivative Works; and
106
107 (d) If the Work includes a "NOTICE" text file as part of its
108 distribution, then any Derivative Works that You distribute must
109 include a readable copy of the attribution notices contained
110 within such NOTICE file, excluding those notices that do not
111 pertain to any part of the Derivative Works, in at least one
112 of the following places: within a NOTICE text file distributed
113 as part of the Derivative Works; within the Source form or
114 documentation, if provided along with the Derivative Works; or,
115 within a display generated by the Derivative Works, if and
116 wherever such third-party notices normally appear. The contents
117 of the NOTICE file are for informational purposes only and
118 do not modify the License. You may add Your own attribution
119 notices within Derivative Works that You distribute, alongside
120 or as an addendum to the NOTICE text from the Work, provided
121 that such additional attribution notices cannot be construed
122 as modifying the License.
123
124 You may add Your own copyright statement to Your modifications and
125 may provide additional or different license terms and conditions
126 for use, reproduction, or distribution of Your modifications, or
127 for any such Derivative Works as a whole, provided Your use,
128 reproduction, and distribution of the Work otherwise complies with
129 the conditions stated in this License.
130
131 5. Submission of Contributions. Unless You explicitly state otherwise,
132 any Contribution intentionally submitted for inclusion in the Work
133 by You to the Licensor shall be under the terms and conditions of
134 this License, without any additional terms or conditions.
135 Notwithstanding the above, nothing herein shall supersede or modify
136 the terms of any separate license agreement you may have executed
137 with Licensor regarding such Contributions.
138
139 6. Trademarks. This License does not grant permission to use the trade
140 names, trademarks, service marks, or product names of the Licensor,
141 except as required for reasonable and customary use in describing the
142 origin of the Work and reproducing the content of the NOTICE file.
143
144 7. Disclaimer of Warranty. Unless required by applicable law or
145 agreed to in writing, Licensor provides the Work (and each
146 Contributor provides its Contributions) on an "AS IS" BASIS,
147 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 implied, including, without limitation, any warranties or conditions
149 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 PARTICULAR PURPOSE. You are solely responsible for determining the
151 appropriateness of using or redistributing the Work and assume any
152 risks associated with Your exercise of permissions under this License.
153
154 8. Limitation of Liability. In no event and under no legal theory,
155 whether in tort (including negligence), contract, or otherwise,
156 unless required by applicable law (such as deliberate and grossly
157 negligent acts) or agreed to in writing, shall any Contributor be
158 liable to You for damages, including any direct, indirect, special,
159 incidental, or consequential damages of any character arising as a
160 result of this License or out of the use or inability to use the
161 Work (including but not limited to damages for loss of goodwill,
162 work stoppage, computer failure or malfunction, or any and all
163 other commercial damages or losses), even if such Contributor
164 has been advised of the possibility of such damages.
165
166 9. Accepting Warranty or Additional Liability. While redistributing
167 the Work or Derivative Works thereof, You may choose to offer,
168 and charge a fee for, acceptance of support, warranty, indemnity,
169 or other liability obligations and/or rights consistent with this
170 License. However, in accepting such obligations, You may act only
171 on Your own behalf and on Your sole responsibility, not on behalf
172 of any other Contributor, and only if You agree to indemnify,
173 defend, and hold each Contributor harmless for any liability
174 incurred by, or claims asserted against, such Contributor by reason
175 of your accepting any such warranty or additional liability.
176
177 END OF TERMS AND CONDITIONS
178
179 APPENDIX: How to apply the Apache License to your work.
180
181 To apply the Apache License to your work, attach the following
182 boilerplate notice, with the fields enclosed by brackets "[]"
183 replaced with your own identifying information. (Don't include
184 the brackets!) The text should be enclosed in the appropriate
185 comment syntax for the file format. We also recommend that a
186 file or class name and description of purpose be included on the
187 same "printed page" as the copyright notice for easier
188 identification within third-party archives.
189
190 Copyright [yyyy] [name of copyright owner]
191
192 Licensed under the Apache License, Version 2.0 (the "License");
193 you may not use this file except in compliance with the License.
194 You may obtain a copy of the License at
195
196 http://www.apache.org/licenses/LICENSE-2.0
197
198 Unless required by applicable law or agreed to in writing, software
199 distributed under the License is distributed on an "AS IS" BASIS,
200 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 See the License for the specific language governing permissions and
202 limitations under the License.
diff --git a/Makefile b/Makefile
deleted file mode 100644
index 158f74c..0000000
--- a/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
1#!/usr/bin/make
2PYTHON := /usr/bin/env python
3
4lint:
5 @tox -e pep8
6
7test:
8 @echo Starting unit tests...
9 @tox -e py27
10
11functional_test:
12 @echo Starting amulet tests...
13 @tox -e func27
14
15bin/charm_helpers_sync.py:
16 @mkdir -p bin
17 @curl -o bin/charm_helpers_sync.py https://raw.githubusercontent.com/juju/charm-helpers/master/tools/charm_helpers_sync/charm_helpers_sync.py
18
19
20sync: bin/charm_helpers_sync.py
21 @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
diff --git a/README.md b/README.md
index a239e77..b72ee4f 100644
--- a/README.md
+++ b/README.md
@@ -1,42 +1,6 @@
1# Overview 1This project is no longer maintained.
2 2
3OpenDaylight (www.opendaylight.org) is a fully featured Software Defined Networking (SDN) solution for private clouds. It provides a Neutron plugin to 3The contents of this repository are still available in the Git
4integrate with OpenStack. 4source code management system. To see the contents of this
5 5repository before it reached its end of life, please check out the
6This charm is designed to be used in conjunction with the rest of the OpenStack related charms in the charm store to virtualize the network that Nova Compute instances plug into. 6previous commit with "git checkout HEAD^1".
7
8This charm provides the controller component of an OpenDayLight installation.
9
10Only OpenStack Icehouse or newer is supported.
11
12# Usage
13
14To deploy the OpenDayLight controller:
15
16 juju deploy odl-controller
17
18To integrate OpenDayLight into an OpenStack Cloud (subset of commands):
19
20 juju deploy neutron-api-odl
21 juju deploy openvswitch-odl
22
23The neutron-gateway charm must also be deployed with 'ovs-odl' as the plugin configuration option:
24
25 cat > config.yaml << EOF
26 neutron-gateway:
27 plugin: ovs-odl
28 EOF
29 juju deploy --config config.yaml neutron-gateway
30
31And then add relations between services to complete the deployment:
32
33 juju add-relation neutron-api neutron-api-odl
34 juju add-relation neutron-api-odl odl-controller
35
36 juju add-relation openvswitch-odl nova-compute
37 juju add-relation openvswitch-odl neutron-gateway
38 juju add-relation openvswitch-odl odl-controller
39
40# Contact Information
41
42Report bugs on [Launchpad](http://bugs.launchpad.net/charms/+source/odl-controller/+filebug)
diff --git a/actions/.keep b/actions/.keep
deleted file mode 100644
index f49b91a..0000000
--- a/actions/.keep
+++ /dev/null
@@ -1,3 +0,0 @@
1 This file was created by release-tools to ensure that this empty
2 directory is preserved in vcs re: lint check definitions in global
3 tox.ini files. This file can be removed if/when this dir is actually in use.
diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml
deleted file mode 100644
index 4b24474..0000000
--- a/charm-helpers-sync.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
1repo: https://github.com/juju/charm-helpers
2destination: hooks/charmhelpers
3include:
4 - core
5 - fetch
6 - payload
7 - osplatform
8 - contrib.openstack|inc=*
9 - contrib.storage
10 - contrib.network.ip
11 - contrib.python.packages
diff --git a/config.yaml b/config.yaml
deleted file mode 100644
index 3f2c691..0000000
--- a/config.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
1options:
2 profile:
3 type: string
4 default: default
5 description: |
6 SDN controller profile to configure OpenDayLight for; supported values include
7
8 cisco-vpp: Cisco VPP for OpenStack
9 openvswitch-odl: Open vSwitch OpenDayLight for OpenStack - Helium release
10 openvswitch-odl-lithium: Open vSwitch OpenDayLight for OpenStack - Lithium release
11 openvswitch-odl-beryllium: Open vSwitch OpenDayLight for OpenStack - Beryllium release
12 openvswitch-odl-boron: Open vSwitch OpenDayLight for OpenStack - Boron release
13
14 Only a single profile is supported at any one time.
15 install-url:
16 type: string
17 default: "https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.2-Helium-SR2/distribution-karaf-0.2.2-Helium-SR2.tar.gz"
18 description: |
19 Web addressable location of OpenDayLight binaries to install
20
21 If unset, the charm will install binaries from the opendaylight-karaf
22 package.
23 install-sources:
24 type: string
25 default: ''
26 description: |
27 Package sources to install. Can be used to specify where to install the
28 opendaylight-karaf package from.
29 install-keys:
30 type: string
31 default: ''
32 description: Apt keys for package install sources
33 http-proxy:
34 type: string
35 default: ''
36 description: Proxy to use for http connections for OpenDayLight
37 https-proxy:
38 type: string
39 default: ''
40 description: Proxy to use for https connections for OpenDayLight
diff --git a/copyright b/copyright
deleted file mode 100644
index 6c92060..0000000
--- a/copyright
+++ /dev/null
@@ -1,16 +0,0 @@
1Format: http://dep.debian.net/deps/dep5/
2
3Files: *
4Copyright: Copyright 2015, Canonical Ltd., All Rights Reserved.
5License: Apache-2.0
6 Licensed under the Apache License, Version 2.0 (the "License"); you may
7 not use this file except in compliance with the License. You may obtain
8 a copy of the License at
9
10 http://www.apache.org/licenses/LICENSE-2.0
11
12 Unless required by applicable law or agreed to in writing, software
13 distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 License for the specific language governing permissions and limitations
16 under the License.
diff --git a/files/odl-controller.conf b/files/odl-controller.conf
deleted file mode 100644
index 9995abb..0000000
--- a/files/odl-controller.conf
+++ /dev/null
@@ -1,22 +0,0 @@
1description "OpenDaylight Controller"
2author "Robert Ayres <robert.ayres@ubuntu.com>"
3
4start on runlevel [2345]
5stop on runlevel [!2345]
6
7chdir /opt/opendaylight-karaf
8setuid opendaylight
9
10env ODL_HOME=/opt/opendaylight-karaf
11env ODL_LOG=/var/log/opendaylight/odl-controller.log
12
13pre-start script
14 [ -e "$ODL_HOME" ] || { stop; exit 0; }
15end script
16
17exec "$ODL_HOME/bin/karaf" server >> "$ODL_LOG" 2>&1 < /dev/null
18
19pre-stop script
20 "$ODL_HOME/bin/karaf" stop
21 sleep 10
22end script
diff --git a/files/odl-controller.service b/files/odl-controller.service
deleted file mode 100644
index 1cf22bb..0000000
--- a/files/odl-controller.service
+++ /dev/null
@@ -1,12 +0,0 @@
1[Unit]
2Description=OpenDayLight SDN Controller
3After=network.target
4
5[Service]
6Type=forking
7User=opendaylight
8Group=opendaylight
9ExecStart=/opt/opendaylight-karaf/bin/start
10
11[Install]
12WantedBy=multi-user.target
diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py
deleted file mode 100644
index e7aa471..0000000
--- a/hooks/charmhelpers/__init__.py
+++ /dev/null
@@ -1,97 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15# Bootstrap charm-helpers, installing its dependencies if necessary using
16# only standard libraries.
17from __future__ import print_function
18from __future__ import absolute_import
19
20import functools
21import inspect
22import subprocess
23import sys
24
25try:
26 import six # flake8: noqa
27except ImportError:
28 if sys.version_info.major == 2:
29 subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
30 else:
31 subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
32 import six # flake8: noqa
33
34try:
35 import yaml # flake8: noqa
36except ImportError:
37 if sys.version_info.major == 2:
38 subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
39 else:
40 subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
41 import yaml # flake8: noqa
42
43
44# Holds a list of mapping of mangled function names that have been deprecated
45# using the @deprecate decorator below. This is so that the warning is only
46# printed once for each usage of the function.
47__deprecated_functions = {}
48
49
50def deprecate(warning, date=None, log=None):
51 """Add a deprecation warning the first time the function is used.
52 The date, which is a string in semi-ISO8660 format indicate the year-month
53 that the function is officially going to be removed.
54
55 usage:
56
57 @deprecate('use core/fetch/add_source() instead', '2017-04')
58 def contributed_add_source_thing(...):
59 ...
60
61 And it then prints to the log ONCE that the function is deprecated.
62 The reason for passing the logging function (log) is so that hookenv.log
63 can be used for a charm if needed.
64
65 :param warning: String to indicat where it has moved ot.
66 :param date: optional sting, in YYYY-MM format to indicate when the
67 function will definitely (probably) be removed.
68 :param log: The log function to call to log. If not, logs to stdout
69 """
70 def wrap(f):
71
72 @functools.wraps(f)
73 def wrapped_f(*args, **kwargs):
74 try:
75 module = inspect.getmodule(f)
76 file = inspect.getsourcefile(f)
77 lines = inspect.getsourcelines(f)
78 f_name = "{}-{}-{}..{}-{}".format(
79 module.__name__, file, lines[0], lines[-1], f.__name__)
80 except (IOError, TypeError):
81 # assume it was local, so just use the name of the function
82 f_name = f.__name__
83 if f_name not in __deprecated_functions:
84 __deprecated_functions[f_name] = True
85 s = "DEPRECATION WARNING: Function {} is being removed".format(
86 f.__name__)
87 if date:
88 s = "{} on/around {}".format(s, date)
89 if warning:
90 s = "{} : {}".format(s, warning)
91 if log:
92 log(s)
93 else:
94 print(s)
95 return f(*args, **kwargs)
96 return wrapped_f
97 return wrap
diff --git a/hooks/charmhelpers/contrib/__init__.py b/hooks/charmhelpers/contrib/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/hooks/charmhelpers/contrib/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/network/__init__.py b/hooks/charmhelpers/contrib/network/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/hooks/charmhelpers/contrib/network/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py
deleted file mode 100644
index b13277b..0000000
--- a/hooks/charmhelpers/contrib/network/ip.py
+++ /dev/null
@@ -1,602 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import glob
16import re
17import subprocess
18import six
19import socket
20
21from functools import partial
22
23from charmhelpers.fetch import apt_install, apt_update
24from charmhelpers.core.hookenv import (
25 config,
26 log,
27 network_get_primary_address,
28 unit_get,
29 WARNING,
30 NoNetworkBinding,
31)
32
33from charmhelpers.core.host import (
34 lsb_release,
35 CompareHostReleases,
36)
37
38try:
39 import netifaces
40except ImportError:
41 apt_update(fatal=True)
42 if six.PY2:
43 apt_install('python-netifaces', fatal=True)
44 else:
45 apt_install('python3-netifaces', fatal=True)
46 import netifaces
47
48try:
49 import netaddr
50except ImportError:
51 apt_update(fatal=True)
52 if six.PY2:
53 apt_install('python-netaddr', fatal=True)
54 else:
55 apt_install('python3-netaddr', fatal=True)
56 import netaddr
57
58
def _validate_cidr(network):
    """Raise ValueError unless *network* parses as CIDR notation."""
    try:
        netaddr.IPNetwork(network)
        return
    except (netaddr.core.AddrFormatError, ValueError):
        pass
    raise ValueError("Network (%s) is not in CIDR presentation format" %
                     network)
65
66
def no_ip_found_error_out(network):
    """Raise a ValueError reporting that no IP was found in *network*."""
    raise ValueError("No IP address found in network(s): %s" % network)
70
71
def _get_ipv6_network_from_address(address):
    """Build a netaddr.IPNetwork from a netifaces IPv6 address entry.

    :param address: a dict as returned by netifaces.ifaddresses
    :returns netaddr.IPNetwork: None if the address is a link local or
        loopback address
    """
    addr = address['addr']
    # Skip addresses that are not routable beyond the host itself.
    if addr.startswith('fe80') or addr == "::1":
        return None

    # The netmask may be reported as 'ffff:...:/64'; prefer the prefix
    # length after '/' when present.
    parts = address['netmask'].split("/")
    netmask = parts[1] if len(parts) > 1 else address['netmask']
    return netaddr.IPNetwork("%s/%s" % (addr, netmask))
88
89
def get_address_in_network(network, fallback=None, fatal=False):
    """Get an IPv4 or IPv6 address within the network from the host.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'. Supports multiple networks as a space-delimited
        list.
    :param fallback (str): If no address is found, return fallback.
    :param fatal (boolean): If no address is found, fallback is not
        set and fatal is True then exit(1).
    """
    if network is None:
        if fallback is not None:
            return fallback
        if fatal:
            no_ip_found_error_out(network)
        return None

    net = None
    for cidr in network.split() or [network]:
        _validate_cidr(cidr)
        net = netaddr.IPNetwork(cidr)
        for iface in netifaces.interfaces():
            try:
                iface_addrs = netifaces.ifaddresses(iface)
            except ValueError:
                # The interface vanished between the interfaces() call
                # and now; just move on to the next one.
                continue
            if net.version == 4 and netifaces.AF_INET in iface_addrs:
                for entry in iface_addrs[netifaces.AF_INET]:
                    candidate = netaddr.IPNetwork(
                        "%s/%s" % (entry['addr'], entry['netmask']))
                    if candidate in net:
                        return str(candidate.ip)

            if net.version == 6 and netifaces.AF_INET6 in iface_addrs:
                for entry in iface_addrs[netifaces.AF_INET6]:
                    candidate = _get_ipv6_network_from_address(entry)
                    if candidate and candidate in net:
                        return str(candidate.ip)

    if fallback is not None:
        return fallback

    if fatal:
        # Report the last network examined, matching historic behaviour.
        no_ip_found_error_out(net)

    return None
139
140
def is_ipv6(address):
    """Determine whether provided address is IPv6 or not."""
    try:
        return netaddr.IPAddress(address).version == 6
    except netaddr.AddrFormatError:
        # Not parseable as an IP at all -- most likely a hostname.
        return False
150
151
def is_address_in_network(network, address):
    """Determine whether the provided address is within a network range.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param address: An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :returns boolean: Flag indicating whether address is in network.
    :raises ValueError: if either argument cannot be parsed.
    """
    try:
        network = netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)

    try:
        address = netaddr.IPAddress(address)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Address (%s) is not in correct presentation format" %
                         address)

    # netaddr's IPNetwork implements __contains__, so the membership test
    # already yields a boolean -- no need for an explicit if/else.
    return address in network
178
179
def _get_for_address(address, key):
    """Retrieve an attribute of or the physical interface that
    the IP address provided could be bound to.

    :param address (str): An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :param key: 'iface' for the physical interface name or an attribute
        of the configured interface, for example 'netmask'.
    :returns str: Requested attribute or None if address is not bindable.
    """
    address = netaddr.IPAddress(address)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        # IPv4: only the first address entry on each interface is checked.
        if address.version == 4 and netifaces.AF_INET in addresses:
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            cidr = network.cidr
            if address in cidr:
                if key == 'iface':
                    return iface
                else:
                    return addresses[netifaces.AF_INET][0][key]

        # IPv6: every address entry is checked; link-local/loopback
        # entries produce no network and are skipped.
        if address.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                network = _get_ipv6_network_from_address(addr)
                if not network:
                    continue

                cidr = network.cidr
                if address in cidr:
                    if key == 'iface':
                        return iface
                    elif key == 'netmask' and cidr:
                        # For IPv6 report the prefix length taken from the
                        # CIDR, not the raw netifaces netmask attribute.
                        return str(cidr).split('/')[1]
                    else:
                        return addr[key]
    return None
219
220
# Convenience partials over _get_for_address(): resolve the interface
# name, or the netmask, that a given address would bind to.
get_iface_for_address = partial(_get_for_address, key='iface')


get_netmask_for_address = partial(_get_for_address, key='netmask')
225
226
def resolve_network_cidr(ip_address):
    '''
    Resolves the full address cidr of an ip_address based on
    configured network interfaces
    '''
    prefix = get_netmask_for_address(ip_address)
    full_address = "%s/%s" % (ip_address, prefix)
    return str(netaddr.IPNetwork(full_address).cidr)
234
235
def format_ipv6_addr(address):
    """If address is IPv6, wrap it in '[]' otherwise return None.

    This is required by most configuration files when specifying IPv6
    addresses.
    """
    return "[%s]" % address if is_ipv6(address) else None
246
247
def is_ipv6_disabled():
    """Return True if IPv6 is disabled host-wide (or sysctl fails)."""
    cmd = ['sysctl', 'net.ipv6.conf.all.disable_ipv6']
    try:
        output = subprocess.check_output(
            cmd, stderr=subprocess.STDOUT, universal_newlines=True)
    except subprocess.CalledProcessError:
        # If sysctl cannot be queried, err on the side of "disabled".
        return True
    return "net.ipv6.conf.all.disable_ipv6 = 1" in output
258
259
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
                   fatal=True, exc_list=None):
    """Return the assigned IP address for a given interface, if any.

    :param iface: network interface on which address(es) are expected to
        be found.
    :param inet_type: inet address family
    :param inc_aliases: include alias interfaces in search
    :param fatal: if True, raise exception if address not found
    :param exc_list: list of addresses to ignore
    :return: sorted list of ip addresses
    """
    # Accept device paths such as '/dev/eth0' and keep only the nic name.
    if '/' in iface:
        iface = iface.split('/')[-1]

    exc_list = exc_list or []

    try:
        inet_num = getattr(netifaces, inet_type)
    except AttributeError:
        raise Exception("Unknown inet type '%s'" % str(inet_type))

    all_ifaces = netifaces.interfaces()
    if inc_aliases:
        # Aliases look like 'eth0:1'; match the exact name or the base
        # device before the colon.
        ifaces = sorted(_i for _i in all_ifaces
                        if _i == iface or _i.split(':')[0] == iface)
        if fatal and not ifaces:
            raise Exception("Invalid interface '%s'" % iface)
    elif iface in all_ifaces:
        ifaces = [iface]
    elif fatal:
        raise Exception("Interface '%s' not found " % (iface))
    else:
        return []

    addresses = []
    for netiface in ifaces:
        net_info = netifaces.ifaddresses(netiface)
        for entry in net_info.get(inet_num, []):
            if 'addr' in entry and entry['addr'] not in exc_list:
                addresses.append(entry['addr'])

    if fatal and not addresses:
        raise Exception("Interface '%s' doesn't have any %s addresses." %
                        (iface, inet_type))

    return sorted(addresses)
318
319
# IPv4 variant of get_iface_addr() with the address family pre-bound.
get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
321
322
def get_iface_from_addr(addr):
    """Work out on which interface the provided address is configured."""
    # Link-local addresses carry a '%<scope>' suffix; strip it before
    # comparing.
    ll_key = re.compile("(.+)%.*")
    for iface in netifaces.interfaces():
        for inet_type, entries in netifaces.ifaddresses(iface).items():
            for entry in entries:
                candidate = entry['addr']
                raw = re.match(ll_key, candidate)
                if raw:
                    candidate = raw.group(1)

                if candidate == addr:
                    log("Address '%s' is configured on iface '%s'" %
                        (addr, iface))
                    return iface

    msg = "Unable to infer net iface on which '%s' is configured" % (addr)
    raise Exception(msg)
343
344
def sniff_iface(f):
    """Ensure decorated function is called with a value for iface.

    If no iface provided, inject net iface inferred from unit private
    address.
    """
    def iface_sniffer(*args, **kwargs):
        if kwargs.get('iface', None):
            return f(*args, **kwargs)
        kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
        return f(*args, **kwargs)

    return iface_sniffer
357
358
@sniff_iface
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
                  dynamic_only=True):
    """Get assigned IPv6 address for a given interface.

    Returns list of addresses found. If no address found, returns empty list.

    If iface is None, we infer the current primary interface by doing a reverse
    lookup on the unit private-address.

    We currently only support scope global IPv6 addresses i.e. non-temporary
    addresses. If no global IPv6 address is found, return the first one found
    in the ipv6 address list.

    :param iface: network interface on which ipv6 address(es) are expected to
        be found.
    :param inc_aliases: include alias interfaces in search
    :param fatal: if True, raise exception if address not found
    :param exc_list: list of addresses to ignore
    :param dynamic_only: only recognise dynamic addresses
    :return: list of ipv6 addresses
    """
    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
                               inc_aliases=inc_aliases, fatal=fatal,
                               exc_list=exc_list)

    if addresses:
        global_addrs = []
        for addr in addresses:
            # Link-local addresses look like 'fe80::<eui64>%<iface>'.
            key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
            m = re.match(key_scope_link_local, addr)
            if m:
                # Remember the EUI-64 suffix and interface from the
                # link-local address; used below to vet dynamic addresses.
                eui_64_mac = m.group(1)
                iface = m.group(2)
            else:
                global_addrs.append(addr)

        if global_addrs:
            # Make sure any found global addresses are not temporary
            cmd = ['ip', 'addr', 'show', iface]
            out = subprocess.check_output(cmd).decode('UTF-8')
            if dynamic_only:
                key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*")
            else:
                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")

            addrs = []
            for line in out.split('\n'):
                line = line.strip()
                m = re.match(key, line)
                if m and 'temporary' not in line:
                    # Return the first valid address we find
                    for addr in global_addrs:
                        if m.group(1) == addr:
                            # NOTE(review): if no link-local address was
                            # matched above, eui_64_mac is unbound here and
                            # this raises NameError when dynamic_only is
                            # True -- confirm whether that can occur in
                            # practice.
                            if not dynamic_only or \
                                    m.group(1).endswith(eui_64_mac):
                                addrs.append(addr)

            if addrs:
                return addrs

    if fatal:
        raise Exception("Interface '%s' does not have a scope global "
                        "non-temporary ipv6 address." % iface)

    return []
425
426
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
    """Return a list of bridges on the system."""
    bridges = []
    # Each bridge shows up as <vnic_dir>/<bridge-name>/bridge.
    for path in glob.glob("%s/*/bridge" % vnic_dir):
        bridges.append(path.replace(vnic_dir, '').split('/')[1])
    return bridges
431
432
def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
    """Return a list of nics comprising a given bridge on the system."""
    nics = []
    for path in glob.glob("%s/%s/brif/*" % (vnic_dir, bridge)):
        nics.append(path.split('/')[-1])
    return nics
437
438
def is_bridge_member(nic):
    """Check if a given nic is a member of a bridge."""
    return any(nic in get_bridge_nics(bridge) for bridge in get_bridges())
446
447
def is_ip(address):
    """
    Returns True if address is a valid IP address.
    """
    try:
        # Accept anything netaddr can parse as an IPv4/IPv6 address.
        netaddr.IPAddress(address)
    except (netaddr.AddrFormatError, ValueError):
        return False
    return True
458
459
def ns_query(address):
    """Resolve *address* via DNS and return the first answer, or None.

    dns.name.Name instances are looked up as PTR records; plain strings
    as A records; any other type returns None.
    """
    try:
        import dns.resolver
    except ImportError:
        # dnspython is installed on demand, matching the module's
        # python2/python3 split.
        if six.PY2:
            apt_install('python-dnspython', fatal=True)
        else:
            apt_install('python3-dnspython', fatal=True)
        import dns.resolver

    if isinstance(address, dns.name.Name):
        rtype = 'PTR'
    elif isinstance(address, six.string_types):
        rtype = 'A'
    else:
        return None

    try:
        answers = dns.resolver.query(address, rtype)
    except dns.resolver.NXDOMAIN:
        return None

    return str(answers[0]) if answers else None
485
486
def get_host_ip(hostname, fallback=None):
    """
    Resolves the IP for a given hostname, or returns
    the input if it is already an IP.
    """
    if is_ip(hostname):
        return hostname

    # Try DNS first, then the system resolver, then the fallback.
    ip_addr = ns_query(hostname)
    if ip_addr:
        return ip_addr

    try:
        return socket.gethostbyname(hostname)
    except Exception:
        log("Failed to resolve hostname '%s'" % (hostname),
            level=WARNING)
        return fallback
504
505
def get_hostname(address, fqdn=True):
    """
    Resolves hostname for given IP, or returns the input
    if it is already a hostname.

    :param address: IP address or hostname.
    :param fqdn: if True return the fully-qualified name (without any
        trailing dot), otherwise just the short host name.
    """
    if not is_ip(address):
        result = address
    else:
        try:
            import dns.reversename
        except ImportError:
            if six.PY2:
                apt_install("python-dnspython", fatal=True)
            else:
                apt_install("python3-dnspython", fatal=True)
            import dns.reversename

        # Reverse DNS first; fall back to the system resolver.
        rev = dns.reversename.from_address(address)
        result = ns_query(rev)

        if not result:
            try:
                result = socket.gethostbyaddr(address)[0]
            except Exception:
                return None

    if not fqdn:
        return result.split('.')[0]
    # Strip any trailing '.' from the FQDN.
    return result[:-1] if result.endswith('.') else result
540
541
def port_has_listener(address, port):
    """
    Returns True if the address:port is open and being listened to,
    else False.

    @param address: an IP address or hostname
    @param port: integer port

    Note: runs 'nc -z' in a subprocess (no shell); a zero exit status
    means the connection succeeded.
    """
    cmd = ['nc', '-z', address, str(port)]
    # nc exits 0 when the port is open and listening.
    return subprocess.call(cmd) == 0
555
556
def assert_charm_supports_ipv6():
    """Check whether we are able to support charms ipv6."""
    codename = lsb_release()['DISTRIB_CODENAME'].lower()
    too_old = CompareHostReleases(codename) < "trusty"
    if too_old:
        raise Exception("IPv6 is not supported in the charms for Ubuntu "
                        "versions less than Trusty 14.04")
563
564
def get_relation_ip(interface, cidr_network=None):
    """Return this unit's IP for the given interface.

    Allow for an arbitrary interface to use with network-get to select an IP.
    Handle all address selection options including passed cidr network and
    IPv6.

    Usage: get_relation_ip('amqp', cidr_network='10.0.0.0/8')

    @param interface: string name of the relation.
    @param cidr_network: string CIDR Network to select an address from.
    @raises Exception if prefer-ipv6 is configured but IPv6 unsupported.
    @returns IPv6 or IPv4 address
    """
    # Select the interface address first
    # For possible use as a fallback below with get_address_in_network
    try:
        # Get the interface specific IP
        address = network_get_primary_address(interface)
    except NotImplementedError:
        # If network-get is not available
        address = get_host_ip(unit_get('private-address'))
    except NoNetworkBinding:
        log("No network binding for {}".format(interface), WARNING)
        address = get_host_ip(unit_get('private-address'))

    if config('prefer-ipv6'):
        # Currently IPv6 has priority, eventually we want IPv6 to just be
        # another network space.
        assert_charm_supports_ipv6()
        return get_ipv6_addr()[0]
    elif cidr_network:
        # If a specific CIDR network is passed get the address from that
        # network.
        return get_address_in_network(cidr_network, address)

    # Return the interface address
    return address
diff --git a/hooks/charmhelpers/contrib/openstack/__init__.py b/hooks/charmhelpers/contrib/openstack/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/hooks/charmhelpers/contrib/openstack/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/openstack/alternatives.py b/hooks/charmhelpers/contrib/openstack/alternatives.py
deleted file mode 100644
index 547de09..0000000
--- a/hooks/charmhelpers/contrib/openstack/alternatives.py
+++ /dev/null
@@ -1,44 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15''' Helper for managing alternatives for file conflict resolution '''
16
17import subprocess
18import shutil
19import os
20
21
def install_alternative(name, target, source, priority=50):
    ''' Install alternative configuration '''
    if os.path.exists(target) and not os.path.islink(target):
        # Preserve a pre-existing real file/directory as a .bak copy so
        # update-alternatives can own the target path.
        shutil.move(target, '{}.bak'.format(target))
    subprocess.check_call([
        'update-alternatives', '--force', '--install',
        target, name, source, str(priority),
    ])
32
33
def remove_alternative(name, source):
    """Remove an installed alternative configuration file

    :param name: string name of the alternative to remove
    :param source: string full path to alternative to remove
    """
    subprocess.check_call(['update-alternatives', '--remove', name, source])
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/hooks/charmhelpers/contrib/openstack/amulet/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/hooks/charmhelpers/contrib/openstack/amulet/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
deleted file mode 100644
index 1c96752..0000000
--- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ /dev/null
@@ -1,357 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import logging
16import os
17import re
18import sys
19import six
20from collections import OrderedDict
21from charmhelpers.contrib.amulet.deployment import (
22 AmuletDeployment
23)
24from charmhelpers.contrib.openstack.amulet.utils import (
25 OPENSTACK_RELEASES_PAIRS
26)
27
28DEBUG = logging.DEBUG
29ERROR = logging.ERROR
30
31
class OpenStackAmuletDeployment(AmuletDeployment):
    """OpenStack amulet deployment.

    This class inherits from AmuletDeployment and has additional support
    that is specifically for use by OpenStack charms.
    """

    def __init__(self, series=None, openstack=None, source=None,
                 stable=True, log_level=DEBUG):
        """Initialize the deployment environment.

        :param series: Ubuntu series to deploy on.
        :param openstack: openstack-origin setting (e.g. 'cloud:xenial-pike').
        :param source: 'source' config value for charms that use it.
        :param stable: deploy from stable charm-store branches when True.
        :param log_level: level for the deployment logger.
        """
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.log = self.get_logger(level=log_level)
        self.log.info('OpenStackAmuletDeployment: init')
        self.openstack = openstack
        self.source = source
        self.stable = stable

    def get_logger(self, name="deployment-logger", level=logging.DEBUG):
        """Get a logger object that will log to stdout."""
        logger = logging.getLogger(name)
        fmt = logging.Formatter("%(asctime)s %(funcName)s "
                                "%(levelname)s: %(message)s")

        handler = logging.StreamHandler(stream=sys.stdout)
        handler.setLevel(level)
        handler.setFormatter(fmt)

        logger.addHandler(handler)
        logger.setLevel(level)

        return logger

    def _determine_branch_locations(self, other_services):
        """Determine the branch locations for the other services.

        Determine if the local branch being tested is derived from its
        stable or next (dev) branch, and based on this, use the
        corresponding stable or next branches for the other_services.
        """
        self.log.info('OpenStackAmuletDeployment: determine branch locations')

        # Charms outside the ~openstack-charmers namespace, mapped to the
        # series each one supports.
        base_charms = {
            'mysql': ['trusty'],
            'mongodb': ['trusty'],
            'nrpe': ['trusty', 'xenial'],
        }

        for svc in other_services:
            # If a location has been explicitly set, use it
            if svc.get('location'):
                continue
            if svc['name'] in base_charms:
                # NOTE: not all charms have support for all series we
                # want/need to test against, so fix to most recent
                # that each base charm supports
                target_series = self.series
                if self.series not in base_charms[svc['name']]:
                    target_series = base_charms[svc['name']][-1]
                svc['location'] = 'cs:{}/{}'.format(target_series,
                                                    svc['name'])
            elif self.stable:
                svc['location'] = 'cs:{}/{}'.format(self.series,
                                                    svc['name'])
            else:
                svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
                    self.series,
                    svc['name']
                )

        return other_services

    def _add_services(self, this_service, other_services, use_source=None,
                      no_origin=None):
        """Add services to the deployment and optionally set
        openstack-origin/source.

        :param this_service dict: Service dictionary describing the service
                                  whose amulet tests are being run
        :param other_services dict: List of service dictionaries describing
                                    the services needed to support the target
                                    service
        :param use_source list: List of services which use the 'source' config
                                option rather than 'openstack-origin'
        :param no_origin list: List of services which do not support setting
                               the Cloud Archive.
        Service Dict:
            {
                'name': str charm-name,
                'units': int number of units,
                'constraints': dict of juju constraints,
                'location': str location of charm,
            }
        eg
            this_service = {
                'name': 'openvswitch-odl',
                'constraints': {'mem': '8G'},
            }
            other_services = [
                {
                    'name': 'nova-compute',
                    'units': 2,
                    'constraints': {'mem': '4G'},
                    'location': cs:~bob/xenial/nova-compute
                },
                {
                    'name': 'mysql',
                    'constraints': {'mem': '2G'},
                },
                {'neutron-api-odl'}]
            use_source = ['mysql']
            no_origin = ['neutron-api-odl']
        """
        self.log.info('OpenStackAmuletDeployment: adding services')

        other_services = self._determine_branch_locations(other_services)

        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)

        services = other_services
        services.append(this_service)

        use_source = use_source or []
        no_origin = no_origin or []

        # Charms which should use the source config option
        use_source = list(set(
            use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
                          'ceph-osd', 'ceph-radosgw', 'ceph-mon',
                          'ceph-proxy', 'percona-cluster', 'lxd']))

        # Charms which can not use openstack-origin, ie. many subordinates
        no_origin = list(set(
            no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch',
                         'nrpe', 'openvswitch-odl', 'neutron-api-odl',
                         'odl-controller', 'cinder-backup', 'nexentaedge-data',
                         'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
                         'cinder-nexentaedge', 'nexentaedge-mgmt']))

        if self.openstack:
            for svc in services:
                if svc['name'] not in use_source + no_origin:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc['name'], config)

        if self.source:
            for svc in services:
                if svc['name'] in use_source and svc['name'] not in no_origin:
                    config = {'source': self.source}
                    self.d.configure(svc['name'], config)

    def _configure_services(self, configs):
        """Configure all of the services."""
        self.log.info('OpenStackAmuletDeployment: configure services')
        for service, config in six.iteritems(configs):
            self.d.configure(service, config)

    def _auto_wait_for_status(self, message=None, exclude_services=None,
                              include_only=None, timeout=None):
        """Wait for all units to have a specific extended status, except
        for any defined as excluded.  Unless specified via message, any
        status containing any case of 'ready' will be considered a match.

        Examples of message usage:

          Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
              message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)

          Wait for all units to reach this status (exact match):
              message = re.compile('^Unit is ready and clustered$')

          Wait for all units to reach any one of these (exact match):
              message = re.compile('Unit is ready|OK|Ready')

          Wait for at least one unit to reach this status (exact match):
              message = {'ready'}

        See Amulet's sentry.wait_for_messages() for message usage detail.
        https://github.com/juju/amulet/blob/master/amulet/sentry.py

        :param message: Expected status match
        :param exclude_services: List of juju service names to ignore,
            not to be used in conjunction with include_only.
        :param include_only: List of juju service names to exclusively check,
            not to be used in conjunction with exclude_services.
        :param timeout: Maximum time in seconds to wait for status match
        :returns: None.  Raises if timeout is hit.
        """
        if not timeout:
            timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
        self.log.info('Waiting for extended status on units for {}s...'
                      ''.format(timeout))

        all_services = self.d.services.keys()

        if exclude_services and include_only:
            raise ValueError('exclude_services can not be used '
                             'with include_only')

        if message:
            # re._pattern_type was removed in Python 3.7; prefer the
            # public re.Pattern name when available.
            pattern_type = getattr(re, 'Pattern', None) or getattr(
                re, '_pattern_type', None)
            if pattern_type and isinstance(message, pattern_type):
                match = message.pattern
            else:
                match = message

            self.log.debug('Custom extended status wait match: '
                           '{}'.format(match))
        else:
            self.log.debug('Default extended status wait match: contains '
                           'READY (case-insensitive)')
            message = re.compile('.*ready.*', re.IGNORECASE)

        if exclude_services:
            self.log.debug('Excluding services from extended status match: '
                           '{}'.format(exclude_services))
        else:
            exclude_services = []

        if include_only:
            services = include_only
        else:
            services = list(set(all_services) - set(exclude_services))

        self.log.debug('Waiting up to {}s for extended status on services: '
                       '{}'.format(timeout, services))
        service_messages = {service: message for service in services}

        # Check for idleness
        self.d.sentry.wait(timeout=timeout)
        # Check for error states and bail early
        self.d.sentry.wait_for_status(self.d.juju_env, services,
                                      timeout=timeout)
        # Check for ready messages
        self.d.sentry.wait_for_messages(service_messages, timeout=timeout)

        self.log.info('OK')

    def _get_openstack_release(self):
        """Get openstack release.

        Return an integer representing the enum value of the openstack
        release.
        """
        # Must be ordered by OpenStack release (not by Ubuntu release):
        # this also exposes each pair as an attribute (e.g.
        # self.trusty_icehouse) for use by comparison code.
        for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
            setattr(self, os_pair, i)

        releases = {
            ('trusty', None): self.trusty_icehouse,
            ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
            ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
            ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
            ('xenial', None): self.xenial_mitaka,
            ('xenial', 'cloud:xenial-newton'): self.xenial_newton,
            ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
            ('xenial', 'cloud:xenial-pike'): self.xenial_pike,
            ('xenial', 'cloud:xenial-queens'): self.xenial_queens,
            ('yakkety', None): self.yakkety_newton,
            ('zesty', None): self.zesty_ocata,
            ('artful', None): self.artful_pike,
            ('bionic', None): self.bionic_queens,
            ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
            ('cosmic', None): self.cosmic_rocky,
        }
        return releases[(self.series, self.openstack)]

    def _get_openstack_release_string(self):
        """Get openstack release string.

        Return a string representing the openstack release.
        """
        releases = OrderedDict([
            ('trusty', 'icehouse'),
            ('xenial', 'mitaka'),
            ('yakkety', 'newton'),
            ('zesty', 'ocata'),
            ('artful', 'pike'),
            ('bionic', 'queens'),
            ('cosmic', 'rocky'),
        ])
        if self.openstack:
            # 'cloud:<series>-<release>[/...]' -> '<release>'
            os_origin = self.openstack.split(':')[1]
            return os_origin.split('%s-' % self.series)[1].split('/')[0]
        else:
            return releases[self.series]

    def get_ceph_expected_pools(self, radosgw=False):
        """Return a list of expected ceph pools in a ceph + cinder + glance
        test scenario, based on OpenStack release and whether ceph radosgw
        is flagged as present or not."""

        # Hoist the release lookup; calling it also defines the
        # self.<series>_<release> attributes used below.
        release = self._get_openstack_release()
        if release == self.trusty_icehouse:
            # Icehouse
            pools = [
                'data',
                'metadata',
                'rbd',
                'cinder-ceph',
                'glance'
            ]
        elif self.trusty_kilo <= release <= self.zesty_ocata:
            # Kilo through Ocata
            pools = [
                'rbd',
                'cinder-ceph',
                'glance'
            ]
        else:
            # Pike and later
            pools = [
                'cinder-ceph',
                'glance'
            ]

        if radosgw:
            pools.extend([
                '.rgw.root',
                '.rgw.control',
                '.rgw',
                '.rgw.gc',
                '.users.uid'
            ])

        return pools
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
deleted file mode 100644
index ef4ab54..0000000
--- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ /dev/null
@@ -1,1515 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import amulet
16import json
17import logging
18import os
19import re
20import six
21import time
22import urllib
23import urlparse
24
25import cinderclient.v1.client as cinder_client
26import cinderclient.v2.client as cinder_clientv2
27import glanceclient.v1.client as glance_client
28import heatclient.v1.client as heat_client
29from keystoneclient.v2_0 import client as keystone_client
30from keystoneauth1.identity import (
31 v3,
32 v2,
33)
34from keystoneauth1 import session as keystone_session
35from keystoneclient.v3 import client as keystone_client_v3
36from novaclient import exceptions
37
38import novaclient.client as nova_client
39import novaclient
40import pika
41import swiftclient
42
43from charmhelpers.core.decorators import retry_on_exception
44from charmhelpers.contrib.amulet.utils import (
45 AmuletUtils
46)
47from charmhelpers.core.host import CompareHostReleases
48
# Convenience aliases for the logging levels used throughout this module.
DEBUG = logging.DEBUG
ERROR = logging.ERROR

# Nova API version string passed when constructing novaclient clients.
NOVA_CLIENT_VERSION = "2"

# Known (series_release) combinations, oldest first.  The list index is
# used as an ordinal for release comparisons elsewhere, e.g.
# OPENSTACK_RELEASES_PAIRS.index('xenial_queens').
OPENSTACK_RELEASES_PAIRS = [
    'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
    'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
    'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
    'xenial_pike', 'artful_pike', 'xenial_queens',
    'bionic_queens', 'bionic_rocky', 'cosmic_rocky']
60
61
62class OpenStackAmuletUtils(AmuletUtils):
63 """OpenStack amulet utilities.
64
65 This class inherits from AmuletUtils and has additional support
66 that is specifically for use by OpenStack charm tests.
67 """
68
    def __init__(self, log_level=ERROR):
        """Initialize the utility class.

        :param log_level: ``logging`` level constant forwarded to the
                          AmuletUtils base class (default ``ERROR``).
        """
        super(OpenStackAmuletUtils, self).__init__(log_level)
72
73 def validate_endpoint_data(self, endpoints, admin_port, internal_port,
74 public_port, expected, openstack_release=None):
75 """Validate endpoint data. Pick the correct validator based on
76 OpenStack release. Expected data should be in the v2 format:
77 {
78 'id': id,
79 'region': region,
80 'adminurl': adminurl,
81 'internalurl': internalurl,
82 'publicurl': publicurl,
83 'service_id': service_id}
84
85 """
86 validation_function = self.validate_v2_endpoint_data
87 xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
88 if openstack_release and openstack_release >= xenial_queens:
89 validation_function = self.validate_v3_endpoint_data
90 expected = {
91 'id': expected['id'],
92 'region': expected['region'],
93 'region_id': 'RegionOne',
94 'url': self.valid_url,
95 'interface': self.not_null,
96 'service_id': expected['service_id']}
97 return validation_function(endpoints, admin_port, internal_port,
98 public_port, expected)
99
100 def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
101 public_port, expected):
102 """Validate endpoint data.
103
104 Validate actual endpoint data vs expected endpoint data. The ports
105 are used to find the matching endpoint.
106 """
107 self.log.debug('Validating endpoint data...')
108 self.log.debug('actual: {}'.format(repr(endpoints)))
109 found = False
110 for ep in endpoints:
111 self.log.debug('endpoint: {}'.format(repr(ep)))
112 if (admin_port in ep.adminurl and
113 internal_port in ep.internalurl and
114 public_port in ep.publicurl):
115 found = True
116 actual = {'id': ep.id,
117 'region': ep.region,
118 'adminurl': ep.adminurl,
119 'internalurl': ep.internalurl,
120 'publicurl': ep.publicurl,
121 'service_id': ep.service_id}
122 ret = self._validate_dict_data(expected, actual)
123 if ret:
124 return 'unexpected endpoint data - {}'.format(ret)
125
126 if not found:
127 return 'endpoint not found'
128
    def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
                                  public_port, expected, expected_num_eps=3):
        """Validate keystone v3 endpoint data.

        Validate the v3 endpoint data which has changed from v2.  The
        ports are used to find the matching endpoints (one per interface).

        The new v3 endpoint data looks like:

        [<Endpoint enabled=True,
                   id=0432655fc2f74d1e9fa17bdaa6f6e60b,
                   interface=admin,
                   links={u'self': u'<RESTful URL of this endpoint>'},
                   region=RegionOne,
                   region_id=RegionOne,
                   service_id=17f842a0dc084b928e476fafe67e4095,
                   url=http://10.5.6.5:9312>,
         <Endpoint enabled=True,
                   id=6536cb6cb92f4f41bf22b079935c7707,
                   interface=admin,
                   links={u'self': u'<RESTful url of this endpoint>'},
                   region=RegionOne,
                   region_id=RegionOne,
                   service_id=72fc8736fb41435e8b3584205bb2cfa3,
                   url=http://10.5.6.6:35357/v3>,
         ... ]

        :param expected_num_eps: number of matching endpoints expected,
                                 one per interface by default
        :returns: error string on mismatch, None on success
        """
        self.log.debug('Validating v3 endpoint data...')
        self.log.debug('actual: {}'.format(repr(endpoints)))
        found = []
        for ep in endpoints:
            self.log.debug('endpoint: {}'.format(repr(ep)))
            # Match each endpoint to the expected port for its interface.
            if ((admin_port in ep.url and ep.interface == 'admin') or
                    (internal_port in ep.url and ep.interface == 'internal') or
                    (public_port in ep.url and ep.interface == 'public')):
                found.append(ep.interface)
                # note we ignore the links member.
                actual = {'id': ep.id,
                          'region': ep.region,
                          'region_id': ep.region_id,
                          'interface': self.not_null,
                          'url': ep.url,
                          'service_id': ep.service_id, }
                ret = self._validate_dict_data(expected, actual)
                if ret:
                    return 'unexpected endpoint data - {}'.format(ret)

        if len(found) != expected_num_eps:
            return 'Unexpected number of endpoints found'
178
179 def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
180 """Convert v2 endpoint data into v3.
181
182 {
183 'service_name1': [
184 {
185 'adminURL': adminURL,
186 'id': id,
187 'region': region.
188 'publicURL': publicURL,
189 'internalURL': internalURL
190 }],
191 'service_name2': [
192 {
193 'adminURL': adminURL,
194 'id': id,
195 'region': region.
196 'publicURL': publicURL,
197 'internalURL': internalURL
198 }],
199 }
200 """
201 self.log.warn("Endpoint ID and Region ID validation is limited to not "
202 "null checks after v2 to v3 conversion")
203 for svc in ep_data.keys():
204 assert len(ep_data[svc]) == 1, "Unknown data format"
205 svc_ep_data = ep_data[svc][0]
206 ep_data[svc] = [
207 {
208 'url': svc_ep_data['adminURL'],
209 'interface': 'admin',
210 'region': svc_ep_data['region'],
211 'region_id': self.not_null,
212 'id': self.not_null},
213 {
214 'url': svc_ep_data['publicURL'],
215 'interface': 'public',
216 'region': svc_ep_data['region'],
217 'region_id': self.not_null,
218 'id': self.not_null},
219 {
220 'url': svc_ep_data['internalURL'],
221 'interface': 'internal',
222 'region': svc_ep_data['region'],
223 'region_id': self.not_null,
224 'id': self.not_null}]
225 return ep_data
226
227 def validate_svc_catalog_endpoint_data(self, expected, actual,
228 openstack_release=None):
229 """Validate service catalog endpoint data. Pick the correct validator
230 for the OpenStack version. Expected data should be in the v2 format:
231 {
232 'service_name1': [
233 {
234 'adminURL': adminURL,
235 'id': id,
236 'region': region.
237 'publicURL': publicURL,
238 'internalURL': internalURL
239 }],
240 'service_name2': [
241 {
242 'adminURL': adminURL,
243 'id': id,
244 'region': region.
245 'publicURL': publicURL,
246 'internalURL': internalURL
247 }],
248 }
249
250 """
251 validation_function = self.validate_v2_svc_catalog_endpoint_data
252 xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
253 if openstack_release and openstack_release >= xenial_queens:
254 validation_function = self.validate_v3_svc_catalog_endpoint_data
255 expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
256 return validation_function(expected, actual)
257
258 def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
259 """Validate service catalog endpoint data.
260
261 Validate a list of actual service catalog endpoints vs a list of
262 expected service catalog endpoints.
263 """
264 self.log.debug('Validating service catalog endpoint data...')
265 self.log.debug('actual: {}'.format(repr(actual)))
266 for k, v in six.iteritems(expected):
267 if k in actual:
268 ret = self._validate_dict_data(expected[k][0], actual[k][0])
269 if ret:
270 return self.endpoint_error(k, ret)
271 else:
272 return "endpoint {} does not exist".format(k)
273 return ret
274
275 def validate_v3_svc_catalog_endpoint_data(self, expected, actual):
276 """Validate the keystone v3 catalog endpoint data.
277
278 Validate a list of dictinaries that make up the keystone v3 service
279 catalogue.
280
281 It is in the form of:
282
283
284 {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e',
285 u'interface': u'admin',
286 u'region': u'RegionOne',
287 u'region_id': u'RegionOne',
288 u'url': u'http://10.5.5.224:35357/v3'},
289 {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf',
290 u'interface': u'public',
291 u'region': u'RegionOne',
292 u'region_id': u'RegionOne',
293 u'url': u'http://10.5.5.224:5000/v3'},
294 {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b',
295 u'interface': u'internal',
296 u'region': u'RegionOne',
297 u'region_id': u'RegionOne',
298 u'url': u'http://10.5.5.224:5000/v3'}],
299 u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62',
300 u'interface': u'public',
301 u'region': u'RegionOne',
302 u'region_id': u'RegionOne',
303 u'url': u'http://10.5.5.223:9311'},
304 {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d',
305 u'interface': u'internal',
306 u'region': u'RegionOne',
307 u'region_id': u'RegionOne',
308 u'url': u'http://10.5.5.223:9311'},
309 {u'id': u'f629388955bc407f8b11d8b7ca168086',
310 u'interface': u'admin',
311 u'region': u'RegionOne',
312 u'region_id': u'RegionOne',
313 u'url': u'http://10.5.5.223:9312'}]}
314
315 Note, that an added complication is that the order of admin, public,
316 internal against 'interface' in each region.
317
318 Thus, the function sorts the expected and actual lists using the
319 interface key as a sort key, prior to the comparison.
320 """
321 self.log.debug('Validating v3 service catalog endpoint data...')
322 self.log.debug('actual: {}'.format(repr(actual)))
323 for k, v in six.iteritems(expected):
324 if k in actual:
325 l_expected = sorted(v, key=lambda x: x['interface'])
326 l_actual = sorted(actual[k], key=lambda x: x['interface'])
327 if len(l_actual) != len(l_expected):
328 return ("endpoint {} has differing number of interfaces "
329 " - expected({}), actual({})"
330 .format(k, len(l_expected), len(l_actual)))
331 for i_expected, i_actual in zip(l_expected, l_actual):
332 self.log.debug("checking interface {}"
333 .format(i_expected['interface']))
334 ret = self._validate_dict_data(i_expected, i_actual)
335 if ret:
336 return self.endpoint_error(k, ret)
337 else:
338 return "endpoint {} does not exist".format(k)
339 return ret
340
341 def validate_tenant_data(self, expected, actual):
342 """Validate tenant data.
343
344 Validate a list of actual tenant data vs list of expected tenant
345 data.
346 """
347 self.log.debug('Validating tenant data...')
348 self.log.debug('actual: {}'.format(repr(actual)))
349 for e in expected:
350 found = False
351 for act in actual:
352 a = {'enabled': act.enabled, 'description': act.description,
353 'name': act.name, 'id': act.id}
354 if e['name'] == a['name']:
355 found = True
356 ret = self._validate_dict_data(e, a)
357 if ret:
358 return "unexpected tenant data - {}".format(ret)
359 if not found:
360 return "tenant {} does not exist".format(e['name'])
361 return ret
362
363 def validate_role_data(self, expected, actual):
364 """Validate role data.
365
366 Validate a list of actual role data vs a list of expected role
367 data.
368 """
369 self.log.debug('Validating role data...')
370 self.log.debug('actual: {}'.format(repr(actual)))
371 for e in expected:
372 found = False
373 for act in actual:
374 a = {'name': act.name, 'id': act.id}
375 if e['name'] == a['name']:
376 found = True
377 ret = self._validate_dict_data(e, a)
378 if ret:
379 return "unexpected role data - {}".format(ret)
380 if not found:
381 return "role {} does not exist".format(e['name'])
382 return ret
383
384 def validate_user_data(self, expected, actual, api_version=None):
385 """Validate user data.
386
387 Validate a list of actual user data vs a list of expected user
388 data.
389 """
390 self.log.debug('Validating user data...')
391 self.log.debug('actual: {}'.format(repr(actual)))
392 for e in expected:
393 found = False
394 for act in actual:
395 if e['name'] == act.name:
396 a = {'enabled': act.enabled, 'name': act.name,
397 'email': act.email, 'id': act.id}
398 if api_version == 3:
399 a['default_project_id'] = getattr(act,
400 'default_project_id',
401 'none')
402 else:
403 a['tenantId'] = act.tenantId
404 found = True
405 ret = self._validate_dict_data(e, a)
406 if ret:
407 return "unexpected user data - {}".format(ret)
408 if not found:
409 return "user {} does not exist".format(e['name'])
410 return ret
411
412 def validate_flavor_data(self, expected, actual):
413 """Validate flavor data.
414
415 Validate a list of actual flavors vs a list of expected flavors.
416 """
417 self.log.debug('Validating flavor data...')
418 self.log.debug('actual: {}'.format(repr(actual)))
419 act = [a.name for a in actual]
420 return self._validate_list_data(expected, act)
421
422 def tenant_exists(self, keystone, tenant):
423 """Return True if tenant exists."""
424 self.log.debug('Checking if tenant exists ({})...'.format(tenant))
425 return tenant in [t.name for t in keystone.tenants.list()]
426
    @retry_on_exception(num_retries=5, base_delay=1)
    def keystone_wait_for_propagation(self, sentry_relation_pairs,
                                      api_version):
        """Iterate over list of sentry and relation tuples and verify that
        api_version has the expected value.

        Wrapped in ``retry_on_exception(num_retries=5, base_delay=1)``, so
        the raise below is what triggers a retry while the relation data
        propagates.

        :param sentry_relation_pairs: list of sentry, relation name tuples used
                                      for monitoring propagation of relation
                                      data
        :param api_version: api_version to expect in relation data
        :returns: None if successful.  Raise on error.
        """
        for (sentry, relation_name) in sentry_relation_pairs:
            rel = sentry.relation('identity-service',
                                  relation_name)
            self.log.debug('keystone relation data: {}'.format(rel))
            # Relation data is string-typed; compare against str(api_version).
            if rel.get('api_version') != str(api_version):
                raise Exception("api_version not propagated through relation"
                                " data yet ('{}' != '{}')."
                                "".format(rel.get('api_version'), api_version))
447
    def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
                                       api_version):
        """Configure preferred-api-version of keystone in deployment and
        monitor provided list of relation objects for propagation
        before returning to caller.

        :param sentry_relation_pairs: list of sentry, relation tuples used for
                                      monitoring propagation of relation data
        :param deployment: deployment to configure
        :param api_version: value preferred-api-version will be set to
        :returns: None if successful.  Raise on error.
        """
        self.log.debug("Setting keystone preferred-api-version: '{}'"
                       "".format(api_version))

        config = {'preferred-api-version': api_version}
        deployment.d.configure('keystone', config)
        # Let the model settle after the config change, then block until
        # the new api_version is visible on all monitored relations.
        deployment._auto_wait_for_status()
        self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
467
468 def authenticate_cinder_admin(self, keystone, api_version=2):
469 """Authenticates admin user with cinder."""
470 self.log.debug('Authenticating cinder admin...')
471 _clients = {
472 1: cinder_client.Client,
473 2: cinder_clientv2.Client}
474 return _clients[api_version](session=keystone.session)
475
476 def authenticate_keystone(self, keystone_ip, username, password,
477 api_version=False, admin_port=False,
478 user_domain_name=None, domain_name=None,
479 project_domain_name=None, project_name=None):
480 """Authenticate with Keystone"""
481 self.log.debug('Authenticating with keystone...')
482 if not api_version:
483 api_version = 2
484 sess, auth = self.get_keystone_session(
485 keystone_ip=keystone_ip,
486 username=username,
487 password=password,
488 api_version=api_version,
489 admin_port=admin_port,
490 user_domain_name=user_domain_name,
491 domain_name=domain_name,
492 project_domain_name=project_domain_name,
493 project_name=project_name
494 )
495 if api_version == 2:
496 client = keystone_client.Client(session=sess)
497 else:
498 client = keystone_client_v3.Client(session=sess)
499 # This populates the client.service_catalog
500 client.auth_ref = auth.get_access(sess)
501 return client
502
503 def get_keystone_session(self, keystone_ip, username, password,
504 api_version=False, admin_port=False,
505 user_domain_name=None, domain_name=None,
506 project_domain_name=None, project_name=None):
507 """Return a keystone session object"""
508 ep = self.get_keystone_endpoint(keystone_ip,
509 api_version=api_version,
510 admin_port=admin_port)
511 if api_version == 2:
512 auth = v2.Password(
513 username=username,
514 password=password,
515 tenant_name=project_name,
516 auth_url=ep
517 )
518 sess = keystone_session.Session(auth=auth)
519 else:
520 auth = v3.Password(
521 user_domain_name=user_domain_name,
522 username=username,
523 password=password,
524 domain_name=domain_name,
525 project_domain_name=project_domain_name,
526 project_name=project_name,
527 auth_url=ep
528 )
529 sess = keystone_session.Session(auth=auth)
530 return (sess, auth)
531
532 def get_keystone_endpoint(self, keystone_ip, api_version=None,
533 admin_port=False):
534 """Return keystone endpoint"""
535 port = 5000
536 if admin_port:
537 port = 35357
538 base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
539 port)
540 if api_version == 2:
541 ep = base_ep + "/v2.0"
542 else:
543 ep = base_ep + "/v3"
544 return ep
545
546 def get_default_keystone_session(self, keystone_sentry,
547 openstack_release=None, api_version=2):
548 """Return a keystone session object and client object assuming standard
549 default settings
550
551 Example call in amulet tests:
552 self.keystone_session, self.keystone = u.get_default_keystone_session(
553 self.keystone_sentry,
554 openstack_release=self._get_openstack_release())
555
556 The session can then be used to auth other clients:
557 neutronclient.Client(session=session)
558 aodh_client.Client(session=session)
559 eyc
560 """
561 self.log.debug('Authenticating keystone admin...')
562 # 11 => xenial_queens
563 if api_version == 3 or (openstack_release and openstack_release >= 11):
564 client_class = keystone_client_v3.Client
565 api_version = 3
566 else:
567 client_class = keystone_client.Client
568 keystone_ip = keystone_sentry.info['public-address']
569 session, auth = self.get_keystone_session(
570 keystone_ip,
571 api_version=api_version,
572 username='admin',
573 password='openstack',
574 project_name='admin',
575 user_domain_name='admin_domain',
576 project_domain_name='admin_domain')
577 client = client_class(session=session)
578 # This populates the client.service_catalog
579 client.auth_ref = auth.get_access(session)
580 return session, client
581
582 def authenticate_keystone_admin(self, keystone_sentry, user, password,
583 tenant=None, api_version=None,
584 keystone_ip=None, user_domain_name=None,
585 project_domain_name=None,
586 project_name=None):
587 """Authenticates admin user with the keystone admin endpoint."""
588 self.log.debug('Authenticating keystone admin...')
589 if not keystone_ip:
590 keystone_ip = keystone_sentry.info['public-address']
591
592 # To support backward compatibility usage of this function
593 if not project_name:
594 project_name = tenant
595 if api_version == 3 and not user_domain_name:
596 user_domain_name = 'admin_domain'
597 if api_version == 3 and not project_domain_name:
598 project_domain_name = 'admin_domain'
599 if api_version == 3 and not project_name:
600 project_name = 'admin'
601
602 return self.authenticate_keystone(
603 keystone_ip, user, password,
604 api_version=api_version,
605 user_domain_name=user_domain_name,
606 project_domain_name=project_domain_name,
607 project_name=project_name,
608 admin_port=True)
609
610 def authenticate_keystone_user(self, keystone, user, password, tenant):
611 """Authenticates a regular user with the keystone public endpoint."""
612 self.log.debug('Authenticating keystone user ({})...'.format(user))
613 ep = keystone.service_catalog.url_for(service_type='identity',
614 interface='publicURL')
615 keystone_ip = urlparse.urlparse(ep).hostname
616
617 return self.authenticate_keystone(keystone_ip, user, password,
618 project_name=tenant)
619
620 def authenticate_glance_admin(self, keystone):
621 """Authenticates admin user with glance."""
622 self.log.debug('Authenticating glance admin...')
623 ep = keystone.service_catalog.url_for(service_type='image',
624 interface='adminURL')
625 if keystone.session:
626 return glance_client.Client(ep, session=keystone.session)
627 else:
628 return glance_client.Client(ep, token=keystone.auth_token)
629
630 def authenticate_heat_admin(self, keystone):
631 """Authenticates the admin user with heat."""
632 self.log.debug('Authenticating heat admin...')
633 ep = keystone.service_catalog.url_for(service_type='orchestration',
634 interface='publicURL')
635 if keystone.session:
636 return heat_client.Client(endpoint=ep, session=keystone.session)
637 else:
638 return heat_client.Client(endpoint=ep, token=keystone.auth_token)
639
640 def authenticate_nova_user(self, keystone, user, password, tenant):
641 """Authenticates a regular user with nova-api."""
642 self.log.debug('Authenticating nova user ({})...'.format(user))
643 ep = keystone.service_catalog.url_for(service_type='identity',
644 interface='publicURL')
645 if keystone.session:
646 return nova_client.Client(NOVA_CLIENT_VERSION,
647 session=keystone.session,
648 auth_url=ep)
649 elif novaclient.__version__[0] >= "7":
650 return nova_client.Client(NOVA_CLIENT_VERSION,
651 username=user, password=password,
652 project_name=tenant, auth_url=ep)
653 else:
654 return nova_client.Client(NOVA_CLIENT_VERSION,
655 username=user, api_key=password,
656 project_id=tenant, auth_url=ep)
657
658 def authenticate_swift_user(self, keystone, user, password, tenant):
659 """Authenticates a regular user with swift api."""
660 self.log.debug('Authenticating swift user ({})...'.format(user))
661 ep = keystone.service_catalog.url_for(service_type='identity',
662 interface='publicURL')
663 if keystone.session:
664 return swiftclient.Connection(session=keystone.session)
665 else:
666 return swiftclient.Connection(authurl=ep,
667 user=user,
668 key=password,
669 tenant_name=tenant,
670 auth_version='2.0')
671
672 def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto",
673 ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):
674 """Create the specified flavor."""
675 try:
676 nova.flavors.find(name=name)
677 except (exceptions.NotFound, exceptions.NoUniqueMatch):
678 self.log.debug('Creating flavor ({})'.format(name))
679 nova.flavors.create(name, ram, vcpus, disk, flavorid,
680 ephemeral, swap, rxtx_factor, is_public)
681
    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance,
        validate and return a resource pointer.

        :param glance: pointer to authenticated glance connection
        :param image_name: display name for new image
        :returns: glance image pointer
        """
        self.log.debug('Creating glance cirros image '
                       '({})...'.format(image_name))

        # Download cirros image, honouring an amulet-provided HTTP proxy.
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        # Ask the cirros mirror which release is current, then derive the
        # image file name from it.
        f = opener.open('http://download.cirros-cloud.net/version/released')
        version = f.read().strip()
        cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
        local_path = os.path.join('tests', cirros_img)

        # Only download when a cached copy is not already present.
        if not os.path.exists(local_path):
            cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
                                                  version, cirros_img)
            opener.retrieve(cirros_url, local_path)
        f.close()

        # Create glance image
        with open(local_path) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)

        # Wait for image to reach active status
        img_id = image.id
        ret = self.resource_reaches_status(glance.images, img_id,
                                           expected_stat='active',
                                           msg='Image status wait')
        if not ret:
            msg = 'Glance image failed to reach expected state.'
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Re-validate new image
        self.log.debug('Validating image attributes...')
        val_img_name = glance.images.get(img_id).name
        val_img_stat = glance.images.get(img_id).status
        val_img_pub = glance.images.get(img_id).is_public
        val_img_cfmt = glance.images.get(img_id).container_format
        val_img_dfmt = glance.images.get(img_id).disk_format
        msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
                    'container fmt:{} disk fmt:{}'.format(
                        val_img_name, val_img_pub, img_id,
                        val_img_stat, val_img_cfmt, val_img_dfmt))

        if val_img_name == image_name and val_img_stat == 'active' \
                and val_img_pub is True and val_img_cfmt == 'bare' \
                and val_img_dfmt == 'qcow2':
            self.log.debug(msg_attr)
        else:
            # NOTE(review): the message says 'Volume' but this validates
            # an image; left untouched since it is a runtime string.
            msg = ('Volume validation failed, {}'.format(msg_attr))
            amulet.raise_status(amulet.FAIL, msg=msg)

        return image
749
750 def delete_image(self, glance, image):
751 """Delete the specified image."""
752
753 # /!\ DEPRECATION WARNING
754 self.log.warn('/!\\ DEPRECATION WARNING: use '
755 'delete_resource instead of delete_image.')
756 self.log.debug('Deleting glance image ({})...'.format(image))
757 return self.delete_resource(glance.images, image, msg='glance image')
758
759 def create_instance(self, nova, image_name, instance_name, flavor):
760 """Create the specified instance."""
761 self.log.debug('Creating instance '
762 '({}|{}|{})'.format(instance_name, image_name, flavor))
763 image = nova.glance.find_image(image_name)
764 flavor = nova.flavors.find(name=flavor)
765 instance = nova.servers.create(name=instance_name, image=image,
766 flavor=flavor)
767
768 count = 1
769 status = instance.status
770 while status != 'ACTIVE' and count < 60:
771 time.sleep(3)
772 instance = nova.servers.get(instance.id)
773 status = instance.status
774 self.log.debug('instance status: {}'.format(status))
775 count += 1
776
777 if status != 'ACTIVE':
778 self.log.error('instance creation timed out')
779 return None
780
781 return instance
782
783 def delete_instance(self, nova, instance):
784 """Delete the specified instance."""
785
786 # /!\ DEPRECATION WARNING
787 self.log.warn('/!\\ DEPRECATION WARNING: use '
788 'delete_resource instead of delete_instance.')
789 self.log.debug('Deleting instance ({})...'.format(instance))
790 return self.delete_resource(nova.servers, instance,
791 msg='nova instance')
792
793 def create_or_get_keypair(self, nova, keypair_name="testkey"):
794 """Create a new keypair, or return pointer if it already exists."""
795 try:
796 _keypair = nova.keypairs.get(keypair_name)
797 self.log.debug('Keypair ({}) already exists, '
798 'using it.'.format(keypair_name))
799 return _keypair
800 except Exception:
801 self.log.debug('Keypair ({}) does not exist, '
802 'creating it.'.format(keypair_name))
803
804 _keypair = nova.keypairs.create(name=keypair_name)
805 return _keypair
806
807 def _get_cinder_obj_name(self, cinder_object):
808 """Retrieve name of cinder object.
809
810 :param cinder_object: cinder snapshot or volume object
811 :returns: str cinder object name
812 """
813 # v1 objects store name in 'display_name' attr but v2+ use 'name'
814 try:
815 return cinder_object.display_name
816 except AttributeError:
817 return cinder_object.name
818
    def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
                             img_id=None, src_vol_id=None, snap_id=None):
        """Create cinder volume, optionally from a glance image, OR
        optionally as a clone of an existing volume, OR optionally
        from a snapshot.  Wait for the new volume status to reach
        the expected status, validate and return a resource pointer.

        :param vol_name: cinder volume display name
        :param vol_size: size in gigabytes
        :param img_id: optional glance image id
        :param src_vol_id: optional source volume id to clone
        :param snap_id: optional snapshot id to use
        :returns: cinder volume pointer
        """
        # Handle parameter input and avoid impossible combinations:
        # at most one of img_id, src_vol_id, snap_id may be supplied.
        if img_id and not src_vol_id and not snap_id:
            # Create volume from image
            self.log.debug('Creating cinder volume from glance image...')
            bootable = 'true'
        elif src_vol_id and not img_id and not snap_id:
            # Clone an existing volume; inherit its bootable flag.
            self.log.debug('Cloning cinder volume...')
            bootable = cinder.volumes.get(src_vol_id).bootable
        elif snap_id and not src_vol_id and not img_id:
            # Create volume from snapshot; size and bootable flag come
            # from the snapshot and its source volume respectively.
            self.log.debug('Creating cinder volume from snapshot...')
            snap = cinder.volume_snapshots.find(id=snap_id)
            vol_size = snap.size
            snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
            bootable = cinder.volumes.get(snap_vol_id).bootable
        elif not img_id and not src_vol_id and not snap_id:
            # Create volume
            self.log.debug('Creating cinder volume...')
            bootable = 'false'
        else:
            # Impossible combination of parameters
            msg = ('Invalid method use - name:{} size:{} img_id:{} '
                   'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
                                                     img_id, src_vol_id,
                                                     snap_id))
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Create new volume.  cinder v1 clients take 'display_name'; a
        # TypeError signals a v2+ client that wants 'name' instead.
        try:
            vol_new = cinder.volumes.create(display_name=vol_name,
                                            imageRef=img_id,
                                            size=vol_size,
                                            source_volid=src_vol_id,
                                            snapshot_id=snap_id)
            vol_id = vol_new.id
        except TypeError:
            vol_new = cinder.volumes.create(name=vol_name,
                                            imageRef=img_id,
                                            size=vol_size,
                                            source_volid=src_vol_id,
                                            snapshot_id=snap_id)
            vol_id = vol_new.id
        except Exception as e:
            msg = 'Failed to create volume: {}'.format(e)
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Wait for volume to reach available status
        ret = self.resource_reaches_status(cinder.volumes, vol_id,
                                           expected_stat="available",
                                           msg="Volume status wait")
        if not ret:
            msg = 'Cinder volume failed to reach expected state.'
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Re-validate new volume
        self.log.debug('Validating volume attributes...')
        val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id))
        val_vol_boot = cinder.volumes.get(vol_id).bootable
        val_vol_stat = cinder.volumes.get(vol_id).status
        val_vol_size = cinder.volumes.get(vol_id).size
        msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
                    '{} size:{}'.format(val_vol_name, vol_id,
                                        val_vol_stat, val_vol_boot,
                                        val_vol_size))

        if val_vol_boot == bootable and val_vol_stat == 'available' \
                and val_vol_name == vol_name and val_vol_size == vol_size:
            self.log.debug(msg_attr)
        else:
            msg = ('Volume validation failed, {}'.format(msg_attr))
            amulet.raise_status(amulet.FAIL, msg=msg)

        return vol_new
907
    def delete_resource(self, resource, resource_id,
                        msg="resource", max_wait=120):
        """Delete one openstack resource, such as one instance, keypair,
        image, volume, stack, etc., and confirm deletion within max wait time.

        :param resource: pointer to os resource type, ex:glance_client.images
        :param resource_id: unique name or id for the openstack resource
        :param msg: text to identify purpose in logging
        :param max_wait: maximum wait time in seconds
        :returns: True if successful, otherwise False
        """
        self.log.debug('Deleting OpenStack resource '
                       '{} ({})'.format(resource_id, msg))
        # Deletion is confirmed by the resource count dropping by exactly
        # one, polling every 4 seconds up to max_wait.
        # NOTE(review): a concurrent create/delete of the same resource
        # type would skew this count-based check.
        num_before = len(list(resource.list()))
        resource.delete(resource_id)

        tries = 0
        num_after = len(list(resource.list()))
        while num_after != (num_before - 1) and tries < (max_wait / 4):
            self.log.debug('{} delete check: '
                           '{} [{}:{}] {}'.format(msg, tries,
                                                  num_before,
                                                  num_after,
                                                  resource_id))
            time.sleep(4)
            num_after = len(list(resource.list()))
            tries += 1

        self.log.debug('{}: expected, actual count = {}, '
                       '{}'.format(msg, num_before - 1, num_after))

        if num_after == (num_before - 1):
            return True
        else:
            self.log.error('{} delete timed out'.format(msg))
            return False
944
945 def resource_reaches_status(self, resource, resource_id,
946 expected_stat='available',
947 msg='resource', max_wait=120):
948 """Wait for an openstack resources status to reach an
949 expected status within a specified time. Useful to confirm that
950 nova instances, cinder vols, snapshots, glance images, heat stacks
951 and other resources eventually reach the expected status.
952
953 :param resource: pointer to os resource type, ex: heat_client.stacks
954 :param resource_id: unique id for the openstack resource
955 :param expected_stat: status to expect resource to reach
956 :param msg: text to identify purpose in logging
957 :param max_wait: maximum wait time in seconds
958 :returns: True if successful, False if status is not reached
959 """
960
961 tries = 0
962 resource_stat = resource.get(resource_id).status
963 while resource_stat != expected_stat and tries < (max_wait / 4):
964 self.log.debug('{} status check: '
965 '{} [{}:{}] {}'.format(msg, tries,
966 resource_stat,
967 expected_stat,
968 resource_id))
969 time.sleep(4)
970 resource_stat = resource.get(resource_id).status
971 tries += 1
972
973 self.log.debug('{}: expected, actual status = {}, '
974 '{}'.format(msg, resource_stat, expected_stat))
975
976 if resource_stat == expected_stat:
977 return True
978 else:
979 self.log.debug('{} never reached expected status: '
980 '{}'.format(resource_id, expected_stat))
981 return False
982
983 def get_ceph_osd_id_cmd(self, index):
984 """Produce a shell command that will return a ceph-osd id."""
985 return ("`initctl list | grep 'ceph-osd ' | "
986 "awk 'NR=={} {{ print $2 }}' | "
987 "grep -o '[0-9]*'`".format(index + 1))
988
989 def get_ceph_pools(self, sentry_unit):
990 """Return a dict of ceph pools from a single ceph unit, with
991 pool name as keys, pool id as vals."""
992 pools = {}
993 cmd = 'sudo ceph osd lspools'
994 output, code = sentry_unit.run(cmd)
995 if code != 0:
996 msg = ('{} `{}` returned {} '
997 '{}'.format(sentry_unit.info['unit_name'],
998 cmd, code, output))
999 amulet.raise_status(amulet.FAIL, msg=msg)
1000
1001 # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
1002 for pool in str(output).split(','):
1003 pool_id_name = pool.split(' ')
1004 if len(pool_id_name) == 2:
1005 pool_id = pool_id_name[0]
1006 pool_name = pool_id_name[1]
1007 pools[pool_name] = int(pool_id)
1008
1009 self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
1010 pools))
1011 return pools
1012
1013 def get_ceph_df(self, sentry_unit):
1014 """Return dict of ceph df json output, including ceph pool state.
1015
1016 :param sentry_unit: Pointer to amulet sentry instance (juju unit)
1017 :returns: Dict of ceph df output
1018 """
1019 cmd = 'sudo ceph df --format=json'
1020 output, code = sentry_unit.run(cmd)
1021 if code != 0:
1022 msg = ('{} `{}` returned {} '
1023 '{}'.format(sentry_unit.info['unit_name'],
1024 cmd, code, output))
1025 amulet.raise_status(amulet.FAIL, msg=msg)
1026 return json.loads(output)
1027
1028 def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
1029 """Take a sample of attributes of a ceph pool, returning ceph
1030 pool name, object count and disk space used for the specified
1031 pool ID number.
1032
1033 :param sentry_unit: Pointer to amulet sentry instance (juju unit)
1034 :param pool_id: Ceph pool ID
1035 :returns: List of pool name, object count, kb disk space used
1036 """
1037 df = self.get_ceph_df(sentry_unit)
1038 for pool in df['pools']:
1039 if pool['id'] == pool_id:
1040 pool_name = pool['name']
1041 obj_count = pool['stats']['objects']
1042 kb_used = pool['stats']['kb_used']
1043
1044 self.log.debug('Ceph {} pool (ID {}): {} objects, '
1045 '{} kb used'.format(pool_name, pool_id,
1046 obj_count, kb_used))
1047 return pool_name, obj_count, kb_used
1048
1049 def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
1050 """Validate ceph pool samples taken over time, such as pool
1051 object counts or pool kb used, before adding, after adding, and
1052 after deleting items which affect those pool attributes. The
1053 2nd element is expected to be greater than the 1st; 3rd is expected
1054 to be less than the 2nd.
1055
1056 :param samples: List containing 3 data samples
1057 :param sample_type: String for logging and usage context
1058 :returns: None if successful, Failure message otherwise
1059 """
1060 original, created, deleted = range(3)
1061 if samples[created] <= samples[original] or \
1062 samples[deleted] >= samples[created]:
1063 return ('Ceph {} samples ({}) '
1064 'unexpected.'.format(sample_type, samples))
1065 else:
1066 self.log.debug('Ceph {} samples (OK): '
1067 '{}'.format(sample_type, samples))
1068 return None
1069
1070 # rabbitmq/amqp specific helpers:
1071
1072 def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
1073 """Wait for rmq units extended status to show cluster readiness,
1074 after an optional initial sleep period. Initial sleep is likely
1075 necessary to be effective following a config change, as status
1076 message may not instantly update to non-ready."""
1077
1078 if init_sleep:
1079 time.sleep(init_sleep)
1080
1081 message = re.compile('^Unit is ready and clustered$')
1082 deployment._auto_wait_for_status(message=message,
1083 timeout=timeout,
1084 include_only=['rabbitmq-server'])
1085
    def add_rmq_test_user(self, sentry_units,
                          username="testuser1", password="changeme"):
        """Add a test user via the first rmq juju unit, check connection as
        the new user against all sentry units.

        If the user already exists on the first unit this returns early
        with a warning and performs no connection checks.

        :param sentry_units: list of sentry unit pointers
        :param username: amqp user name, default to testuser1
        :param password: amqp user password
        :returns: None if successful. Raise on error.
        """
        self.log.debug('Adding rmq user ({})...'.format(username))

        # Check that user does not already exist
        cmd_user_list = 'rabbitmqctl list_users'
        output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
        if username in output:
            self.log.warning('User ({}) already exists, returning '
                             'gracefully.'.format(username))
            return

        # Full configure/write/read permissions on all resources.
        perms = '".*" ".*" ".*"'
        cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
                'rabbitmqctl set_permissions {} {}'.format(username, perms)]

        # Add user via first unit only; rabbitmq replicates users
        # cluster-wide, which the connection checks below confirm.
        for cmd in cmds:
            output, _ = self.run_cmd_unit(sentry_units[0], cmd)

        # Check connection against the other sentry_units
        self.log.debug('Checking user connect against units...')
        for sentry_unit in sentry_units:
            connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
                                                   username=username,
                                                   password=password)
            connection.close()
1121
    def delete_rmq_test_user(self, sentry_units, username="testuser1"):
        """Delete a rabbitmq user via the first rmq juju unit.

        :param sentry_units: list of sentry unit pointers
        :param username: amqp user name, default to testuser1
        :returns: None if successful or no such user.
        """
        self.log.debug('Deleting rmq user ({})...'.format(username))

        # Check that the user exists; deleting a nonexistent user is a
        # warning-level no-op rather than a failure.
        cmd_user_list = 'rabbitmqctl list_users'
        output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)

        if username not in output:
            self.log.warning('User ({}) does not exist, returning '
                             'gracefully.'.format(username))
            return

        # Delete the user
        cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
        output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
1144
1145 def get_rmq_cluster_status(self, sentry_unit):
1146 """Execute rabbitmq cluster status command on a unit and return
1147 the full output.
1148
1149 :param unit: sentry unit
1150 :returns: String containing console output of cluster status command
1151 """
1152 cmd = 'rabbitmqctl cluster_status'
1153 output, _ = self.run_cmd_unit(sentry_unit, cmd)
1154 self.log.debug('{} cluster_status:\n{}'.format(
1155 sentry_unit.info['unit_name'], output))
1156 return str(output)
1157
1158 def get_rmq_cluster_running_nodes(self, sentry_unit):
1159 """Parse rabbitmqctl cluster_status output string, return list of
1160 running rabbitmq cluster nodes.
1161
1162 :param unit: sentry unit
1163 :returns: List containing node names of running nodes
1164 """
1165 # NOTE(beisner): rabbitmqctl cluster_status output is not
1166 # json-parsable, do string chop foo, then json.loads that.
1167 str_stat = self.get_rmq_cluster_status(sentry_unit)
1168 if 'running_nodes' in str_stat:
1169 pos_start = str_stat.find("{running_nodes,") + 15
1170 pos_end = str_stat.find("]},", pos_start) + 1
1171 str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
1172 run_nodes = json.loads(str_run_nodes)
1173 return run_nodes
1174 else:
1175 return []
1176
    def validate_rmq_cluster_running_nodes(self, sentry_units):
        """Check that all rmq unit hostnames are represented in the
        cluster_status output of all units.

        :param sentry_units: list of sentry unit pointers (all rmq units)
        :returns: None if successful, otherwise return error message
        """
        host_names = self.get_unit_hostnames(sentry_units)
        errors = []

        # Query every unit for cluster_status running nodes
        for query_unit in sentry_units:
            query_unit_name = query_unit.info['unit_name']
            running_nodes = self.get_rmq_cluster_running_nodes(query_unit)

            # Confirm that every unit is represented in the queried unit's
            # cluster_status running nodes output.
            for validate_unit in sentry_units:
                val_host_name = host_names[validate_unit.info['unit_name']]
                # rabbitmq node names take the form 'rabbit@<hostname>'
                val_node_name = 'rabbit@{}'.format(val_host_name)

                if val_node_name not in running_nodes:
                    errors.append('Cluster member check failed on {}: {} not '
                                  'in {}\n'.format(query_unit_name,
                                                   val_node_name,
                                                   running_nodes))
        # Implicitly returns None when no errors were collected.
        if errors:
            return ''.join(errors)
1206
    def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
        """Check a single juju rmq unit for ssl and port in the config file.

        :param sentry_unit: sentry unit pointer
        :param port: optional ssl port; when given, the port must also
                     appear in the config file for ssl to count as enabled
        :returns: True/False per the branch logic below; raises via amulet
                  on an unexpected combination of conditions.
        """
        host = sentry_unit.info['public-address']
        unit_name = sentry_unit.info['unit_name']

        conf_file = '/etc/rabbitmq/rabbitmq.config'
        conf_contents = str(self.file_contents_safe(sentry_unit,
                                                    conf_file, max_wait=16))
        # Checks are naive substring matches against the raw config text.
        conf_ssl = 'ssl' in conf_contents
        conf_port = str(port) in conf_contents

        # Port explicitly checked in config
        if port and conf_port and conf_ssl:
            self.log.debug('SSL is enabled @{}:{} '
                           '({})'.format(host, port, unit_name))
            return True
        elif port and not conf_port and conf_ssl:
            self.log.debug('SSL is enabled @{} but not on port {} '
                           '({})'.format(host, port, unit_name))
            return False
        # Port not checked (useful when checking that ssl is disabled)
        elif not port and conf_ssl:
            self.log.debug('SSL is enabled @{}:{} '
                           '({})'.format(host, port, unit_name))
            return True
        elif not conf_ssl:
            self.log.debug('SSL not enabled @{}:{} '
                           '({})'.format(host, port, unit_name))
            return False
        else:
            # Should be unreachable; fail loudly rather than guess.
            msg = ('Unknown condition when checking SSL status @{}:{} '
                   '({})'.format(host, port, unit_name))
            amulet.raise_status(amulet.FAIL, msg)
1241
1242 def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
1243 """Check that ssl is enabled on rmq juju sentry units.
1244
1245 :param sentry_units: list of all rmq sentry units
1246 :param port: optional ssl port override to validate
1247 :returns: None if successful, otherwise return error message
1248 """
1249 for sentry_unit in sentry_units:
1250 if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
1251 return ('Unexpected condition: ssl is disabled on unit '
1252 '({})'.format(sentry_unit.info['unit_name']))
1253 return None
1254
    def validate_rmq_ssl_disabled_units(self, sentry_units):
        """Check that ssl is disabled on the listed rmq juju sentry units.

        :param sentry_units: list of all rmq sentry units
        :returns: None if ssl is disabled on every unit, otherwise an
                  error message string.
        """
        for sentry_unit in sentry_units:
            if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
                return ('Unexpected condition: ssl is enabled on unit '
                        '({})'.format(sentry_unit.info['unit_name']))
        return None
1266
    def configure_rmq_ssl_on(self, sentry_units, deployment,
                             port=None, max_wait=60):
        """Turn ssl charm config option on, with optional non-default
        ssl port specification. Confirm that it is enabled on every
        unit.

        :param sentry_units: list of sentry units
        :param deployment: amulet deployment object pointer
        :param port: amqp port, use defaults if None
        :param max_wait: maximum time to wait in seconds to confirm
        :returns: None if successful. Raise on error.
        """
        self.log.debug('Setting ssl charm config option: on')

        # Enable RMQ SSL
        config = {'ssl': 'on'}
        if port:
            config['ssl_port'] = port

        deployment.d.configure('rabbitmq-server', config)

        # Wait for unit status
        self.rmq_wait_for_cluster(deployment)

        # Confirm: poll every 4s until all units report ssl enabled,
        # up to roughly max_wait seconds total.
        tries = 0
        ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
        while ret and tries < (max_wait / 4):
            time.sleep(4)
            self.log.debug('Attempt {}: {}'.format(tries, ret))
            ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
            tries += 1

        if ret:
            amulet.raise_status(amulet.FAIL, ret)
1302
1303 def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
1304 """Turn ssl charm config option off, confirm that it is disabled
1305 on every unit.
1306
1307 :param sentry_units: list of sentry units
1308 :param deployment: amulet deployment object pointer
1309 :param max_wait: maximum time to wait in seconds to confirm
1310 :returns: None if successful. Raise on error.
1311 """
1312 self.log.debug('Setting ssl charm config option: off')
1313
1314 # Disable RMQ SSL
1315 config = {'ssl': 'off'}
1316 deployment.d.configure('rabbitmq-server', config)
1317
1318 # Wait for unit status
1319 self.rmq_wait_for_cluster(deployment)
1320
1321 # Confirm
1322 tries = 0
1323 ret = self.validate_rmq_ssl_disabled_units(sentry_units)
1324 while ret and tries < (max_wait / 4):
1325 time.sleep(4)
1326 self.log.debug('Attempt {}: {}'.format(tries, ret))
1327 ret = self.validate_rmq_ssl_disabled_units(sentry_units)
1328 tries += 1
1329
1330 if ret:
1331 amulet.raise_status(amulet.FAIL, ret)
1332
    def connect_amqp_by_unit(self, sentry_unit, ssl=False,
                             port=None, fatal=True,
                             username="testuser1", password="changeme"):
        """Establish and return a pika amqp connection to the rabbitmq service
        running on a rmq juju unit.

        :param sentry_unit: sentry unit pointer
        :param ssl: boolean, default to False
        :param port: amqp port, use defaults if None
        :param fatal: boolean, default to True (raises on connect error)
        :param username: amqp user name, default to testuser1
        :param password: amqp user password
        :returns: pika amqp connection pointer or None if failed and non-fatal
        """
        host = sentry_unit.info['public-address']
        unit_name = sentry_unit.info['unit_name']

        # Default port logic if port is not specified: 5671 is the
        # conventional amqps (ssl) port, 5672 the plain amqp port.
        if ssl and not port:
            port = 5671
        elif not ssl and not port:
            port = 5672

        self.log.debug('Connecting to amqp on {}:{} ({}) as '
                       '{}...'.format(host, port, unit_name, username))

        try:
            credentials = pika.PlainCredentials(username, password)
            parameters = pika.ConnectionParameters(host=host, port=port,
                                                   credentials=credentials,
                                                   ssl=ssl,
                                                   connection_attempts=3,
                                                   retry_delay=5,
                                                   socket_timeout=1)
            connection = pika.BlockingConnection(parameters)
            # Sanity-check that the connection is fully open before
            # handing it back to the caller.
            assert connection.is_open is True
            assert connection.is_closing is False
            self.log.debug('Connect OK')
            return connection
        except Exception as e:
            msg = ('amqp connection failed to {}:{} as '
                   '{} ({})'.format(host, port, username, str(e)))
            if fatal:
                amulet.raise_status(amulet.FAIL, msg)
            else:
                self.log.warn(msg)
                return None
1380
    def publish_amqp_message_by_unit(self, sentry_unit, message,
                                     queue="test", ssl=False,
                                     username="testuser1",
                                     password="changeme",
                                     port=None):
        """Publish an amqp message to a rmq juju unit.

        :param sentry_unit: sentry unit pointer
        :param message: amqp message string
        :param queue: message queue, default to test
        :param ssl: boolean, default to False
        :param username: amqp user name, default to testuser1
        :param password: amqp user password
        :param port: amqp port, use defaults if None
        :returns: None. Raises exception if publish failed.
        """
        self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
                                                                    message))
        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
                                               port=port,
                                               username=username,
                                               password=password)

        # NOTE(beisner): extra debug here re: pika hang potential:
        #    https://github.com/pika/pika/issues/297
        #    https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
        self.log.debug('Defining channel...')
        channel = connection.channel()
        self.log.debug('Declaring queue...')
        # Durable, non-auto-delete queue so the message survives until a
        # consumer (e.g. get_amqp_message_by_unit) retrieves it.
        channel.queue_declare(queue=queue, auto_delete=False, durable=True)
        self.log.debug('Publishing message...')
        channel.basic_publish(exchange='', routing_key=queue, body=message)
        self.log.debug('Closing channel...')
        channel.close()
        self.log.debug('Closing connection...')
        connection.close()
1417
1418 def get_amqp_message_by_unit(self, sentry_unit, queue="test",
1419 username="testuser1",
1420 password="changeme",
1421 ssl=False, port=None):
1422 """Get an amqp message from a rmq juju unit.
1423
1424 :param sentry_unit: sentry unit pointer
1425 :param queue: message queue, default to test
1426 :param username: amqp user name, default to testuser1
1427 :param password: amqp user password
1428 :param ssl: boolean, default to False
1429 :param port: amqp port, use defaults if None
1430 :returns: amqp message body as string. Raise if get fails.
1431 """
1432 connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
1433 port=port,
1434 username=username,
1435 password=password)
1436 channel = connection.channel()
1437 method_frame, _, body = channel.basic_get(queue)
1438
1439 if method_frame:
1440 self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
1441 body))
1442 channel.basic_ack(method_frame.delivery_tag)
1443 channel.close()
1444 connection.close()
1445 return body
1446 else:
1447 msg = 'No message retrieved.'
1448 amulet.raise_status(amulet.FAIL, msg)
1449
1450 def validate_memcache(self, sentry_unit, conf, os_release,
1451 earliest_release=5, section='keystone_authtoken',
1452 check_kvs=None):
1453 """Check Memcache is running and is configured to be used
1454
1455 Example call from Amulet test:
1456
1457 def test_110_memcache(self):
1458 u.validate_memcache(self.neutron_api_sentry,
1459 '/etc/neutron/neutron.conf',
1460 self._get_openstack_release())
1461
1462 :param sentry_unit: sentry unit
1463 :param conf: OpenStack config file to check memcache settings
1464 :param os_release: Current OpenStack release int code
1465 :param earliest_release: Earliest Openstack release to check int code
1466 :param section: OpenStack config file section to check
1467 :param check_kvs: Dict of settings to check in config file
1468 :returns: None
1469 """
1470 if os_release < earliest_release:
1471 self.log.debug('Skipping memcache checks for deployment. {} <'
1472 'mitaka'.format(os_release))
1473 return
1474 _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'}
1475 self.log.debug('Checking memcached is running')
1476 ret = self.validate_services_by_name({sentry_unit: ['memcached']})
1477 if ret:
1478 amulet.raise_status(amulet.FAIL, msg='Memcache running check'
1479 'failed {}'.format(ret))
1480 else:
1481 self.log.debug('OK')
1482 self.log.debug('Checking memcache url is configured in {}'.format(
1483 conf))
1484 if self.validate_config_data(sentry_unit, conf, section, _kvs):
1485 message = "Memcache config error in: {}".format(conf)
1486 amulet.raise_status(amulet.FAIL, msg=message)
1487 else:
1488 self.log.debug('OK')
1489 self.log.debug('Checking memcache configuration in '
1490 '/etc/memcached.conf')
1491 contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf',
1492 fatal=True)
1493 ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs')
1494 if CompareHostReleases(ubuntu_release) <= 'trusty':
1495 memcache_listen_addr = 'ip6-localhost'
1496 else:
1497 memcache_listen_addr = '::1'
1498 expected = {
1499 '-p': '11211',
1500 '-l': memcache_listen_addr}
1501 found = []
1502 for key, value in expected.items():
1503 for line in contents.split('\n'):
1504 if line.startswith(key):
1505 self.log.debug('Checking {} is set to {}'.format(
1506 key,
1507 value))
1508 assert value == line.split()[-1]
1509 self.log.debug(line.split()[-1])
1510 found.append(key)
1511 if sorted(found) == sorted(expected.keys()):
1512 self.log.debug('OK')
1513 else:
1514 message = "Memcache config error in: /etc/memcached.conf"
1515 amulet.raise_status(amulet.FAIL, msg=message)
diff --git a/hooks/charmhelpers/contrib/openstack/cert_utils.py b/hooks/charmhelpers/contrib/openstack/cert_utils.py
deleted file mode 100644
index de853b5..0000000
--- a/hooks/charmhelpers/contrib/openstack/cert_utils.py
+++ /dev/null
@@ -1,227 +0,0 @@
1# Copyright 2014-2018 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
# Common python helper functions used for OpenStack charm certificates.
16
17import os
18import json
19
20from charmhelpers.contrib.network.ip import (
21 get_hostname,
22 resolve_network_cidr,
23)
24from charmhelpers.core.hookenv import (
25 local_unit,
26 network_get_primary_address,
27 config,
28 relation_get,
29 unit_get,
30 NoNetworkBinding,
31 log,
32 WARNING,
33)
34from charmhelpers.contrib.openstack.ip import (
35 ADMIN,
36 resolve_address,
37 get_vip_in_network,
38 INTERNAL,
39 PUBLIC,
40 ADDRESS_MAP)
41
42from charmhelpers.core.host import (
43 mkdir,
44 write_file,
45)
46
47from charmhelpers.contrib.hahelpers.apache import (
48 install_ca_cert
49)
50
51
52class CertRequest(object):
53
54 """Create a request for certificates to be generated
55 """
56
    def __init__(self, json_encode=True):
        # Accumulated certificate request entries (dicts of cn/addresses).
        self.entries = []
        # Entry for this machine's hostname; populated by
        # add_hostname_cn(), None until then.
        self.hostname_entry = None
        # presumably controls whether get_request() json-encodes the
        # request payload — confirm against get_request's body.
        self.json_encode = json_encode
61
    def add_entry(self, net_type, cn, addresses):
        """Add a request to the batch

        :param net_type: str network space name request is for
        :param cn: str Canonical Name for certificate
        :param addresses: [] List of addresses to be used as SANs
        """
        # NOTE(review): net_type is accepted but not stored in the entry;
        # only cn and addresses are recorded.
        self.entries.append({
            'cn': cn,
            'addresses': addresses})
72
    def add_hostname_cn(self):
        """Add a request for the hostname of the machine

        Builds self.hostname_entry from the unit's private address,
        including any vip found in the same network as a SAN.
        """
        ip = unit_get('private-address')
        addresses = [ip]
        # If a vip is being used without os-hostname config or
        # network spaces then we need to ensure the local units
        # cert has the appropriate vip in the SAN list
        vip = get_vip_in_network(resolve_network_cidr(ip))
        if vip:
            addresses.append(vip)
        self.hostname_entry = {
            'cn': get_hostname(ip),
            'addresses': addresses}
86
87 def add_hostname_cn_ip(self, addresses):
88 """Add an address to the SAN list for the hostname request
89
90 :param addr: [] List of address to be added
91 """
92 for addr in addresses:
93 if addr not in self.hostname_entry['addresses']:
94 self.hostname_entry['addresses'].append(addr)
95
96 def get_request(self):
97 """Generate request from the batched up entries
98
99 """