Retire TripleO: remove repo content

The TripleO project is retiring:
- https://review.opendev.org/c/openstack/governance/+/905145

This commit removes the content of this project's repo.

Change-Id: I511202b39dc8fa3416743132a926f2402701632f
Ghanshyam Mann 2024-02-24 11:31:03 -08:00
parent 910efeb558
commit 6b6858e8d7
95 changed files with 8 additions and 6306 deletions

@@ -1,7 +0,0 @@
---
parseable: true
exclude_paths:
- infrared_plugin/main.yml
skip_list:
# Add skips here only as last resort, like:
- role-name

.envrc

@@ -1,3 +0,0 @@
source_up
export ANSIBLE_LIBRARY=./library
export PYTHONPATH=./library:$PYTHONPATH

.gitignore

@@ -1,76 +0,0 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
# C extensions
*.so
# Distribution / packaging
.Python
env/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
sdist/
var/
container_registry.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
!infrared_plugin/plugin.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
# Translations
*.mo
*.pot
# Django stuff:
*.log
# Sphinx documentation
doc/build/
# PyBuilder
target/
# virtualenv
.venv/
# jenkins config
jenkins/config.ini
playbooks/debug.yml
# Files created by releasenotes build
releasenotes/build
# Editors
.*.sw[klmnop]
# ansible retry files
*.retry
ansible_role_collect_logs.egg-info
# built collection
*.tar.gz

@@ -1,53 +0,0 @@
---
repos:
- repo: https://github.com/PyCQA/isort
rev: 5.11.5
hooks:
- id: isort
- repo: https://github.com/python/black.git
rev: 22.3.0
hooks:
- id: black
language_version: python3
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.0.1
hooks:
- id: end-of-file-fixer
- id: trailing-whitespace
- id: mixed-line-ending
- id: check-byte-order-marker
- id: check-executables-have-shebangs
- id: check-merge-conflict
- id: debug-statements
- repo: https://github.com/pycqa/flake8.git
rev: 3.9.2
hooks:
- id: flake8
additional_dependencies:
- flake8-absolute-import
- flake8-black>=0.1.1
language_version: python3
- repo: https://github.com/ansible/ansible-lint.git
rev: v6.16.2
hooks:
- id: ansible-lint
always_run: true
# do not add file filters here as ansible-lint does not give reliable
# results when called with individual files.
# https://github.com/ansible/ansible-lint/issues/611
verbose: true
additional_dependencies:
- ansible-core
- yamllint
- repo: https://github.com/openstack-dev/bashate.git
rev: 2.0.0
hooks:
- id: bashate
entry: bashate --error . --verbose --ignore=E006,E040
# Run bashate check for all bash scripts
# Ignores the following rules:
# E006: Line longer than 79 columns (as many scripts use jinja
# templating, this is very difficult)
# E040: Syntax error determined using `bash -n` (as many scripts
# use jinja templating, this will often fail and the syntax
# error will be discovered in execution anyway)

LICENSE

@@ -1,175 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

@@ -1,11 +0,0 @@
global-exclude __pycache__
exclude .benchmarks
exclude .eggs
exclude .mypy_cache
exclude .pytest_cache
exclude .quickstart
exclude .tox
exclude infrared_plugin
exclude plugins
exclude test-playbooks
exclude zuul.d

@@ -1,323 +1,10 @@
collect_logs
============
This project is no longer maintained.
Ansible role for aggregating logs from different nodes.
The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".
The only supported way to call this role is using its main entry point. Do not
use ``tasks_from``, as this counts as using private interfaces.
Requirements
------------
This role gathers logs and debug information from a target system and
collates them in a designated directory, ``artcl_collect_dir``, on the
localhost.
Additionally, the role will convert templated bash scripts, created and
used by TripleO-Quickstart during deployment, into rST files. These rST
files are combined with static rST files and fed into Sphinx to create
user-friendly post-build documentation specific to the original
deployment.
Finally, the role optionally handles uploading these logs to an rsync
server or to OpenStack Swift object storage. Logs from Swift can be
exposed with
`os-loganalyze <https://github.com/openstack-infra/os-loganalyze>`__.
Role Variables
--------------
File Collection
~~~~~~~~~~~~~~~
- ``artcl_collect_list`` A list of files and directories to gather
from the target. Directories are collected recursively and need to
end with a '/' to be collected. Should be specified as a YAML list,
e.g.:
.. code:: yaml

    artcl_collect_list:
      - /etc/nova/
      - /home/stack/*.log
      - /var/log/

- ``artcl_collect_list_append`` A list of files and directories to be
appended to the default collection list. This is useful for users that want
to keep the original list and just add more relevant paths.
- ``artcl_exclude_list`` A list of files and directories to exclude
from collecting. This list is passed to rsync as an exclude filter
and it takes precedence over the collection list. For details see the
'FILTER RULES' topic in the rsync man page.
- ``artcl_exclude_list_append`` A list of files and directories to be
appended to the default exclude list. This is useful for users that want to
keep the original list and just add more relevant paths (see the combined
example at the end of this section).
- ``artcl_collect_dir`` A local directory where the logs should be
gathered, without a trailing slash.
- ``collect_log_types`` - A list of which types of logs will be collected,
such as openstack logs, network logs, system logs, etc.
Acceptable values are system, monitoring, network, openstack and container.
- ``artcl_gzip``: If true, compress the collected files with gzip; disabled by default.
- ``artcl_rsync_collect_list`` - If true, an rsync filter file is generated for
``rsync`` to collect files; if false, ``find`` is used to generate the list
of files for ``rsync`` to collect. ``find`` brings some benefits, like
searching for files down to a certain depth (``artcl_find_maxdepth``) or up to a
certain size (``artcl_find_max_size``).
- ``artcl_find_maxdepth`` - Number of levels of directories below the starting
points, default is 4. Note: this variable is applied only when
``artcl_rsync_collect_list`` is set to false.
- ``artcl_find_max_size`` - Max size of a file in MBs to be included in find
search, default value is 256. Note: this variable is applied only when
``artcl_rsync_collect_list`` is set to false.
- ``artcl_commands_extras`` - A nested dictionary of additional commands to be
run during collection. The first level contains the group type, as defined by
the ``collect_log_types`` list, which determines which groups are collected and
which ones are skipped.
Defined keys will override implicit ones from the default
``artcl_commands``, which is not expected to be changed by the user.
Second-level keys are used to uniquely identify a command and determine the
default output filename, unless one is specified via the ``capture_file`` property.
``cmd`` contains the shell command that will be run.
.. code:: yaml

    artcl_commands_extras:
      system:
        disk-space:
          cmd: df
          # will save output to /var/log/extras/disk-space.log
        mounts:
          cmd: mount -a
          capture_file: /mounts.txt  # <-- custom capture file location
      openstack:
        key2:
          cmd: touch /foo.txt
          capture_disable: true  # <-- disable implicit std redirection
          when: "1 > 2"  # <-- optional condition

Documentation generation related
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ``artcl_gen_docs``: false/true If true, the role will use build
artifacts and Sphinx and produce user friendly documentation
(default: false)
- ``artcl_docs_source_dir`` a local directory that serves as the
Sphinx source directory.
- ``artcl_docs_build_dir`` A local directory that serves as the
Sphinx build output directory.
- ``artcl_create_docs_payload`` Dictionary of lists that direct what
and how to construct documentation.
- ``included_deployment_scripts`` List of templated bash scripts
to be converted to rST files.
- ``included_static_docs`` List of static rST files that
will be included in the output documentation.
- ``table_of_contents`` List that defines the order in which rST
files will be laid out in the output documentation.
- ``artcl_verify_sphinx_build`` false/true If true, verify items
defined in ``artcl_create_docs_payload.table_of_contents`` exist in
sphinx generated index.html (default: false)
.. code:: yaml

    artcl_create_docs_payload:
      included_deployment_scripts:
        - undercloud-install
        - undercloud-post-install
      included_static_docs:
        - env-setup-virt
      table_of_contents:
        - env-setup-virt
        - undercloud-install
        - undercloud-post-install

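A hedged sketch of turning the documentation build on using the variables
above; the directory values below are illustrative placeholders, not role
defaults:

.. code:: yaml

    artcl_gen_docs: true
    artcl_verify_sphinx_build: true
    artcl_docs_source_dir: /home/stack/docs/source
    artcl_docs_build_dir: /home/stack/docs/build
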
Publishing related
~~~~~~~~~~~~~~~~~~
- ``artcl_publish``: true/false If true, the role will attempt to
rsync logs to the target specified by ``artcl_rsync_url``. Uses
``BUILD_URL``, ``BUILD_TAG`` vars from the environment (set during a
Jenkins job run) and requires the next two variables to be set.
- ``artcl_txt_rename``: false/true Rename text-based files to end in
.txt.gz so that upstream log servers display them in the browser
instead of offering them for download.
- ``artcl_publish_timeout``: the maximum seconds the role can spend
uploading the logs, the default is 1800 (30 minutes)
- ``artcl_use_rsync``: false/true use rsync to upload the logs
- ``artcl_rsync_use_daemon``: false/true use rsync daemon instead of
ssh to connect
- ``artcl_rsync_url`` rsync target for uploading the logs. The
localhost needs to have passwordless authentication to the target or
the ``PROVISIONER_KEY`` var specified in the environment.
- ``artcl_use_swift``: false/true use swift object storage to publish
the logs
- ``artcl_swift_auth_url`` the OpenStack auth URL for Swift
- ``artcl_swift_username`` OpenStack username for Swift
- ``artcl_swift_password`` password for the Swift user
- ``artcl_swift_tenant_name`` OpenStack tenant (project) name for Swift
- ``artcl_swift_container`` the name of the Swift container to use,
default is ``logs``
- ``artcl_swift_delete_after`` The number of seconds after which
Swift will remove the uploaded objects, the default is 2678400
seconds = 31 days.
- ``artcl_artifact_url`` An HTTP URL at which the uploaded logs will
be accessible after upload.
- ``artcl_report_server_key`` - A path to a key for access to the report
server.
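As an illustration, a hedged sketch of a Swift publishing configuration built
from the variables above; every value is a placeholder that would need to
match your own cloud:

.. code:: yaml

    artcl_publish: true
    artcl_publish_timeout: 1800
    artcl_use_swift: true
    artcl_swift_auth_url: https://keystone.example.com:5000/v3
    artcl_swift_username: logs-uploader
    artcl_swift_password: secret
    artcl_swift_tenant_name: ci-logs
    artcl_swift_container: logs
    artcl_swift_delete_after: 2678400
    artcl_artifact_url: https://logs.example.com/logs
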
Ara related
~~~~~~~~~~~
- ``ara_enabled``: true/false - If true, the role will generate ARA reports.
- ``ara_overcloud_db_path``: Path to the ARA overcloud database (TripleO only).
- ``ara_generate_html``: true/false - Generate ARA HTML reports.
- ``ara_graphite_prefix``: ARA prefix to be used in Graphite.
- ``ara_only_successful_tasks``: true/false - Send only successful tasks to
Graphite.
- ``ara_tasks_map``: Dictionary of ARA tasks to be mapped onto Graphite.
Logs parsing
~~~~~~~~~~~~
"Sova" module parses logs for known patterns and returns messages that were
found. Patterns are tagged by issues types, like "infra", "code", etc.
Patterns are located in file sova-patterns.yml in vars/ directory.
- ``config`` - patterns loaded from file
- ``files`` - files and patterns sections match
- ``result`` - path to file to write a result of parsing
- ``result_file_dir`` - directory to write a file with patterns in name
Example of usage of "sova" module:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: yaml

    ---
    - name: Run sova task
      sova:
        config: "{{ pattern_config }}"
        files:
          console: "{{ ansible_user_dir }}/workspace/logs/quickstart_install.log"
          errors: "/var/log/errors.txt"
          "ironic-conductor": "/var/log/containers/ironic/ironic-conductor.log"
          syslog: "/var/log/journal.txt"
          logstash: "/var/log/extra/logstash.txt"
        result: "{{ ansible_user_dir }}/workspace/logs/failures_file"
        result_file_dir: "{{ ansible_user_dir }}/workspace/logs"

Example Role Playbook
---------------------
.. code:: yaml

    ---
    - name: Gather logs
      hosts: all:!localhost
      roles:
        - collect_logs

**Note:**
The tasks that collect data from the nodes are executed with ``ignore_errors``,
for `example <https://opendev.org/openstack/ansible-role-collect-logs/src/branch/master/tasks/collect/system.yml#L3>`__.
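The minimal playbook above can also pass role variables directly. A hedged
sketch, where the values are illustrative rather than defaults:

.. code:: yaml

    ---
    - name: Gather and compress logs
      hosts: all:!localhost
      vars:
        artcl_collect_dir: /tmp/collected_logs
        collect_log_types:
          - system
          - openstack
        artcl_gzip: true
      roles:
        - collect_logs
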
Templated Bash to rST Conversion Notes
--------------------------------------
Templated bash scripts used during deployment are converted to rST files
during the ``create-docs`` portion of the role's call. Shell scripts are
fed into an awk script and output as reStructuredText. The awk script
has several simple rules:
1. Only lines between ``### ---start_docs`` and ``### ---stop_docs``
will be parsed.
2. Lines containing ``# nodoc`` will be excluded.
3. Lines containing ``## ::`` indicate subsequent lines should be
formatted as code blocks
4. Other lines beginning with ``## <anything else>`` will have the
prepended ``##`` removed. This is how and where general rST
formatting is added.
5. All other lines, including shell comments, will be indented by four
spaces.
Enabling sosreport Collection
-----------------------------
`sosreport <https://github.com/sosreport/sos>`__ is a unified tool for
collecting system logs and other debug information. To enable creation
of sosreport(s) with this role, create a custom config (you can use
centosci-logs.yml as a template) and ensure that
``artcl_collect_sosreport: true`` is set.
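For instance, a custom config enabling sosreport collection might need no more
than the following (a sketch; combine it with whatever other settings your job
requires):

.. code:: yaml

    artcl_collect_sosreport: true
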
Sanitizing Log Strings
----------------------
Logs can contain sensitive data such as private links and access
passwords. The 'collect' task provides an option to replace
private strings with sanitized strings to protect private data.
The 'sanitize_log_strings' task makes use of the Ansible 'replace'
module and is enabled by defining a ``sanitize_lines``
variable as shown in the example below:
.. code:: yaml

    ---
    sanitize_lines:
      - dir_path: '/tmp/{{ inventory_hostname }}/etc/repos/'
        file_pattern: '*'
        orig_string: '^(.*)download(.*)$'
        sanitized_string: 'SANITIZED_STR_download'
      - dir_path: '/tmp/{{ inventory_hostname }}/home/zuul/'
        file_pattern: '*'
        orig_string: '^(.*)my_private_host\.com(.*)$'
        sanitized_string: 'SANITIZED_STR_host'

The task searches for files containing the sensitive strings
(orig_string) within a file path, and then replaces the sensitive
strings in those files with the sanitized_string.
Usage with InfraRed
-------------------
Run the following steps to execute the role with
`infrared <https://infrared.readthedocs.io/en/latest/>`__.
1. Install infrared and add the ansible-role-collect-logs plugin by providing
the URL of this repo:
.. code-block::

    (infrared)$ ir plugin add https://opendev.org/openstack/ansible-role-collect-logs.git --src-path infrared_plugin

2. Verify that the plugin is imported by:

.. code-block::

    (infrared)$ ir plugin list

3. Run the plugin:

.. code-block::

    (infrared)$ ir ansible-role-collect-logs

License
-------
Apache 2.0
Author Information
------------------
RDO-CI Team
For any further questions, please email
openstack-discuss@lists.openstack.org or join #openstack-dev on
OFTC.

@@ -1,12 +0,0 @@
[defaults]
gathering = smart
retry_files_enabled = False
callbacks_enabled = profile_tasks
# Attempt to load custom modules whether it's installed system-wide or from a virtual environment
roles_path = roles:$VIRTUAL_ENV/share/ansible/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:$VIRTUAL_ENV/usr/share/ansible/roles
# Required by infrared
host_key_checking = False
forks = 500
timeout = 300

@@ -1 +0,0 @@
python3-yaml [platform:ubuntu]

@@ -1,5 +0,0 @@
# Doc requirements
sphinx>=1.1.2,!=1.2.0,!=1.3b1
oslosphinx>=2.2.0 # Apache-2.0
sphinx_rtd_theme==0.1.7
ansible-core>=2.11,<2.12

@@ -1,226 +0,0 @@
@import url("css/theme.css");
@import url("rdo_styling.css");
/* CUSTOM CSS OVERRIDES GO HERE */
/* ============================ */
/* LAYOUT */
.wy-nav-side {
overflow: visible;
}
.wy-side-nav-search {
margin-bottom: 0;
}
.wy-nav-content-wrap {
background: white;
}
.wy-nav-content {
max-width: 100%;
box-sizing: border-box;
}
.rst-content .section ol li p.first:last-child {
margin-bottom: 24px;
}
/* LOGO */
.wy-side-nav-search a {
margin-bottom: 5px;
}
.wy-side-nav-search img {
background: none;
border-radius: 0;
height: 60px;
width: auto;
margin: 0;
}
/* TYPOGRAPHY */
p {
margin-bottom: 16px;
}
p + ul, p + ol.simple {
margin-top: -12px;
}
h1, h2, h3, h4, h5, h6, p.rubric {
margin-top: 48px;
}
h2 {
border-bottom: 1px solid rgba(0, 0, 0, 0.2);
}
/* BREADCRUMBS */
.wy-breadcrumbs {
font-size: 85%;
color: rgba(0, 0, 0, 0.45);
}
.wy-breadcrumbs a {
text-decoration: underline;
color: inherit;
}
.wy-breadcrumbs a:hover,
.wy-breadcrumbs a:focus {
color: rgba(0, 0, 0, 0.75);
text-decoration: none;
}
/* FOOTER */
footer {
font-size: 70%;
margin-top: 48px;
}
footer p {
font-size: inherit;
}
/* NOTES, ADMONITIONS AND TAGS */
.admonition {
font-size: 85%; /* match code size */
background: rgb(240, 240, 240);
color: rgba(0, 0, 0, 0.55);
border: 1px solid rgba(0, 0, 0, 0.1);
padding: 0.5em 1em 0.75em 1em;
margin-bottom: 24px;
}
.admonition p {
font-size: inherit;
}
.admonition p.last {
margin-bottom: 0;
}
.admonition p.first.admonition-title {
display: inline;
background: none;
font-weight: bold;
color: rgba(0, 0, 0, 0.75);
}
/* notes */
.rst-content .note {
background: rgb(240, 240, 240);
}
.note > p.first.admonition-title {
display: inline-block;
background: rgba(0, 0, 0, 0.55);
color: rgba(255, 255, 255, 0.95);
}
/* optional */
.rst-content .optional {
background: white;
}
/* tags */
.rhel {background: #fee;}
.portal {background-color: #ded;}
.satellite {background-color: #dee;}
.centos {background: #fef;}
.baremetal {background: #eef;}
.virtual {background: #efe;}
.ceph {background: #eff;}
/* admonition selector */
#admonition_selector {
color: white;
font-size: 85%;
line-height: 1.4;
background: #2980b9;
border-top: 1px solid rgba(255, 255, 255, 0.4);
}
.trigger {
display: block;
font-size: 110%;
color: rgba(255, 255, 255, 0.75);
line-height: 2.5;
position: relative;
cursor: pointer;
padding: 0 1.618em;
}
.trigger:after {
content: '';
display: block;
font-family: FontAwesome;
font-size: 70%;
position: absolute;
right: 1.618em;
top: 6px;
}
.trigger:hover {
color: white;
}
.content {
display: none;
border-top: 1px solid rgba(255, 255, 255, 0.1);
background: rgba(255, 255, 255, 0.1);
padding: 0.5em 1.618em;
}
.displayed .trigger:after {
content: '';
}
#admonition_selector .title {
color: rgba(255, 255, 255, 0.45);
}
#admonition_selector ul {
margin-bottom: 0.75em;
}
#admonition_selector ul li {
display: block;
}
#admonition_selector label {
display: inline;
color: inherit;
text-decoration: underline dotted;
}
/* LINKS */
a.external:after {
font-family: FontAwesome;
content: '';
visibility: visible;
display: inline-block;
font-size: 70%;
position: relative;
padding-left: 0.5em;
top: -0.5em;
}
/* LIST */
.wy-plain-list-decimal > li > ul,
.rst-content .section ol > li > ul,
.rst-content ol.arabic > li > ul,
article ol > li > ul {
margin-bottom: 24px;
}

@@ -1,208 +0,0 @@
/* general settings */
body {
font-family: "Open Sans", Helvetica, Arial, sans-serif;
font-weight: 300;
font-size: 16px;
}
/* remove backgrounds */
.wy-nav-content,
.wy-body-for-nav,
.wy-nav-side,
#admonition_selector {
background: none !important;
color: black !important;
}
/* page header */
.wy-side-nav-search,
.wy-nav-top {
background: rgba(0, 0, 0, 0.05) !important;
}
.wy-nav-top {
line-height: 40px;
border-bottom: 1px solid rgba(0, 0, 0, 0.1);
}
.wy-side-nav-search a,
.wy-nav-top a,
.wy-nav-top i {
color: rgb(160, 0, 0) !important;
}
.wy-nav-top i {
position: relative;
top: 0.1em;
}
.wy-side-nav-search input[type="text"] {
border-color: rgba(0, 0, 0, 0.25);
}
/* sidebar*/
.wy-nav-side {
border-right: 1px solid rgba(0, 0, 0, 0.2);
}
/* admonition selector */
#admonition_selector {
border-top: 0 none !important;
}
.trigger {
color: rgba(0, 0, 0, 0.7) !important;
border-top: 1px solid rgba(0, 0, 0, 0.2);
border-bottom: 1px solid rgba(0, 0, 0, 0.2);
background: rgba(0, 0, 0, 0.05);
}
.trigger:hover {
color: rgba(0, 0, 0, 0.9) !important;
}
.content {
border-top: 0 none !important;
border-bottom: 1px solid rgba(0, 0, 0, 0.2) !important;
background: rgba(0, 0, 0, 0.025) !important;
}
#admonition_selector .title {
color: rgba(0, 0, 0, 0.6) !important;
}
/* menu */
.wy-menu li a,
.wy-menu-vertical li a {
font-size: 100%;
line-height: 1.6;
color: rgb(80, 80, 80);
}
.wy-menu-vertical li a:hover,
.wy-menu-vertical li a:focus,
.wy-menu-vertical li.current a:hover,
.wy-menu-vertical li.current a:focus {
color: black;
text-decoration: underline;
background: none;
}
.wy-menu-vertical li.current,
.wy-menu-vertical li.current a {
border: 0 none;
color: rgb(80, 80, 80);
font-weight: inherit;
background: none;
}
/* level-1 menu item */
.wy-menu-vertical li.toctree-l1.current > a,
.wy-menu-vertical li.toctree-l1.current > a:hover,
.wy-menu-vertical li.toctree-l1.current > a:focus {
background: rgb(230, 230, 230);
}
.wy-menu li.toctree-l1 > a:before {
font-family: FontAwesome;
content: "";
display: inline-block;
position: relative;
padding-right: 0.5em;
}
/* level-2 menu item */
.toctree-l2 {
font-size: 90%;
color: inherit;
}
.wy-menu-vertical .toctree-l2 a {
padding: 0.4045em 0.5em 0.4045em 2.8em !important;
}
.wy-menu-vertical li.toctree-l2.current > a,
.wy-menu-vertical li.toctree-l2.current > a:hover,
.wy-menu-vertical li.toctree-l2.current > a:focus,
.wy-menu-vertical li.toctree-l2.active > a,
.wy-menu-vertical li.toctree-l2.active > a:hover,
.wy-menu-vertical li.toctree-l2.active > a:focus {
background: rgb(242, 242, 242);
}
.wy-menu li.toctree-l2 > a:before {
font-family: FontAwesome;
content: "";
font-size: 30%;
display: inline-block;
position: relative;
bottom: 0.55em;
padding-right: 1.5em;
}
/* typography */
h1 {
color: rgb(160, 0, 0);
font-weight: 300;
margin-top: 36px !important;
}
h3 {
font-size: 135%;
}
h2, h3, h4, h5 {
font-weight: 200;
}
a, a:visited {
color: #2275b4;
text-decoration: none;
}
a:hover, a:focus {
color: #1c6094;
text-decoration: underline;
}
.rst-content .toc-backref {
color: inherit;
}
strong {
font-weight: 600;
}
/* code */
.codeblock,
pre.literal-block,
.rst-content .literal-block,
.rst-content pre.literal-block,
div[class^="highlight"] {
background: rgba(0, 0, 0, 0.05);
color: black;
}
/* notes */
.admonition {
color: rgba(0, 0, 0, 0.5) !important;
font-weight: 400;
}
.rst-content .note {
background: none !important;
}
.note > p.first.admonition-title {
background: rgba(0, 0, 0, 0.5) !important;
color: rgba(255, 255, 255, 0.9) !important;
}

@@ -1 +0,0 @@
{% extends "!layout.html" %}

@@ -1,148 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# flake8: noqa
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# instack-undercloud documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 25 10:56:57 2015.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import absolute_import, division, print_function
import sphinx_rtd_theme
__metaclass__ = type
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "TripleO"
copyright = "2016, RDO CI Team"
bug_tracker = "Bugzilla"
bug_tracker_url = "https://bugzilla.redhat.com"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "3.0.0"
# The full version, including alpha/beta/rc tags.
release = "3.0.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
html_static_path = ["_custom"]
html_style = "custom.css"
html_last_updated_fmt = "%b %d, %Y"
# Output file base name for HTML help builder.
htmlhelp_basename = "tripleo-documentor"
html_show_sourcelink = True
html_show_sphinx = True
html_show_copyright = True
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
rst_prolog = """
.. |project| replace:: %s
.. |bug_tracker| replace:: %s
.. |bug_tracker_url| replace:: %s
""" % (
project,
bug_tracker,
bug_tracker_url,
)

Binary file not shown (image, 92 KiB).

@@ -1,109 +0,0 @@
Copying over customized instackenv.json, network-environment.yaml, nic-configs
------------------------------------------------------------------------------
instackenv.json
^^^^^^^^^^^^^^^
The ``instackenv.json`` file is generated from a template in tripleo-quickstart:
<https://github.com/openstack/tripleo-quickstart/blob/master/roles/libvirt/setup/overcloud/tasks/main.yml#L91>.
A customized ``instackenv.json`` can be copied to the undercloud by overwriting the
``undercloud_instackenv_template`` variable with the path to the customized file.
Below is an explanation, and an example, of the ``instackenv.json`` file:
The JSON file describing your overcloud baremetal nodes is called
``instackenv.json``. The file should contain a JSON object with a single field,
``nodes``, containing a list of node descriptions.
Each node description should contain the required fields:
* ``pm_type`` - driver for Ironic nodes
* ``pm_addr`` - node BMC IP address (hypervisor address in case of virtual
environment)
* ``pm_user``, ``pm_password`` - node BMC credentials
Some fields are optional if you're going to use introspection later:
* ``mac`` - list of MAC addresses, optional for bare metal
* ``cpu`` - number of CPUs in the system
* ``arch`` - CPU architecture (common values are ``i386`` and ``x86_64``)
* ``memory`` - memory size in MiB
* ``disk`` - hard drive size in GiB
It is also possible (but optional) to set Ironic node capabilities directly
in the JSON file. This can be useful for assigning node profiles or setting
boot options at registration time:
* ``capabilities`` - Ironic node capabilities. For example::

    "capabilities": "profile:compute,boot_option:local"
For example::

    {
      "nodes": [
        {
          "pm_type":"pxe_ipmitool",
          "mac":[
            "fa:16:3e:2a:0e:36"
          ],
          "cpu":"2",
          "memory":"4096",
          "disk":"40",
          "arch":"x86_64",
          "pm_user":"admin",
          "pm_password":"password",
          "pm_addr":"10.0.0.8"
        },
        {
          "pm_type":"pxe_ipmitool",
          "mac":[
            "fa:16:3e:da:39:c9"
          ],
          "cpu":"2",
          "memory":"4096",
          "disk":"40",
          "arch":"x86_64",
          "pm_user":"admin",
          "pm_password":"password",
          "pm_addr":"10.0.0.15"
        },
        {
          "pm_type":"pxe_ipmitool",
          "mac":[
            "fa:16:3e:51:9b:68"
          ],
          "cpu":"2",
          "memory":"4096",
          "disk":"40",
          "arch":"x86_64",
          "pm_user":"admin",
          "pm_password":"password",
          "pm_addr":"10.0.0.16"
        }
      ]
    }

network-environment.yaml
^^^^^^^^^^^^^^^^^^^^^^^^
Similarly, the ``network-environment.yaml`` file is generated from a template,
<https://github.com/openstack/tripleo-quickstart/blob/master/roles/tripleo/undercloud/tasks/post-install.yml#L32>
A customized ``network-environment.yaml`` file can be copied to the undercloud by overwriting the
``network_environment_file`` variable with the path to the customized file.
nic-configs
^^^^^^^^^^^
By default, the virtual environment deployment uses the standard nic-configs files and there is no
ready-made section to copy custom nic-configs files.
The ``ansible-role-tripleo-overcloud-prep-config`` repo includes a task that copies the nic-configs
files if they are defined:
<https://github.com/redhat-openstack/ansible-role-tripleo-overcloud-prep-config/blob/master/tasks/main.yml#L15>

@@ -1,20 +0,0 @@
Customizing external network vlan
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If network-isolation is used in the deployment, tripleo-quickstart will, by default,
add a NIC on the external VLAN to the undercloud,
<https://github.com/openstack/tripleo-quickstart/blob/master/roles/tripleo/undercloud/templates/undercloud-install-post.sh.j2#L88>.
When working with a baremetal overcloud, the VLAN values must be customized with the correct
system-related values. The default VLAN values can be overwritten in a settings file passed
to tripleo-quickstart as in the following example:
::

    undercloud_networks:
      external:
        address: 10.0.7.13
        netmask: 255.255.255.192
        device_type: ovs
        type: OVSIntPort
        ovs_bridge: br-ctlplane
        ovs_options: '"tag=102"'
        tag: 102

@@ -1,20 +0,0 @@
Customizing undercloud.conf
===========================
The undercloud.conf file is copied to the undercloud VM using a template where the system values
are variables. <https://github.com/openstack/tripleo-quickstart/blob/master/roles/tripleo/undercloud/templates/undercloud.conf.j2>.
The tripleo-quickstart defaults for these variables are suited to a virtual overcloud,
but can be overwritten by passing custom settings to tripleo-quickstart in a settings file
(--extra-vars @<file_path>). For example:
::
undercloud_network_cidr: 10.0.5.0/24
undercloud_local_ip: 10.0.5.1/24
undercloud_network_gateway: 10.0.5.1
undercloud_undercloud_public_vip: 10.0.5.2
undercloud_undercloud_admin_vip: 10.0.5.3
undercloud_local_interface: eth1
undercloud_masquerade_network: 10.0.5.0/24
undercloud_dhcp_start: 10.0.5.5
undercloud_dhcp_end: 10.0.5.24
undercloud_inspection_iprange: 10.0.5.100,10.0.5.120

@@ -1,42 +0,0 @@
Install the dependencies
------------------------
You need some software available on your local system before you can run
`quickstart.sh`. You can install the necessary dependencies by running:
::
bash quickstart.sh --install-deps
Setup your virtual environment
------------------------------
tripleo-quickstart includes steps to set up libvirt on the undercloud host
machine and to create and set up the undercloud VM.
Deployments on baremetal hardware require steps from third-party repos,
in addition to the steps in tripleo-quickstart.
Below is an example of a complete call to quickstart.sh to run a full deploy
on baremetal overcloud nodes:
::
# $HW_ENV_DIR is the directory where the baremetal environment-specific
# files are stored
pushd $WORKSPACE/tripleo-quickstart
bash quickstart.sh \
--ansible-debug \
--bootstrap \
--working-dir $WORKSPACE/ \
--tags all \
--no-clone \
--teardown all \
--requirements quickstart-role-requirements.txt \
--requirements $WORKSPACE/$HW_ENV_DIR/requirements_files/$REQUIREMENTS_FILE \
--config $WORKSPACE/$HW_ENV_DIR/config_files/$CONFIG_FILE \
--extra-vars @$WORKSPACE/$HW_ENV_DIR/env_settings.yml \
--playbook $PLAYBOOK \
--release $RELEASE \
$VIRTHOST
popd

@@ -1,15 +0,0 @@
Additional steps preparing the environment for deployment
---------------------------------------------------------
Depending on the parameters of the baremetal overcloud environment in use,
other pre-deployment steps may be needed to ensure that the deployment succeeds.
<https://github.com/redhat-openstack/ansible-role-tripleo-overcloud-prep-baremetal/tree/master/tasks>
includes a number of these steps. Whether each step is run depends on variable values
that can be set per environment.
Some examples of additional steps are:
- Adding disk size hints
- Adding disk hints per node, supporting all Ironic hints
- Adjusting MTU values
- Rerunning introspection on failure

@@ -1,98 +0,0 @@
Settings for hardware environments
==================================
Throughout the documentation, there are example settings and custom files to
overwrite the virt defaults in TripleO Quickstart. It is recommended to use an
organized directory structure to store the settings and files for each hardware
environment.
Example Directory Structure
---------------------------
Each baremetal environment will need a directory structured as follows:
::
|-- environment_name
| |-- instackenv.json
| |-- vendor_specific_setup
| |-- <architecture diagram/explanation document>
| |-- network_configs
| | |--<network-isolation-type-1>
| | | |-- <network-environment.yaml file>
| | | |-- env_settings.yml
| | | |-- nic_configs
| | | | |-- ceph-storage.yaml
| | | | |-- cinder-storage.yaml
| | | | |-- compute.yaml
| | | | |-- controller.yaml
| | | | |-- swift-storage.yaml
| | | |-- config_files
| | | | |--config.yml
| | | | |--<other config files>
| | | |-- requirements_files
| | | | |--requirements1.yml
| | | | |--requirements2.yml
| | |--<network-isolation-type-2>
| | | |-- <network-environment.yaml file>
| | | |-- env_settings.yml
| | | |-- nic_configs
| | | | |-- ceph-storage.yaml
| | | | |-- cinder-storage.yaml
| | | | |-- compute.yaml
| | | | |-- controller.yaml
| | | | |-- swift-storage.yaml
| | | |-- config_files
| | | | |--config.yml
| | | | |--<other config files>
| | | |-- requirements_files
| | | | |--requirements1.yml
| | | | |--requirements2.yml
Explanation of Directory Contents
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- instackenv.json (required)
The instackenv.json file added at this top-level directory will replace the templated instackenv.json file for virt deployments.
- vendor_specific_setup (optional)
If any script needs to run to do environment setup before deployment, such as RAID configuration, it can be included here.
- architecture diagram (optional)
Although not required, if there is a diagram or document detailing the network architecture, it is useful to include that document or diagram here as all the settings and network isolation files will be based off of it.
- network_configs (required)
This directory is used to house the directories divided by network isolation type.
- network-isolation-type (required)
Even if deploying without network isolation, the files should be included in a 'none' directory.
There are file examples for the following network isolation types: single-nic-vlans, multiple-nics, bond-with-vlans, public-bond, none [1].
The network isolation types 'single_nic_vlans', 'bond_with_vlans', and 'multi-nic' will be deprecated.
[1] Names are derived from the `tripleo-heat-templates configuration <https://github.com/openstack/tripleo-heat-templates/tree/master/network/config>`_
- network-environment.yaml (required, unless deploying with no network isolation)
This file should be named after the network-isolation type, for example: bond_with_vlans.yaml. This naming convention follows the same pattern used by the default, virt workflow.
- env_settings.yaml (required)
This file stores all environment-specific settings to override default settings in TripleO quickstart and related repos, for example: the location of instackenv.json file, and setting 'overcloud_nodes' to empty so that quickstart does not create VMs for overcloud nodes. All settings required for undercloud.conf are included here.
- nic_configs (optional)
If the default nic-config files are not suitable for a particular hardware environment, specific ones can be added here and copied to the undercloud. Ensure that the network-environment.yaml file points to the correct location for the nic-configs to be used in deploy.
- config_files (required)
The deployment details are stored in the config file. Different config files can be created for scaling up nodes, HA, and other deployment combinations.
- requirements_files (required)
Multiple requirements files can be passed to quickstart.sh to include additional repos. For example, to include IPMI validation, the requirements file to include is `here <https://github.com/redhat-openstack/ansible-role-tripleo-validate-ipmi>`_
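To tie the pieces together, a hedged sketch of what an env_settings.yml for
such an environment might contain; the paths are placeholders built from the
example directory tree above, and only the variable names come from this
documentation::

    # hypothetical env_settings.yml for a baremetal environment
    undercloud_instackenv_template: "{{ local_working_dir }}/environment_name/instackenv.json"
    network_environment_file: "{{ local_working_dir }}/environment_name/network_configs/single-nic-vlans/single-nic-vlans.yaml"
    # no overcloud VMs are created when deploying on baremetal
    overcloud_nodes: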

@@ -1,20 +0,0 @@
TripleO Quickstart
==================
TripleO Quickstart is a fast and easy way to set up and configure your virtual environment for TripleO.
Further documentation can be found at https://github.com/openstack/tripleo-quickstart
A quick way to test that your virthost machine is ready to rock is:
::
ssh root@$VIRTHOST uname -a
Getting the script
------------------
You can download the `quickstart.sh` script with `wget`:
::
wget https://raw.githubusercontent.com/openstack/tripleo-quickstart/master/quickstart.sh

@@ -1,37 +0,0 @@
Networking
----------
With a virtual environment, tripleo-quickstart sets up the networking as part of the workflow.
For a baremetal deployment, the networking arrangement needs to be set up prior to working with tripleo-quickstart.
The overcloud nodes will be deployed from the undercloud machine and therefore the
machines need to have their network settings modified to allow the
overcloud nodes to be PXE booted using the undercloud machine.
As such, the setup requires that:
* All overcloud machines in the setup must support IPMI
* A management provisioning network is set up for all of the overcloud machines.
One NIC from every machine needs to be in the same broadcast domain of the
provisioning network. In the tested environment, this required setting up a new
VLAN on the switch. Note that you should use the same NIC on each of the
overcloud machines (for example: use the second NIC on each overcloud
machine). This is because during installation we will need to refer to that NIC
using a single name across all overcloud machines, e.g. em2.
* The provisioning network NIC should not be the same NIC that you are using
for remote connectivity to the undercloud machine. During the undercloud
installation, an Open vSwitch bridge will be created for Neutron and the
provisioning NIC will be bridged to the Open vSwitch bridge. As such,
connectivity would be lost if the provisioning NIC were also used for remote
connectivity to the undercloud machine.
* The overcloud machines can PXE boot off the NIC that is on the private VLAN.
In the tested environment, this required disabling network booting in the BIOS
for all NICs other than the one we wanted to boot and then ensuring that the
chosen NIC is at the top of the boot order (ahead of the local hard disk drive
and CD/DVD drives).
* For each overcloud machine you have: the MAC address of the NIC that will PXE
boot on the provisioning network, and the IPMI information for the machine (i.e. the IP
address of the IPMI NIC, and the IPMI username and password).
Refer to the following diagram for more information
.. image:: _images/TripleO_Network_Diagram_.jpg

@@ -1,22 +0,0 @@
Minimum System Requirements
---------------------------
By default, tripleo-quickstart requires 3 machines:
* 1 Undercloud (can be a Virtual Machine)
* 1 Overcloud Controller
* 1 Overcloud Compute
Commonly, deployments include HA (3 Overcloud Controllers) and multiple Overcloud Compute nodes.
Each Overcloud machine requires at least:
* 1 quad core CPU
* 8 GB free memory
* 60 GB disk space
The undercloud VM or baremetal machine requires:
* 1 quad core CPU
* 16 GB free memory
* 80 GB disk space

@@ -1,9 +0,0 @@
Validating the environment prior to deployment
----------------------------------------------
In a baremetal overcloud deployment there is a custom environment and many related settings
and steps. As such, it is worthwhile to validate the environment and custom configuration
files prior to deployment.
A collection of validation tools is available in the 'clapper' repo:
<https://github.com/rthallisey/clapper/>.

@@ -1,16 +0,0 @@
Virtual Undercloud VS. Baremetal Undercloud
-------------------------------------------
When deploying the overcloud on baremetal nodes, there is the option of using an undercloud
deployed on a baremetal machine or creating a virtual machine (VM) on that same baremetal machine
and using the VM to serve as the undercloud.
The advantages of using a VM undercloud are:
* The VM can be rebuilt and reinstalled without reprovisioning the entire baremetal machine
* The tripleo-quickstart default workflow is written for a Virtual Environment deployment.
Using a VM undercloud requires less customization of the default workflow.
.. note:: When using a VM undercloud, but baremetal nodes for the overcloud
   deployment, the ``overcloud_nodes`` variable in tripleo-quickstart
   must be overwritten and set to empty, as in the sketch below.
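A minimal sketch of that override in a settings file passed with
``--extra-vars`` (the empty value is the only required part)::

    # do not create overcloud VMs; baremetal nodes are used instead
    overcloud_nodes: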

@@ -1,82 +0,0 @@
Virtual Environment
===================
Quickstart can be used in a virtual environment using virtual machines instead
of actual baremetal. However, one baremetal machine (VIRTHOST) is still
needed to act as the host for the virtual machines.
Minimum System Requirements
---------------------------
By default, this setup creates 3 virtual machines:
* 1 Undercloud
* 1 Overcloud Controller
* 1 Overcloud Compute
.. note::

   Each virtual machine must consist of at least 4 GB of memory and 40 GB of disk
   space.

   The virtual machine disk files are thinly provisioned and will not take up
   the full 40 GB initially.
You will need a baremetal host machine (referred to as ``$VIRTHOST``) with at least
**16G** of RAM, preferably **32G**, and you must be able to ``ssh`` to the
virthost machine as root without a password from the machine running ansible.
Currently the virthost machine must be running a recent Red Hat-based Linux
distribution (CentOS 7, RHEL 7, Fedora 22 - only CentOS 7 is currently tested),
but we hope to add support for non-Red Hat distributions too.
Quickstart currently supports the following operating systems:
* CentOS 7 x86_64
TripleO Quickstart
------------------
TripleO Quickstart is a fast and easy way to set up and configure your virtual environment for TripleO.
Further documentation can be found at https://github.com/openstack/tripleo-quickstart
A quick way to test that your virthost machine is ready to rock is::
ssh root@$VIRTHOST uname -a
Getting the script
^^^^^^^^^^^^^^^^^^
You can download the `quickstart.sh` script with `wget`::
wget https://raw.githubusercontent.com/openstack/tripleo-quickstart/master/quickstart.sh
Install the dependencies
^^^^^^^^^^^^^^^^^^^^^^^^
You need some software available on your local system before you can run
`quickstart.sh`. You can install the necessary dependencies by running::
bash quickstart.sh --install-deps
Setup your virtual environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Deploy a virtual environment with the most basic default options by running::
bash quickstart.sh $VIRTHOST
There are many configuration options available in
tripleo-quickstart/config/general_config/ and also in
tripleo-quickstart-extras/config/general_config/
In the examples below the ha.yml config is located in the tripleo-quickstart repository
and the containers_minimal.yml is located in the tripleo-quickstart-extras repository.
All the configuration files will be installed to your working directory.
This does require the user to know what the working directory is set to. The variable OPT_WORKDIR
defaults to ~/.quickstart but can be overridden with -w or --working_dir.
Please review these options and use the appropriate configuration for your deployment.
Below are some examples::
bash quickstart.sh --config=~/.quickstart/config/general_config/ha.yml $VIRTHOST
bash quickstart.sh --config=~/.quickstart/config/general_config/containers_minimal.yml $VIRTHOST

@@ -1,87 +0,0 @@
Install the dependencies for TripleO Quickstart
-----------------------------------------------
You need some software available on your local system before you can run
`quickstart.sh`. You can install the necessary dependencies by running:
::
bash quickstart.sh --install-deps
Deploy TripleO using Quickstart on Openstack Instances
------------------------------------------------------
Deployments on Openstack instances require steps from third-party repos,
in addition to the steps in TripleO Quickstart.
Below is an example of a complete call to quickstart.sh to run a full deploy
on Openstack Instances launched via Openstack Virtual Baremetal (OVB/Heat):
::
# $HW_ENV_DIR is the directory where the environment-specific
# files are stored
pushd $WORKSPACE/tripleo-quickstart
bash quickstart.sh \
--ansible-debug \
--bootstrap \
--working-dir $WORKSPACE/ \
--tags all \
--no-clone \
--requirements quickstart-role-requirements.txt \
--requirements $WORKSPACE/$HW_ENV_DIR/requirements_files/$REQUIREMENTS_FILE \
--config $WORKSPACE/$HW_ENV_DIR/config_files/$CONFIG_FILE \
--extra-vars @$OPENSTACK_CLOUD_SETTINGS_FILE \
--extra-vars @$OPENSTACK_CLOUD_CREDS_FILE \
--extra-vars @$WORKSPACE/$HW_ENV_DIR/env_settings.yml \
--playbook $PLAYBOOK \
--release $RELEASE \
localhost
popd
Modify the settings
^^^^^^^^^^^^^^^^^^^
After the undercloud connectivity has been set up, the undercloud is installed and the
overcloud is deployed following the 'baremetal' workflow, using settings relevant to the
undercloud and baremetal nodes created on the Openstack cloud.
Below is a list of example settings (overwriting defaults) that would be passed to quickstart.sh:
::
# undercloud.conf
undercloud_network_cidr: 192.0.2.0/24
undercloud_local_ip: 192.0.2.1/24
undercloud_network_gateway: 192.0.2.1
undercloud_undercloud_public_vip: 192.0.2.2
undercloud_undercloud_admin_vip: 192.0.2.3
undercloud_local_interface: eth1
undercloud_masquerade_network: 192.0.2.0/24
undercloud_dhcp_start: 192.0.2.5
undercloud_dhcp_end: 192.0.2.24
undercloud_inspection_iprange: 192.0.2.25,192.0.2.39
overcloud_nodes:
undercloud_type: ovb
introspect: true
# file locations to be copied to the undercloud (for network-isolation deployment)
undercloud_instackenv_template: "{{ local_working_dir }}/instackenv.json"
network_environment_file: "{{ local_working_dir }}/openstack-virtual-baremetal/network-templates/network-environment.yaml"
baremetal_nic_configs: "{{ local_working_dir }}/openstack-virtual-baremetal/network-templates/nic-configs"
network_isolation: true
# used for access to external network
external_interface: eth2
external_interface_ip: 10.0.0.1
external_interface_netmask: 255.255.255.0
external_interface_hwaddr: fa:05:04:03:02:01
# used for validation
floating_ip_cidr: 10.0.0.0/24
public_net_pool_start: 10.0.0.50
public_net_pool_end: 10.0.0.100
public_net_gateway: 10.0.0.1

View File

@ -1,14 +0,0 @@
TripleO Quickstart
==================
TripleO Quickstart is a fast and easy way to set up and configure your virtual environment for TripleO.
Further documentation can be found at https://github.com/openstack/tripleo-quickstart.
Getting the script
------------------
You can download the `quickstart.sh` script with `wget`:
::
wget https://raw.githubusercontent.com/openstack/tripleo-quickstart/master/quickstart.sh

View File

@ -1,85 +0,0 @@
Running TripleO Quickstart on Openstack instances
-------------------------------------------------
By default, TripleO Quickstart uses libvirt to create virtual machines (VM) to serve
as undercloud and overcloud nodes for a TripleO deployment.
With some additional steps and modifications, TripleO Quickstart can set up an undercloud and
deploy the overcloud on instances launched on an Openstack cloud rather than libvirt VMs.
Beginning assumptions
^^^^^^^^^^^^^^^^^^^^^
This document details the workflow for running TripleO Quickstart on Openstack
instances. In particular, the example case is instances created via Heat and
`Openstack Virtual Baremetal <https://github.com/openstack/openstack-virtual-baremetal>`_.
The following are assumed to have been completed before following this document:
* An Openstack cloud exists and has been set up
(and configured as described in `Patching the Host Cloud`_
if the cloud is a pre-Mitaka release). From the Mitaka release onward, the cloud
should not require patching.
* The undercloud image under test has been uploaded to Glance in the Openstack cloud.
* A heat stack has been deployed with instances for the undercloud, bmc, and overcloud nodes.
* The nodes.json file has been created (it will later be copied to the undercloud as instackenv.json).
Below is an example `env.yaml` file used to create the heat stack that will support a
tripleo-quickstart undercloud and overcloud deployment with network isolation:
.. _Patching the Host Cloud: https://openstack-virtual-baremetal.readthedocs.io/en/latest/host-cloud/patches.html
::
parameters:
os_user: admin
os_password: password
os_tenant: admin
os_auth_url: http://10.10.10.10:5000/v2.0
bmc_flavor: m1.medium
bmc_image: 'bmc-base'
bmc_prefix: 'bmc'
baremetal_flavor: m1.large
baremetal_image: 'ipxe-boot'
baremetal_prefix: 'baremetal'
key_name: 'key'
private_net: 'private'
node_count: {{ node_count }}
public_net: 'public'
provision_net: 'provision'
# QuintupleO-specific params ignored by virtual-baremetal.yaml
undercloud_name: 'undercloud'
undercloud_image: '{{ latest_undercloud_image }}'
undercloud_flavor: m1.xlarge
external_net: '{{ external_net }}'
undercloud_user_data: |
#!/bin/sh
sed -i "s/no-port-forwarding.*sleep 10\" //" /root/.ssh/authorized_keys
#parameter_defaults:
## Uncomment and customize the following to use an existing floating ip
# undercloud_floating_ip_id: 'uuid of floating ip'
# undercloud_floating_ip: 'address of floating ip'
resource_registry:
## Uncomment the following to use an existing floating ip
# OS::OVB::UndercloudFloating: templates/undercloud-floating-existing.yaml
## Uncomment the following to use no floating ip
# OS::OVB::UndercloudFloating: templates/undercloud-floating-none.yaml
## Uncomment the following to create a private network
OS::OVB::PrivateNetwork: {{ templates_dir }}/private-net-create.yaml
## Uncomment to create all networks required for network-isolation.
## parameter_defaults should be used to override default parameter values
## in baremetal-networks-all.yaml
OS::OVB::BaremetalNetworks: {{ templates_dir }}/baremetal-networks-all.yaml
OS::OVB::BaremetalPorts: {{ templates_dir }}/baremetal-ports-all.yaml
## Uncomment to deploy a quintupleo environment without an undercloud.
# OS::OVB::UndercloudEnvironment: OS::Heat::None
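As an illustrative, non-authoritative sketch (the template path and stack name here are
assumptions), an environment file like this is consumed when the OVB/QuintupleO Heat
stack is created, for example with the openstack client::
openstack stack create -t openstack-virtual-baremetal/templates/quintupleo.yaml -e env.yaml quickstart-ovb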

View File

@ -1,45 +0,0 @@
---
name: collect_logs
namespace: tripleo
version: 0.0.1
authors:
- tripleo
readme: README.rst
build_ignore:
- "**/.mypy_cache"
- "**/.pytest_cache"
- "*.egg-info"
- .DS_Store
- .ansible
- .benchmarks
- .cache
- .eggs
- .envrc
- .github
- .gitignore
- .gitreview
- .mypy_cache
- .pre-commit-config.yaml
- .pytest_cache
- .quickstart
- .tox
- .vscode
- .yamllint
- ansible.cfg
- bindep.txt
- build
- dist
- docs/source/_build
- infrared_plugin
- module_utils
- modules
- pyproject.toml
- report.html
- roles/collect_logs/library
- scripts
- setup.cfg
- setup.py
- test-playbooks
- tox.ini
- zuul.d

View File

@ -1,75 +0,0 @@
---
# This file and plugin.spec are required by Infrared project
# This section collects data from the nodes
- hosts: "{{ other.openstack_nodes }}"
remote_user: "{{ other.remote_user }}"
ignore_errors: true
gather_facts: false
vars:
ansible_python_interpreter: "{{ py_interpreter.get('stdout_lines', ['/usr/libexec/platform-python']) | first | trim }}"
tasks:
- name: Detect python interpreter
raw: 'command -v python3 python2 /usr/libexec/platform-python'
register: py_interpreter
- name: Gather facts
setup:
- name: Remap infrared parameters to role variables
set_fact:
"{{ item.key }}": "{{ item.value }}"
with_dict: "{{ other }}"
- name: Ansible role collect logs
include_role:
name: collect_logs
# This section takes care of preparing the collected data for publishing
# and for publishing itself
- hosts: localhost
ignore_errors: true
tasks:
- name: Remap infrared parameters to role variables
set_fact:
"{{ item.key }}": "{{ item.value }}"
with_dict: "{{ other }}"
- name: Disable artcl_collect to prepare for publishing
set_fact:
# override artcl_collect to false because in ansible-role-collect-logs
# role collect and publish tasks are complementary
artcl_collect: false
when: artcl_publish|default(false)|bool
- name: Set path to a report server key
set_fact:
artcl_report_server_key: "-i {{ artcl_report_server_key }}"
when: artcl_report_server_key is defined
- name: Extract the logs
shell: |
cat *.tar | tar xf - -i
args:
chdir: "{{ artcl_collect_dir }}"
executable: /bin/bash
- name: Delete the tar file after extraction
shell: |
rm -r *.tar
args:
chdir: "{{ artcl_collect_dir }}"
executable: /bin/bash
- name: Ansible role collect logs
include_role:
name: collect_logs
when: artcl_publish|default(false)|bool
- name: Delete artifact files from localhost
file:
state: absent
path: "{{ artcl_collect_dir }}"
when: not disable_artifacts_cleanup | default(false) | bool

View File

@ -1,215 +0,0 @@
---
# This file and main.yml are required by Infrared project
config:
plugin_type: other
entry_point: main.yml
roles_path: ../roles/
subparsers:
ansible-role-collect-logs:
description: An Ansible role for aggregating logs from different nodes.
include_groups: ["Ansible options", "Common options"]
groups:
- title: Collecting
options:
openstack_nodes:
type: Value
help: |
OpenStack nodes ansible-role-collect-logs will be executed on.
default: all:!localhost
artcl_report_server_key:
type: Value
help: |
A path to a key for an access to the report server.
artcl_rsync_path:
type: Value
help: |
Specifies a server hostname and a path where the artifacts will
be stored. Example: username@hostname:/path/to/the/dest
artcl_collect_list:
type: ListValue
help: |
A list of files and directories to gather from the target.
Directories are collected recursively and need to end with a “/”
to get collected. Should be specified as a YAML list, e.g.:
infrared ansible-role-collect-logs \
--artcl_collect_list /etc/nova/,/home/stack/*.log,/var/log/
artcl_collect_list_append:
type: ListValue
help: |
A list of files and directories to be appended in the default
list. This is useful for users that want to keep the original
list and just add more relevant paths.
artcl_exclude_list:
type: ListValue
help: |
A list of files and directories to exclude from collecting. This
list is passed to rsync as an exclude filter and it takes
precedence over the collection list. For details see the
“FILTER RULES” topic in the rsync man page.
artcl_exclude_list_append:
type: ListValue
help: |
A list of files and directories to be appended in the default
exclude list. This is useful for users that want to keep the
original list and just add more relevant paths.
artcl_commands:
type: NestedDict
help: |
Collect commands executed by the role. Keep the dict sorted.
Example: --artcl_commands <group_type>.<command name>.cmd=<command>
Note: group types to be collected are defined by collect_log_types
Example2: --artcl_commands system.cpuinfo.cmd="cat /proc/cpuinfo"
artcl_commands_extras:
type: NestedDict
help: |
Commands to be executed, combined with artcl_commands.
artcl_find_maxdepth:
type: Value
help: |
Max depth passed to find via the -maxdepth arg; it only takes effect
when artcl_rsync_collect_list is set to False.
default: 4
artcl_find_max_size:
type: Value
help: |
Max file size passed to find via the -size arg; it only takes effect
when artcl_rsync_collect_list is set to False.
default: 256
artcl_rsync_collect_list:
type: Bool
help: |
If true, artcl_collect_list is given to rsync to collect
logs, otherwise it is given to find to create a list of files
to collect for rsync.
default: True
local_working_dir:
type: Value
help: |
Destination on the executor host where the logs will be collected
to.
default: /tmp/collect_logs
artcl_collect_dir:
type: Value
help: |
A directory on the executor host within local_working_dir where
the logs should be gathered, without a trailing slash.
artcl_build_url:
type: Value
help: |
Build URL used for fetching console.log
artcl_gzip:
type: Bool
help: |
When true, gathered files are gzipped one by one
in artcl_collect_dir, when false, a tar.gz file will contain all
the logs.
collect_log_types:
type: ListValue
help: |
A list of which type of logs will be collected, such as openstack
logs, network logs, system logs, etc. Acceptable values are
system, monitoring, network, openstack and container.
artcl_collect_sosreport:
type: Bool
help: |
If true, create and collect a sosreport for each host.
- title: Publishing
options:
artcl_publish:
type: Bool
help: |
If true, the role will attempt to rsync logs to the target
specified by artcl_rsync_url. Uses the BUILD_URL and BUILD_TAG vars
from the environment (set during a Jenkins job run) and requires
the next two variables to be set.
artcl_txt_rename:
type: Bool
help: |
Rename compressed text based files to end with txt.gz extension.
artcl_readme_path:
type: Value
help: |
Path to a readme file to be copied to base directory, containing
information regarding the logs.
artcl_readme_file:
type: Value
help: |
Name of the readme file
artcl_publish_timeout:
type: Value
help: |
The maximum seconds the role can spend uploading the logs.
artcl_use_rsync:
type: Bool
help: |
If true, the role will use rsync to upload the logs.
artcl_rsync_use_daemon:
type: Bool
help: |
If true, the role will use rsync daemon instead of ssh to
connect.
artcl_rsync_url:
type: Value
help: |
rsync target for uploading the logs. The localhost needs to have
passwordless authentication to the target or the PROVISIONER_KEY
var specified in the environment.
artcl_use_swift:
type: Bool
help: |
If true, the role will use swift object storage to publish
the logs.
artcl_swift_auth_url:
type: Value
help: |
The OpenStack auth URL for Swift.
artcl_swift_username:
type: Value
help: |
OpenStack username for Swift.
artcl_swift_password:
type: Value
help: |
Password for the Swift user.
artcl_swift_tenant_name:
type: Value
help: |
OpenStack tenant name for Swift.
artcl_swift_container:
type: Value
help: |
The name of the Swift container to use.
artcl_swift_delete_after:
type: Value
help: |
The number of seconds after which Swift will remove the uploaded
objects.
artcl_artifact_url:
type: Value
help: |
An HTTP URL at which the uploaded logs will be accessible after
upload.
influxdb_create_data_file:
type: Bool
help: |
Upload data to the InfluxDB database.
default: False
ara_enabled:
type: Bool
help: |
If true, the role will generate ara reports.
ara_generate_html:
type: Bool
help: |
Whether to generate ara html or not.
default: False
remote_user:
type: Value
help: |
Name of a remote user under which the tasks will be executed.
default: stack
disable_artifacts_cleanup:
type: Bool
help: |
Determines whether to keep collected files
default: False
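# A hedged, illustrative sketch (not an authoritative invocation): the options defined
# above map to CLI flags of this infrared plugin, so a hypothetical run that collects
# only system and network logs from an "undercloud" group might look like:
#   infrared ansible-role-collect-logs \
#     --openstack_nodes undercloud \
#     --collect_log_types system,network \
#     --local_working_dir /tmp/collect_logs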

View File

@ -1 +0,0 @@
../roles

View File

@ -1,59 +0,0 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
try:
from unittest.mock import patch
except ImportError:
from mock import patch # old pythons
def set_module_args(**args):
if "_ansible_remote_tmp" not in args:
args["_ansible_remote_tmp"] = "/tmp"
if "_ansible_keep_remote_files" not in args:
args["_ansible_keep_remote_files"] = False
args = json.dumps({"ANSIBLE_MODULE_ARGS": args})
basic._ANSIBLE_ARGS = to_bytes(args)
class AnsibleExitJson(Exception):
pass
class AnsibleFailJson(Exception):
pass
def exit_json(*args, **kwargs):
if "changed" not in kwargs:
kwargs["changed"] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs):
kwargs["failed"] = True
raise AnsibleFailJson(kwargs)
class ModuleTestCase:
def setup_method(self):
self.mock_module = patch.multiple(
basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json,
)
self.mock_module.start()
def teardown_method(self):
self.mock_module.stop()
def generate_name(test_case):
return test_case["name"]

View File

@ -1,190 +0,0 @@
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: ara_graphite
version_added: "1.0.0"
author: Red Hat (@RedHatOfficial)
short_description: Send ARA stats to graphite
description: >
Python ansible module to send ARA stats to graphite
options:
graphite_host:
description: >
The hostname of the Graphite server with optional port:
graphite.example.com:2004. The default port is 2003
required: True
type: str
graphite_prefix:
description:
- TBD
type: str
graphite_port:
description:
- TBD
default: 2003
type: int
ara_mapping:
description: >
Mapping task names to Graphite paths
required: True
type: dict
ara_data:
description: >
List of ARA results: ara result list --all -f json
required: True
type: str
only_successful_tasks:
description: >
Whether to send only successful tasks, ignoring skipped and failed,
by default True.
required: False
default: True
type: bool
"""
EXAMPLES = """
- name: Get ARA json data
shell: "{{ local_working_dir }}/bin/ara task list --all -f json"
register: ara_data
- ara_graphite:
graphite_host: 10.2.2.2
ara_data: "{{ ara_task_output.stdout }}"
ara_mapping:
- "Name of task that deploys overcloud": overcloud.deploy.seconds
"""
import ast # noqa: E402
import datetime # noqa: E402
import socket # noqa: E402
def stamp(x):
"""Convert ISO timestamp to Unix timestamp
:param x: string with timestamp
:return: string with Unix timestamp
"""
return datetime.datetime.strptime(x, "%Y-%m-%d %H:%M:%S").strftime("%s")
def task_length(x):
"""Calculate task length in seconds from "%H:%M:%S" format
:param x: datetime string
:return: number of seconds spent for task
"""
t = datetime.datetime.strptime(x, "%H:%M:%S")
return datetime.timedelta(
hours=t.hour, minutes=t.minute, seconds=t.second
).total_seconds()
def translate(mapping, json_data, only_ok):
"""Create data to send to Graphite server in format:
GraphitePath TaskDuration Timestamp
GraphitePath is taken from mapping dictionary according to task name.
:param mapping: dictionary of mapping task names to graphite paths
:param json_data: JSON data with tasks and times
:return: list of graphite data
"""
items = []
data = ast.literal_eval(json_data)
for task in data:
if not only_ok or (only_ok and task["Status"] in ["changed", "ok"]):
if task["Name"] in mapping:
timestamp, duration = stamp(task["Time Start"]), task_length(
task["Duration"]
)
items.append([mapping[task["Name"]], duration, timestamp])
return items
def send(data, gr_host, gr_port, prefix):
"""Actual sending of data to Graphite server via network
:param data: list of items to send to Graphite
:param gr_host: Graphite host (with optional port)
:param prefix: prefix to append before Graphite path
:return: True if sent successfully, otherwise False
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(3.0)
try:
s.connect((gr_host, gr_port))
except Exception as exc:
return False, str(exc)
for content in data:
# encode to bytes, as required by sockets under Python 3
s.send((prefix + " ".join([str(i) for i in content]) + "\n").encode())
s.close()
return True, ""
def send_stats(gr_host, gr_port, mapping, json_data, prefix, only_ok):
"""Send ARA statistics to Graphite server
:param gr_host: Graphite host (with optional port)
:param mapping: dictionary of mapping task names to graphite paths
:param json_data: JSON data with tasks and times
:param prefix: prefix to append before Graphite path
:return: JSON ansible result
"""
data2send = translate(mapping, json_data, only_ok)
response, reason = send(data2send, gr_host, gr_port, prefix)
if not response:
return {
"changed": False,
"failed": True,
"graphite_host": gr_host,
"msg": "Can't connect to Graphite: %s" % reason,
}
return {
"changed": True,
"graphite_host": gr_host,
"sent_data": data2send,
}
def main():
from ansible.module_utils.basic import AnsibleModule
module = AnsibleModule(
argument_spec=dict(
graphite_host=dict(required=True, type="str"),
graphite_port=dict(required=False, type="int", default=2003),
ara_mapping=dict(required=True, type="dict"),
ara_data=dict(required=True, type="str"),
graphite_prefix=dict(required=False, type="str", default=""),
only_successful_tasks=dict(required=False, type="bool", default=True),
)
)
result = send_stats(
module.params["graphite_host"],
module.params["graphite_port"],
module.params["ara_mapping"],
module.params["ara_data"],
module.params["graphite_prefix"],
module.params["only_successful_tasks"],
)
module.exit_json(**result)
if __name__ == "__main__":
main()

View File

@ -1,593 +0,0 @@
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: ara_influxdb
version_added: "1.0.0"
author: Red Hat (@RedHatOfficial)
short_description: Send ARA stats to InfluxDB
description: |
Python ansible module to send ARA stats to InfluxDB timeseries database
options:
influxdb_url:
description: |
The URL of HTTP API of InfluxDB server:
for example https://influxdb.example.com
required: True
type: str
influxdb_port:
description: |
The port of HTTP API of InfluxDB server, by default is 8086
required: True
type: int
influxdb_user:
description: |
User for authentication to InfluxDB server
required: False
type: str
influxdb_password:
description: |
Password for authentication to InfluxDB server
required: False
type: str
influxdb_db:
description: |
Database name in InfluxDB server for sending data to it
required: True
type: str
measurement:
description: |
Name of Influx measurement in database
required: True
type: str
data_file:
description: |
Path to file to save InfluxDB data in it
required: True
type: str
ara_data:
description: |
List of ARA results: ara result list --all -f json
required: True
type: str
only_successful_tasks:
description: |
Whether to send only successful tasks, ignoring skipped and failed,
by default True.
required: True
type: bool
mapped_fields:
description: |
Whether to use configured static map of fields and tasks,
by default True.
required: False
default: True
type: bool
standard_fields:
description: >
Whether to send standard fields of each job, i.e. times,
by default True.
required: False
default: True
type: bool
longest_tasks:
description: >
Whether to print only the longest tasks and how many,
by default 0.
required: False
type: int
"""
EXAMPLES = """
- name: Get ARA json data
shell: "{{ local_working_dir }}/bin/ara result list --all -f json"
register: ara_data
- name: Collect and send data to InfluxDB
ara_influxdb:
influxdb_url: https://influxdb.example.com
influxdb_port: 8086
influxdb_user: db_user
influxdb_password: db_password
influxdb_db: db_name
ara_data: "{{ ara_data.stdout }}"
measurement: test
data_file: /tmp/test_data
only_successful_tasks: true
mapped_fields: false
standard_fields: false
longest_tasks: 15
when: ara_data.stdout != "[]"
"""
import ast # noqa pylint: disable=C0413
import datetime # noqa pylint: disable=C0413
import json # noqa pylint: disable=C0413
import os # noqa pylint: disable=C0413
import re # noqa pylint: disable=C0413
SCHEME = "{measure},{tags} {fields} {timestamp}"
CUSTOM_MAP = {
"undercloud_install": ["undercloud-deploy : Install the undercloud"],
"prepare_images": [
"overcloud-prep-images : Prepare the overcloud images for deploy"
],
"images_update": [
"modify-image : Convert image",
"modify-image : Run script on image",
"modify-image : Close qcow2 image",
],
"images_build": ["build-images : run the image build script (direct)"],
"containers_prepare": [
"overcloud-prep-containers : Prepare for the containerized deployment"
],
"overcloud_deploy": ["overcloud-deploy : Deploy the overcloud"],
"pingtest": ["validate-simple : Validate the overcloud"],
"tempest_run": ["validate-tempest : Execute tempest"],
"undercloud_reinstall": [
"validate-undercloud : Reinstall the undercloud to check idempotency"
],
"overcloud_delete": [
"overcloud-delete : check for delete command to complete or fail"
],
"overcloud_upgrade": [
"overcloud-upgrade : Upgrade the overcloud",
"tripleo-upgrade : run docker upgrade converge step",
"tripleo-upgrade : run docker upgrade composable step",
],
"undercloud_upgrade": ["tripleo-upgrade : upgrade undercloud"],
}
class InfluxStandardTags(object):
"""InfluxStandardTags contains:
calculation of standard job describing parameters as:
* release
* nodepool provider cloud
* zuul pipeline name
* toci_jobtype
and rendering them in tags template
"""
def branch(self):
return os.environ.get("STABLE_RELEASE") or "master"
def cloud(self):
return os.environ.get("NODEPOOL_PROVIDER", "null")
def pipeline(self):
if os.environ.get("ZUUL_PIPELINE"):
if "check" in os.environ["ZUUL_PIPELINE"]:
return "check"
elif "gate" in os.environ["ZUUL_PIPELINE"]:
return "gate"
elif "periodic" in os.environ["ZUUL_PIPELINE"]:
return "periodic"
return "null"
def toci_jobtype(self):
return os.environ.get("TOCI_JOBTYPE", "null")
def render(self):
return ("branch=%s," "cloud=%s," "pipeline=%s," "toci_jobtype=%s") % (
self.branch(),
self.cloud(),
self.pipeline(),
self.toci_jobtype(),
)
class InfluxStandardFields(object):
"""InfluxStandardFields contains:
calculation of time of job steps as:
* whole job duration
* testing environment preparation
* quickstart files and environment preparation
* zuul host preparation
and rendering them in template
"""
def job_duration(self):
if os.environ.get("START_JOB_TIME"):
return int(datetime.datetime.utcnow().strftime("%s")) - int(
os.environ.get("START_JOB_TIME")
)
return 0
def logs_size(self):
# not implemented
return 0
def timestamp(self):
return datetime.datetime.utcnow().strftime("%s")
def testenv_prepare(self):
return os.environ.get("STATS_TESTENV", 0)
def quickstart_prepare(self):
return os.environ.get("STATS_OOOQ", 0)
def zuul_host_prepare(self):
if os.environ.get("DEVSTACK_GATE_TIMEOUT") and os.environ.get( # noqa: W504
"REMAINING_TIME"
):
return (
int(os.environ["DEVSTACK_GATE_TIMEOUT"])
- int(os.environ["REMAINING_TIME"])
) * 60
return 0
def render(self):
return (
"job_duration=%d,"
"logs_size=%d,"
"testenv_prepare=%s,"
"quickstart_prepare=%s,"
"zuul_host_prepare=%d,"
) % (
self.job_duration(),
self.logs_size(),
self.testenv_prepare(),
self.quickstart_prepare(),
self.zuul_host_prepare(),
)
class InfluxConfiguredFields(object):
"""InfluxConfiguredFields contains calculation:
* whole job duration
* testing environment preparation
* quickstart files and environment preparation
* zuul host preparation
and rendering them in template
"""
def __init__(self, match_map, json_data, only_ok=True):
"""Set up data for configured field
:param match_map {dict} -- Map of tasks from ansible playbook to
names of data fields in influxDB.
:param json_data: {dict} -- JSON data generated by ARA
:param only_ok=True: {bool} -- to count only passed tasks
"""
self.map = match_map
self.only_ok = only_ok
self.data = json_data
def task_maps(self):
times_dict = tasks_times_dict(self.data, self.only_ok)
tasks = {}
for i in self.map:
tasks[i] = sum([int(times_dict.get(k, 0)) for k in self.map[i]])
return tasks
def render(self):
tasks = self.task_maps()
result = ""
for task, timest in tasks.items():
result += "%s=%d," % (task, timest)
return result
class InfluxLongestFields(object):
"""InfluxLongestFields runs calculation of:
tasks that took the longest time.
The tasks could be from undercloud or overcloud playbooks.
"""
def __init__(self, json_data, only_ok=True, top=15):
"""Constructor for InfluxLongestFields
:param json_data: {dict} -- JSON data generated by ARA
:param only_ok=True: {bool} -- to count only passed tasks
:param top=15: {int} -- how many tasks to send to DB
"""
self.top = top
self.only_ok = only_ok
self.data = json_data
def collect_tasks(self):
tasks_dict = tasks_times_dict(self.data, self.only_ok)
return sorted(
[[k, v] for k, v in tasks_dict.items()], key=lambda x: x[1], reverse=True
)[: self.top]
def translate_names(self, names):
for i in names:
i[0] = re.sub(
r"[^0-9A-z\-_]+", "", i[0].replace(":", "__").replace(" ", "_")
)
i[1] = int(i[1])
return names
def render(self):
result = ""
for i in self.translate_names(self.collect_tasks()):
result += "{0}={1},".format(*i)
return result
class SovaFields(object):
"""SovaFields provides Sova calculated failure reasons."""
def __init__(self, sova_file):
"""Constructor for SovaFields
:param sova_file: {str} -- path to 'failures_file' of Sova
"""
self.sova_file = sova_file
def parse_sova_file(self):
if not os.path.exists(self.sova_file):
return ""
with open(self.sova_file) as f:
text = f.readlines()
reason = text[0]
reason_tag = text[1].split("Reason: ")[1]
return reason.strip(), reason_tag.strip()
def render(self):
scheme = 'sova_reason="%s",sova_tag="%s",'
res = self.parse_sova_file()
if not res:
return scheme % ("", "")
return scheme % (res[0], res[1])
def tasks_times_dict(tasks, only_ok=True):
times_dict = {}
for task in tasks:
if not only_ok or task["Status"] in ["changed", "ok"]:
name = task["Name"]
if name in times_dict:
times_dict[name].append(task["Duration"])
else:
times_dict[name] = [task["Duration"]]
# because some tasks are executed multiple times we need to count
# all of them and sum up all their durations
for i in times_dict:
times_dict[i] = sum([task_length(t) for t in times_dict[i]])
return times_dict
def task_length(x):
"""Calculate task length in seconds from "%H:%M:%S" format
Arguments:
x {string} -- a timestamp
Returns:
int -- total seconds for the task
"""
t = datetime.datetime.strptime(x, "%H:%M:%S")
return datetime.timedelta(
hours=t.hour, minutes=t.minute, seconds=t.second
).total_seconds()
def translate(
measure,
json_data,
only_ok,
mapped_fields=True,
standard_fields=True,
longest_tasks=0,
data_file=None,
):
"""Create data to send to InfluxDB server in format SCHEME
Fields keys are taken from ARA data according to task names.
:param measure: name of InfluxDB measurement
:param json_data: JSON data with tasks and times
:param only_ok: boolean, whether to count only successful tasks
:return: full InfluxDB scheme
"""
data = ast.literal_eval(json_data)
data = json.loads(data)
tags = InfluxStandardTags()
std_fields = InfluxStandardFields()
map_fields = InfluxConfiguredFields(
match_map=CUSTOM_MAP, json_data=data, only_ok=only_ok
)
longest_fields = InfluxLongestFields(
json_data=data, top=longest_tasks, only_ok=only_ok
)
fields = ""
if standard_fields:
fields += std_fields.render()
if mapped_fields:
fields += map_fields.render()
if longest_tasks:
fields += longest_fields.render()
if data_file:
sova_fields = SovaFields(
os.path.join(os.path.dirname(data_file), "failures_file")
)
fields += sova_fields.render()
fields = fields.rstrip(",")
result = SCHEME.format(
measure=measure,
tags=tags.render(),
fields=fields,
timestamp=std_fields.timestamp(),
)
return result
def create_file_with_data(data, path):
"""Create a file with InfluxDB data to send
:param data: data to write
:param path: path of the file
:return:
"""
with open(path, "a") as f:
f.write(data + "\n")
def send(file_path, in_url, in_port, in_user, in_pass, in_db):
"""Actual sending of data to InfluxDB server via network
:param file_path: path to file with data to send
:param in_url: InfluxDB URL
:param in_port: InfluxDB port
:param in_user: InfluxDB user
:param in_pass: InfluxDB password
:param in_db: InfluxDB database name
:return: True if sent successfully, otherwise False
"""
import requests # noqa pylint: disable=C0413
from requests.auth import HTTPBasicAuth # noqa pylint: disable=C0413
url = in_url.rstrip("/")
if in_port != 80:
url += ":%d" % in_port
url += "/write"
params = {"db": in_db, "precision": "s"}
if in_user:
if not in_pass:
if os.environ.get("INFLUXDB_PASSWORD"):
with open(os.environ["INFLUXDB_PASSWORD"]) as f:
in_pass = f.read().strip()
else:
return False, "InfluxDB password was not provided!"
auth = HTTPBasicAuth(in_user, in_pass)
else:
auth = None
with open(file_path, "rb") as payload:
req = requests.post(url, params=params, data=payload, auth=auth, verify=False)
if not req or req.status_code != 204:
return False, "HTTP: %s\nResponse: %s" % (req.status_code, req.content)
return True, ""
def send_stats(
in_url,
in_port,
in_user,
in_pass,
in_db,
json_data,
measure,
data_file,
only_ok,
mapped_fields=True,
standard_fields=True,
longest_tasks=0,
):
"""Send ARA statistics to InfluxDB server
:param in_url: InfluxDB URL
:param in_port: InfluxDB port
:param in_user: InfluxDB user
:param in_pass: InfluxDB password
:param in_db: InfluxDB database name
:param json_data: JSON data with tasks and times from ARA
:param measure: InfluxDB measurement name
:param data_file: path to file with data to send
:param only_ok: boolean, whether to count only successful tasks
:param mapped_fields: whether to use the configured map of fields and tasks
:param standard_fields: whether to send standard fields of each job, i.e. times
:param longest_tasks: how many of the longest tasks to send (0 to disable)
:return: JSON ansible result
"""
data2send = translate(
measure,
json_data,
only_ok,
mapped_fields,
standard_fields,
longest_tasks,
data_file,
)
create_file_with_data(data2send, data_file)
if in_url:
response, reason = send(data_file, in_url, in_port, in_user, in_pass, in_db)
if not response:
return {
"changed": False,
"failed": True,
"influxdb_url": in_url,
"msg": reason,
}
return {
"changed": True,
"influxdb_url": in_url,
"sent_data": data2send,
}
else:
return {
"changed": True,
"data_file": data_file,
"sent_data": data2send,
}
def main():
module = AnsibleModule( # noqa
argument_spec=dict(
influxdb_url=dict(required=True, type="str"),
influxdb_port=dict(required=True, type="int"),
influxdb_user=dict(required=False, type="str", default=None),
influxdb_password=dict(
required=False, type="str", default=None, no_log=True
),
influxdb_db=dict(required=True, type="str"),
ara_data=dict(required=True, type="str"),
measurement=dict(required=True, type="str"),
data_file=dict(required=True, type="str"),
only_successful_tasks=dict(required=True, type="bool"),
mapped_fields=dict(default=True, type="bool"),
standard_fields=dict(default=True, type="bool"),
longest_tasks=dict(default=0, type="int"),
)
)
result = send_stats(
module.params["influxdb_url"],
module.params["influxdb_port"],
module.params["influxdb_user"],
module.params["influxdb_password"],
module.params["influxdb_db"],
module.params["ara_data"],
module.params["measurement"],
module.params["data_file"],
module.params["only_successful_tasks"],
module.params["mapped_fields"],
module.params["standard_fields"],
module.params["longest_tasks"],
)
module.exit_json(**result)
# pylint: disable=W0621,W0622,W0614,W0401,C0413
from ansible.module_utils.basic import * # noqa
if __name__ == "__main__":
main()

View File

@ -1,81 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "0.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
module: flatten_nested_dict
author: Red Hat (@RedHatOfficial)
version_added: '2.7.0'
short_description: Flattens a nested dictionary into a list
notes: []
description:
- Flattens the commands nested dictionary into a list of commands.
options:
data:
description:
- Nested dictionary
required: True
type: dict
"""
EXAMPLES = """
- name: Determine commands to run
flatten_nested_dict:
data:
system:
cmd: df
"""
RETURN = """
data:
description: Commands to be executed
returned: success
type: list
sample:
- 'cmd': 'df'
'capture_file': '/var/log/extra/df.txt'
'name': 'df'
'group': 'system'
"""
from ansible.module_utils.basic import AnsibleModule # noqa: E402
def main():
result = {"data": [], "changed": False}
module = AnsibleModule(argument_spec={"data": {"type": "dict", "required": True}})
try:
for group, commands in module.params["data"].items():
for cmd_name, cmd_dict in commands.items():
cmd_dict["name"] = cmd_name
cmd_dict["group"] = group
result["data"].append(cmd_dict)
except Exception as e:
module.fail_json(msg=str(e))
module.exit_json(**result)
if __name__ == "__main__":
main()

View File

@ -1,301 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "0.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
module: sova
author: Sagi Shnaidman (@sshnaidm)
version_added: '2.7.0'
short_description: Parse CI jobs files for known failures
notes: []
description:
- Parse CI job files and find there known patterns of failures
requirements:
- "Better to use with 'regex' module installed"
options:
files:
description:
- Dictionary mapping pattern section names to file locations on the host.
Patterns in the config file are divided into sections; each section name
is matched to a file path on the host, and the patterns from that section
are searched for in the given file.
required: True
type: dict
result:
description:
- Path to file where to write result message.
type: path
result_file_dir:
description:
- Directory in which to create a file whose name encodes the result message.
For example, for the pattern 'Overcloud failed on host' the file
Overcloud_failed_on_host.log will be created in this directory.
This makes the failure reason visible without actually opening the file.
type: path
config:
description: Dictionary with the sova patterns configuration, containing regexes and patterns keys.
type: dict
"""
EXAMPLES = """
- name: Run sova task
sova:
files:
console: /var/log/job-output.txt.gz
errors: /var/log/errors.txt.gz
"ironic-conductor": /var/log/ironic-conductor.log.txt.gz
syslog: /var/log/journal.txt.gz
logstash: /var/log/logstash.txt.gz
bmc: /var/log/bmc-console.log
result: /home/zuul/result_file
result_file_dir: /home/zuul/workspace/logs/
"""
RETURN = """
processed_files:
description:
- Files which have been processed by module
returned: if changed
type: list
sample: [
"/tmp/var/log/job-output.txt.gz",
"/tmp/var/log/errors.txt.gz",
"/tmp/var/log/ironic-conductor.log.txt.gz"
]
message:
description:
- Text with all messages about failures
returned: if changed
type: list
sample: 'Overcloud stack: FAILED.'
tags:
description:
- Tags of patterns which were found in files
returned: if changed
type: list
sample: ["info"]
file_name_written:
description:
- Path of file which written with message as filename
returned: if changed
type: str
sample: '/var/log/_Overcloud_stack__FAILED.log'
file_written:
description:
- Path of file where written result message and reason.
returned: if changed
type: str
sample: '/var/log/result_file'
"""
import gzip # noqa: E402
import logging # noqa: E402
import os # noqa: E402
from copy import deepcopy # noqa: E402
from ansible.module_utils.basic import AnsibleModule # noqa: E402
try:
import regex as regex_module
except ImportError:
import re as regex_module
__metaclass__ = type
logging.basicConfig(
format=(
"%(asctime)s - %(name)s - %(levelname)s - "
"%(module)s.%(funcName)s:%(lineno)d - %(message)s"
)
)
log = logging.getLogger("parser")
log.setLevel(logging.ERROR)
class Pattern(object):
def __init__(self, data):
self.data = data
self.load_yaml()
self.setup_regexes()
self.setup_patterns()
def load_yaml(self):
import yaml
if isinstance(self.data, dict):
self.config = self.data
else:
self.config = yaml.safe_load(self.data)
def setup_regexes(self):
self.regexes = {}
if self.config:
for regexp in self.config.get("regexes", []):
flags = []
if regexp.get("multiline"):
flags.append(regex_module.MULTILINE)
self.regexes[regexp.get("name")] = regex_module.compile(
r"{0}".format(regexp.get("regex")), *flags
)
def setup_patterns(self):
self._patterns = self.config.get("patterns", {})
if self._patterns:
for key in self._patterns:
for p in self._patterns[key]:
if p["pattern"] in self.regexes:
p["pattern"] = self.regexes[p["pattern"]]
if p["logstash"] in self.regexes:
p["logstash"] = self.regexes[p["logstash"]]
@property
def patterns(self):
return self._patterns
def line_match(pat, line, exclude=None):
if isinstance(pat, str):
return pat in line
found = pat.search(line)
if not found:
return False
if found.groups():
if exclude:
if any(i in found.group(1) for i in exclude):
return False
return found.group(1)
return True
def parse(text_file, patterns):
ids = []
msgs = []
if text_file.split(".")[-1] == "gz":
open_func = gzip.open
else:
open_func = open
with open_func(text_file, "rt") as finput:
text = finput.read()
for p in patterns:
line_matched = line_match(p["pattern"], text, exclude=p.get("exclude"))
if line_matched:
log.debug("Found pattern %s in file %s", repr(p), text_file)
ids.append(p["id"])
msgs.append(p["msg"].format(line_matched))
return list(set(ids)), list(set(msgs))
def format_msg_filename(text):
for s in (
" ",
":",
".",
"/",
",",
"'",
):
text = text.replace(s, "_")
text = text[:100]
return "_" + text.rstrip("_") + ".log"
def main():
module = AnsibleModule(
argument_spec=dict(
config=dict(type="dict", default={}),
files=dict(type="dict", required=True),
result=dict(type="path"),
result_file_dir=dict(type="path"),
)
)
if not module.params["files"]:
module.fail_json(msg="Files for logs parsing have to be provided!")
existing_files = []
for pattern_file in module.params["files"]:
file_ = module.params["files"][pattern_file]
if os.path.exists(file_):
existing_files.append(file_)
if not existing_files:
results = {"processed_files": [], "changed": False}
module.exit_json(**results)
dict_patterns = deepcopy(module.params["config"])
pattern = Pattern(dict_patterns)
PATTERNS = pattern.patterns
for name in module.params["files"]:
if name not in PATTERNS:
module.fail_json(
msg="File name %s wasn't found in [%s]"
% (name, ", ".join(list(PATTERNS.keys())))
)
messages, tags = [], []
for name, file_ in module.params["files"].items():
if module.params["files"][name] not in existing_files:
continue
ids, msgs = parse(file_, PATTERNS[name])
found = [i for i in PATTERNS[name] if i["id"] in ids]
msg_tags = [i["tag"] for i in found if i.get("tag")]
messages += msgs
tags += msg_tags
messages = list(set(messages))
tags = list(set(tags))
if "infra" in tags:
reason = "infra"
elif "code" in tags:
reason = "code"
else:
reason = "unknown"
text = " ".join(messages) or "No failure reason found"
file_name = format_msg_filename(text)
result = {"changed": True, "processed_files": existing_files}
result.update({"message": text})
result.update({"tags": tags})
if module.params["result"] and messages:
try:
with open(module.params["result"], "w") as f:
f.write(text + "\n")
f.write("Reason: " + reason + "\n")
result.update({"file_written": module.params["result"]})
except Exception as e:
module.fail_json(
msg="Can't write result to file %s: %s"
% (module.params["result"], str(e))
)
if module.params["result_file_dir"]:
log_file = os.path.join(module.params["result_file_dir"], file_name)
try:
with open(log_file, "w") as f:
f.write(text + "\n")
f.write("Reason: " + reason + "\n")
result.update({"file_name_written": log_file})
except Exception as e:
module.fail_json(
msg="Can't write result to file %s: %s" % (log_file, str(e))
)
module.exit_json(**result)
if __name__ == "__main__":
main()

View File

@ -1,2 +0,0 @@
pbr>=1.6
ansible-core>=2.11,<2.12

View File

@ -1,2 +0,0 @@
collections:
- name: ansible.posix # needed by synchronize

View File

@ -1,510 +0,0 @@
---
# formerly in tripleo-quickstart "common" and tripleo-quickstart-extras "extras-common"
# zuul does not allow use of the env lookup plugin (security), so we cannot use it in
# defaults, but we can load non-zuul values from vars.
local_working_dir: "{{ zuul_work_dir | default('~') }}/.quickstart"
artcl_collect_dir: "{{ local_working_dir }}/collected_files"
working_dir: "/home/{{ undercloud_user }}"
undercloud_user: stack
artcl_build_tag: "{{ zuul.build | default('') }}" # overridden by vars/unsecure.yml
artcl_collect: true
artcl_collect_list:
- /var/lib/container-config-scripts/
- /var/lib/heat-config/
- /var/lib/kolla/config_files
- /var/lib/mistral/
- /var/lib/nova/instances/*/console.log
- /var/lib/oooq-images/*/*.log
- /var/lib/oooq-images/*/*.sh
- /var/lib/pacemaker/cib/cib*
- /var/lib/pacemaker/pengine/pe-input*
- /var/log/atop*
- /var/log/dmesg.txt
- /var/log/host_info.txt
- /var/log/journal.txt
- /var/log/postci.txt
- /var/log/secure
- /var/log/bootstrap-subnodes.log
- /var/log/unbound.log
- /var/log/{{ ansible_pkg_mgr }}.log
- /var/log/cloud-init*.log
- /var/log/aodh/
- /var/log/audit/
- /var/log/barbican/
- /var/log/ceilometer/
- /var/log/ceph/
- /var/log/cinder/
- /var/log/cloudkitty/
- /var/log/cluster/
- /var/log/config-data/
- /var/log/congress/
- /var/log/containers/
- /var/log/deployed-server-enable-ssh-admin.log
- /var/log/deployed-server-os-collect-config.log
- /var/log/designate/
- /var/log/dmesg/
- /var/log/extra/
- /var/log/ec2api/
- /var/log/glance/
- /var/log/gnocchi/
- /var/log/heat/
- /var/log/heat-launcher/
- /var/log/horizon/
- /var/log/httpd/
- /var/log/ironic/
- /var/log/ironic-inspector/
- /var/log/libvirt/
- /var/log/keystone/
- /var/log/manila/
- /var/log/mariadb/
- /var/log/mistral/
- /var/log/monasca/
- /var/log/murano/
- /var/log/neutron/
- /var/log/nova/
- /var/log/novajoin/
- /var/log/octavia/
- /var/log/openvswitch/
- /var/log/ovn/
- /var/log/pacemaker/
- /var/log/panko/
- /var/log/qdr/
- /var/log/rabbitmq/
- /var/log/redis/
- /var/log/sahara/
- /var/log/sensu/
- /var/log/swift/
- /var/log/tacker/
- /var/log/tempest/
- /var/log/trove/
- /var/log/tripleo-container-image-prepare.*.log
- /var/log/vitrage/
- /var/log/watcher/
- /var/log/zaqar/
- /var/tmp/sosreport*
- /etc/
- /home/*/undercloud-ansible-*
- /home/*/.instack/install-undercloud.log
- /home/*/*rc
- /home/*/*rc.v3
- /home/*/*.log
- /home/*/*.json
- /home/*/*.conf
- /home/*/*.yml
- /home/*/*.yaml
- /home/*/*.sh
- /home/*/*.rst
- /home/*/*.pem
- /home/*/network-environment.yaml
- /home/*/skip_file
- /home/*/*.subunit
- /home/*/tempest/*.xml
- /home/*/tempest/*.html
- /home/*/tempest/*.log
- /home/*/tempest/etc/*.conf
- /home/*/tempest/*.subunit
- /home/*/tempest/*.json
- /home/*/tripleo-heat-installer-templates/
- /home/*/local_tht/
- /home/*/gating_repo.tar.gz
- /home/*/browbeat/
- /usr/share/openstack-tripleo-heat-templates/
- /home/*/overcloud-deploy
- /home/*/tripleo-heat-templates/
- /home/*/.ssh/config
- /tmp/tripleoclient*
# The next 2 items are temporary until config-download is executed
# from a Mistral workflow (WIP in Queens)
- /home/*/inventory
- /home/*/inventories
- /home/*/tripleo-config-download/
artcl_exclude_list:
- /etc/udev/hwdb.bin
- /etc/puppet/modules
- /etc/project-config
- /etc/services
- /etc/selinux/targeted
- /etc/pki/ca-trust/extracted
- /etc/alternatives
- /var/log/journal
- overlay*
- root
- console*primary.log
- anaconda*
# if true, a rsync filter file is generated for rsync to collect files,
# if false, find is used to generate list of files to collect for rsync.
artcl_rsync_collect_list: true
artcl_find_maxdepth: 4
# size in MBs
artcl_find_max_size: 256
# os specific values loaded from tasks/main.yaml
artcl_collect_pkg_list: []
# For upstream logs the compression is handled
# by the storage servers themselves, so this
# can be false. With other storage servers
# the role must compress the files itself.
artcl_gzip: false
## publishing related vars
artcl_publish: false
artcl_env: default
artcl_readme_path: "{{ working_dir }}/src/opendev.org/openstack/tripleo-ci/docs/tripleo-quickstart-logs.html"
artcl_readme_file: "{{ artcl_collect_dir }}/README.html"
artcl_txt_rename: false
# give up log upload after 30 minutes
artcl_publish_timeout: 1800
artcl_artifact_url: "file://{{ local_working_dir }}"
artcl_full_artifact_url: "{{ artcl_artifact_url }}/{{ artcl_build_tag }}/"
artcl_use_rsync: false
artcl_rsync_use_daemon: false
artcl_container_collect_timeout: 1800 # 30 mins
artcl_use_swift: false
# clean up the logs after 31 days
artcl_swift_delete_after: 2678400
artcl_swift_container: logs
artcl_use_zuul_swift_upload: false
artcl_zuul_swift_upload_path: /usr/local/bin
artcl_collect_sosreport: false
artcl_sosreport_options: "--batch"
# User defined commands to be executed, combined with default ones.
artcl_commands_extras: {}
# Used to determine which ignore_errors strategy to use. Defaults to true
# but for testing purposes we may want to make it false, to avoid false
# positives.
artcl_ignore_errors: true
# Implicit commands executed by the role. Keep the dict sorted.
artcl_commands:
system:
cpuinfo:
cmd: |
cat /proc/cpuinfo
echo ""
grep -s -H '' /sys/module/{kvm_intel,kvm_amd}/parameters/nested
capture_file: /var/log/extra/cpuinfo.txt
dmesg:
cmd: dmesg
meminfo:
cmd: cat /proc/meminfo
capture_file: /var/log/extra/meminfo.txt
pcs:
cmd: |
if type pcs &>/dev/null; then
echo "+ pcs status"
pcs status
echo "+ pcs config"
pcs config
echo "+ pcs cluster cib"
pcs cluster cib
fi
pcs_cpu_throttle:
cmd: |
if type pcs &>/dev/null; then
echo "+ high CPU throttling events"
grep throttle_check_thresholds /var/log/pacemaker/pacemaker.log
fi
chrony:
cmd: |
echo "+ chrony tracking"
chronyc tracking
echo "+ chrony sources"
chronyc sources -a -v
echo "+ chrony source stats"
chronyc sourcestats -a -v
echo "+ chrony activity"
chronyc activity
ipa:
cmd: |
if type ipa &>/dev/null; then
echo "+ ipa env"
ipa env
echo "+ ipa config-show"
ipa config-show --all
echo "+ ipa dnsconfig-show"
ipa dnsconfig-show --all
fi
swaps:
cmd: cat /proc/swaps
capture_file: /var/log/extra/swaps.txt
vmstat:
cmd: vmstat -s
ps:
cmd: ps axfo %mem,size,rss,vsz,pid,args
rpm-list:
cmd: rpm -qa | sort -f
package-list-installed:
cmd: "{{ ansible_pkg_mgr }} list installed"
repolist:
cmd: "{{ ansible_pkg_mgr }} repolist -v"
dnf-module-list:
cmd: "{{ ansible_pkg_mgr }} module list"
when: ansible_distribution_major_version|int >= 8
dnf-module-list-enabled:
cmd: "{{ ansible_pkg_mgr }} module list --enabled"
when: ansible_distribution_major_version|int >= 8
record_available_packages:
# the timeout is like a fail-safe from collect_logs point of view,
# we encountered an issue when repolist query took several minutes
# which led to timeouts and unfinished log collections
cmd: |
timeout 120 repoquery -a --qf "%{ui_from_repo} %{name}" | sort
capture_file: /var/log/extra/all_available_packages.txt
selinux:
cmd: |
/usr/sbin/sestatus -v
/usr/sbin/sestatus -b
installed_crons:
cmd: |
for user in $(cut -f1 -d':' /etc/passwd); do
echo $user; crontab -u $user -l | grep -v '^$\|^\s*\#\|^\s*PATH'
done
import-delorean:
# used by OSP Release Engineering to import into internal builds
cmd: >
repoquery --disablerepo='*' --enablerepo='delorean'
-a --qf '%{sourcerpm}'|sort -u|sed 's/.src.rpm//g'
import-delorean-deps:
# used by OSP Release Engineering to import into internal builds
cmd: >
repoquery --disablerepo='*' --enablerepo='delorean-*-deps'
-a --qf '%{sourcerpm}'|sort -u|sed 's/.src.rpm//g'
failed_services:
cmd: >
systemctl -t service --failed --no-legend | awk '{print $1}'
| xargs -r -n1 journalctl -u
lsof:
cmd: >
lsof -P -n &> /var/log/extra/lsof.txt
pstree:
cmd: pstree -p
sysctl:
cmd: sysctl -a
haproxy-stats:
cmd: >
pgrep haproxy && \
test -S /var/lib/haproxy/stats && \
echo 'show info;show stat;show table' | socat /var/lib/haproxy/stats stdio || \
echo "No HAProxy or no socket on host"
lsmod:
cmd: lsmod
lspci:
cmd: lspci
pip:
cmd: "{{ ansible_python.executable }} -m pip list"
lvm:
cmd: |
vgs
pvs
lvs
disk:
cmd: |
df -h
shell_variables:
cmd: |
set
services:
cmd: |
systemctl list-units --full --all
systemctl status "*"
selinux_denials:
cmd: >
grep -i denied /var/log/audit/audit*
selinux_consolidated_avc:
cmd: >
/usr/bin/perl /usr/local/bin/consolidate-avc.pl /var/log/extra/selinux_denials.txt
selinux_denials_detail:
cmd: >
sealert -a /var/log/extra/selinux_consolidated_avc.txt
seqfaults:
cmd: >
grep -v ansible-command /var/log/messages | grep segfault
oom-killers.txt:
cmd: |
grep -v ansible-command /var/log/messages | grep oom-killer
delorean-logs:
cmd: >
if [[ -e /home/{{ undercloud_user }}/DLRN/data/repos ]]; then
rm -rf /tmp/delorean_logs && mkdir /tmp/delorean_logs;
find /home/{{ undercloud_user }}/DLRN/data/repos/ -name '*.log' -exec cp --parents \{\} /tmp/delorean_logs/ \; ;
find /home/{{ undercloud_user }}/DLRN/ -name 'projects.ini' -exec cp \{\} /tmp/delorean_logs/ \; ;
find /tmp/delorean_logs -name '*.log' -exec gzip \{\} \; ;
find /tmp/delorean_logs -name '*.log.gz' -exec sh -c 'x="{}"; mv "$x" "${x%.log.gz}.log.txt.gz"' \; ;
rm -rf {{ artcl_collect_dir }}/delorean_logs && mkdir {{ artcl_collect_dir }}/delorean_logs;
mv /tmp/delorean_logs/home/{{ undercloud_user }}/DLRN/data/repos/* {{ artcl_collect_dir }}/delorean_logs/;
mv /tmp/delorean_logs/projects.ini {{ artcl_collect_dir }}/delorean_logs/;
fi
capture_disable: true
journal:
cmd: journalctl --since=-4h --lines=100000
journal_errors:
cmd: journalctl --since=-4h -p err --output=short-iso
rabbitmq:
cmd: |
if type pcs &>/dev/null; then
echo "+ rabbitmq cookie"
podman exec rabbitmq-bundle-podman-0 rabbitmqctl eval 'erlang:get_cookie().'
echo "+ rabbitmq report"
podman exec rabbitmq-bundle-podman-0 rabbitmqctl report
fi
slow_requests:
cmd: >
echo "+ slow req_ids";
find /var/log/containers -type f -name '*.log'
-not -path '*/stdouts/*' -not -path '*httpd*'
-exec grep -HE '(time:\s?|held |waited )([3-9][0-9]\.|[0-9]{3,}\.)' {} \;;
echo "+ slow haproxy api calls";
grep -E '([0-9]+\/){3,}[3-9][0-9]{4,}/?' /var/log/containers/haproxy/haproxy.log
monitoring: {}
network:
ovn:
cmd: |
if type ovs-vsctl &>/dev/null; then
function pod_exec() {
timeout -k 10 5 sudo podman exec ovn_controller $@
}
function sbctl() {
SBDB=$(sudo ovs-vsctl get open . external_ids:ovn-remote | sed -e 's/\"//g');
pod_exec ovn-sbctl --db=$SBDB $1
}
function nbctl() {
NBDB=$(sudo ovs-vsctl get open . external_ids:ovn-remote | sed -e 's/\"//g' | sed -e 's/6642/6641/g');
pod_exec ovn-nbctl --db=$NBDB $1
}
echo "Output of ovs-vsctl get open . external_ids"
pod_exec ovs-vsctl get open . external_ids
echo "\nOutput of ovn-sbctl show"
sbctl show
echo "\nOutput of ovn-nbctl show"
nbctl show
echo "\nOutput of ovn-sbctl lflow-list"
sbctl lflow-list
fi
openstack:
baremetal_list:
cmd: |
if [[ -e {{ working_dir }}/stackrc ]]; then
source {{ working_dir }}/stackrc
openstack baremetal node list --long
fi
nova_list:
cmd: |
if [[ -e {{ working_dir }}/stackrc ]]; then
source {{ working_dir }}/stackrc
openstack server list --long
fi
openstack-status:
cmd: |
if type openstack-status &> /dev/null; then
. ~/keystonerc_admin
openstack-status
fi
when: "'controller' in inventory_hostname"
container: {}
# Doc generation specific vars
artcl_gen_docs: false
artcl_create_docs_payload:
included_deployment_scripts: []
included_static_docs: []
table_of_contents: []
artcl_docs_source_dir: "{{ local_working_dir }}/share/ansible/roles/collect-logs/docs/source"
artcl_docs_build_dir: "{{ artcl_collect_dir }}/docs/build"
artcl_verify_sphinx_build: false
artcl_logstash_files:
- /home/*/container_image_build.log
- /home/*/deployed_server_prepare.txt
- /home/*/docker_journalctl.log
- /home/*/failed_deployment_list.log
- /home/*/hostname.sh.log
- /home/*/install_built_repo.log
- /home/*/install_packages.sh.log
- /home/*/install-undercloud.log
- /home/*/ironic-python-agent.log
- /home/*/nova_actions_check.log
- /home/*/overcloud_create_ssl_cert.log
- /home/*/overcloud_custom_tht_script.log
- /home/*/overcloud_delete.log
- /home/*/overcloud_deploy.log
- /home/*/overcloud_deploy_post.log
- /home/*/overcloud_failed_prepare_resources.log
- /home/*/overcloud-full.log
- /home/*/build-err.log
- /home/*/overcloud_image_build.log
- /home/*/overcloud_image_upload.log
- /home/*/overcloud_import_nodes.log
- /home/*/overcloud_introspect.log
- /home/*/overcloud_prep_containers.log
- /home/*/overcloud_prep_images.log
- /home/*/overcloud_prep_network.log
- /home/*/overcloud_validate.log
- /home/*/pkg_mgr_mirror_error.log
- /home/*/pkg_mgr_mirror.log
- /home/*/repo_setup.log
- /home/*/repo_setup.sh.*.log
- /home/*/standalone_deploy.log
- /home/*/tempest.log
- /home/*/undercloud_custom_tht_script.log
- /home/*/undercloud_install.log
- /home/*/undercloud_reinstall.log
- /home/*/*update*.log
- /home/*/*upgrade*.log
- /home/*/upgrade-undercloud-repo.sh.log
- /home/*/validate-overcloud-ipmi-connection.log
- /home/*/vxlan_networking.sh.log
- /home/*/workload_launch.log
- /var/log/bootstrap-subnodes.log
- /var/log/ipaserver-install.log
- /var/log/tripleo-container-image-prepare.log
- /var/log/extra/journal_errors.txt
- /var/log/extra/pcs_cpu_throttle.txt
- /var/log/ceph/cephadm.log
- /var/log/extra/errors.txt
# ara_graphite_server: graphite.tripleo.org
# if ara_enabled is false, no ara tasks will be executed
ara_enabled: true
ara_overcloud_db_path: "/var/lib/mistral/overcloud/ara_overcloud.sqlite"
ara_generate_html: true
ara_only_successful_tasks: true
ara_tasks_map:
"overcloud-deploy : Deploy the overcloud": overcloud.deploy.seconds
"undercloud-deploy : Install the undercloud": undercloud.install.seconds
"build-images : run the image build script (direct)": overcloud.images.seconds
"overcloud-prep-images : Prepare the overcloud images for deploy": prepare_images.seconds
"validate-simple : Validate the overcloud": overcloud.ping_test.seconds
"validate-tempest : Execute tempest": overcloud.tempest.seconds
collect_log_types:
- system
- monitoring
- network
- openstack
- container
# This sets sova to use the specified JSON file instead of downloading it from
# the internet. Right now it is used by molecule; only set this if you do not
# want to use the official sova-config file.
# sova_config_file: "/path/to/sova/json/file"
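# A hypothetical example (the path below is only an illustration, mirroring
# what the molecule sova scenario uses, and is not a default of this role):
# sova_config_file: "{{ ansible_user_dir }}/workspace/logs/sova_config.json"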
# InfluxDB module settings
influxdb_only_successful_tasks: true
influxdb_measurement: test
# influxdb_url:
influxdb_port: 8086
influxdb_user:
influxdb_password:
influxdb_dbname: testdb
influxdb_data_file_path: "{{ local_working_dir }}/influxdb_data"
influxdb_create_data_file: true
odl_extra_log_dir: /var/log/extra/odl
odl_extra_info_log: "{{ odl_extra_log_dir }}/odl_info.log"

View File

@ -1,121 +0,0 @@
#!/usr/bin/env bash
set -x
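# Figure out which container engine to collect from: prefer podman, and only
# fall back to docker when podman is absent, or when the docker daemon is
# active and still owns containers while podman has none.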
get_engine() {
if ! command -v docker &>/dev/null ; then echo "podman"; exit; fi
if ! command -v podman &>/dev/null ; then echo "docker"; exit; fi
if ! systemctl is-active docker &>/dev/null ; then echo "podman"; exit; fi
if [[ -z $(docker ps --all -q) ]]; then
echo "podman";
exit;
fi
if [[ -z $(podman ps --all -q) ]]; then
echo "docker"; exit;
fi
echo 'podman'
}
container_cp() {
${engine} cp "${1}:${2}" "$3"
};
engine=$(get_engine)
echo "${engine} was detected."
BASE_CONTAINER_EXTRA=/var/log/extra/${engine};
mkdir -p "$BASE_CONTAINER_EXTRA";
ALL_FILE=$BASE_CONTAINER_EXTRA/${engine}_allinfo.log;
CONTAINER_INFO_CMDS=(
"${engine} ps --all"
"${engine} images"
"${engine} version"
"${engine} info"
"${engine} volume ls"
"${engine} network ls"
);
for cmd in "${CONTAINER_INFO_CMDS[@]}"; do
{
echo "+ $cmd"
$cmd
echo ""
echo ""
} >> "$ALL_FILE"
done;
# Get only failed containers, in a dedicated file
${engine} ps -a | grep -vE ' (IMAGE|Exited \(0\)|Up) ' &>> /var/log/extra/failed_containers.log;
# Get inspect infos for all containers even the ones not running.
for cont in $(${engine} ps -a | awk '{print $NF}' | grep -v NAMES); do
INFO_DIR=$BASE_CONTAINER_EXTRA/containers/${cont};
mkdir -p "$INFO_DIR";
(
${engine} inspect "$cont";
) &> "$INFO_DIR/${engine}_info.log";
done;
# Get other infos for running containers
for cont in $(${engine} ps | awk '{print $NF}' | grep -v NAMES); do
INFO_DIR=$BASE_CONTAINER_EXTRA/containers/${cont};
mkdir -p "$INFO_DIR";
(
if [ "${engine}" = 'docker' ]; then
${engine} top "$cont" auxw;
# NOTE(cjeanner): `podman top` does not support `ps` options.
elif [ "${engine}" = 'podman' ]; then
${engine} top "$cont";
fi
${engine} exec "$cont" vmstat -s
${engine} exec "$cont" ps axfo %mem,size,rss,vsz,pid,args
${engine} exec -u root "$cont" bash -c "\$(command -v dnf || command -v yum) list installed";
) &>> "$INFO_DIR/${engine}_info.log";
container_cp "$cont" /var/lib/kolla/config_files/config.json "$INFO_DIR/config.json";
# Capture rpms updated from more recent repos
update_repos="gating delorean-current"
if ls /etc/yum.repos.d/*-component.repo 1> /dev/null 2>&1; then
component_name=$(cat /etc/yum.repos.d/*-component.repo | grep "name=" | sed "s/name=//g")
update_repos="${update_repos} ${component_name}"
fi
echo "*** ${cont} rpm update info ***" >> "$BASE_CONTAINER_EXTRA/container_updates_info.log"
for repo in $update_repos; do
grep "@${repo}" "$INFO_DIR/${engine}_info.log" >> "$BASE_CONTAINER_EXTRA/container_updates_info.log"
done;
# NOTE(flaper87): This should go away. Services should be
# using a `logs` volume
# NOTE(mandre) Do not copy logs if the container is bind-mounting the /var/log directory
if ! ${engine} inspect "$cont" | jq .[0].Mounts[].Source | grep -x '"/var/log[/]*"' >/dev/null 2>&1; then
container_cp "$cont" /var/log "$INFO_DIR/log";
BIND_DESTS=$(${engine} inspect "$cont" | jq .[0].Mounts[].Destination -r)
for path in $(echo "$BIND_DESTS" | grep "^/var/log" | sed -e "s#^/var/log/##g"); do
rm -rf "$INFO_DIR/log/$path"
echo "Omitting $INFO_DIR/log/$path in $cont because it is mounted from the host"
done
fi;
# Delete symlinks because they break log collection and are generally
# not useful
find "$INFO_DIR" -type l -delete;
done;
# NOTE(cjeanner) the previous loop cannot have the "-a" flag because of the
# "exec" calls. So we just loop a second time, over ALL containers,
# in order to get all the logs we can. For instance, the previous loop
# would not tell us why a container is "Exited (1)", which prevents
# efficient debugging.
for cont in $(${engine} ps -a | awk '{print $NF}' | grep -v NAMES); do
INFO_DIR=$BASE_CONTAINER_EXTRA/containers/${cont};
mkdir -p "$INFO_DIR";
${engine} logs "$cont" &> "$INFO_DIR/stdout.log";
done;
# NOTE(flaper87) Copy contents from the logs volume. We can expect this
# volume to exist in a containerized environment.
# NOTE(cjeanner): Rather test the existence of the volume, as podman does not
# have such a thing
if [ -d /var/lib/docker/volumes/logs/_data ]; then
cp -r /var/lib/docker/volumes/logs/_data "$BASE_CONTAINER_EXTRA/logs";
fi

View File

@ -1,44 +0,0 @@
# Copyright 2021 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Usage:
# Use /var/log/audit/audit.log as the source:
# ./consolidate-avc.pl
# Use another input file:
# ./consolidate-avc.pl /var/log/extras/denials.txt
use strict;
use warnings;
use List::Util qw'first';
my $logfile = shift // '/var/log/audit/audit.log';
open(AUDIT_LOG, $logfile) or die("Could not open file '${logfile}'.");
my @denials = ();
while( my $line = <AUDIT_LOG>) {
my @matched = $line =~ m{type=AVC.* denied \{([\w\s]+)\}.* scontext=([\w:]+)(:[,c0-9]+)? tcontext=([\w:,]+) tclass=([\w]+) permissive=[01]};
if (@matched) {
my $action = $matched[0];
my $scontext = $matched[1];
my $tcontext = $matched[3];
my $tclass = $matched[4];
my $matcher = "${action}_${scontext}_${tcontext}_${tclass}";
if (!first {$matcher eq $_} @denials) {
push(@denials, $matcher);
print $line;
}
}
}
close(AUDIT_LOG);

View File

@ -1,62 +0,0 @@
#!/usr/bin/env python
# Copyright 2016 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Usage: openstack stack event list -f json overcloud | \
# heat-deploy-times.py [list of resource names]
# If no resource names are provided, all of the resources will be output.
from __future__ import absolute_import, division, print_function
import json
import sys
import time
__metaclass__ = type
def process_events(all_events, events):
times = {}
for event in all_events:
name = event["resource_name"]
status = event["resource_status"]
# Older clients return timestamps in the first format, newer ones
# append a Z. This way we can handle both formats.
try:
strptime = time.strptime(event["event_time"], "%Y-%m-%dT%H:%M:%S")
except ValueError:
strptime = time.strptime(event["event_time"], "%Y-%m-%dT%H:%M:%SZ")
etime = time.mktime(strptime)
if name in events:
if status == "CREATE_IN_PROGRESS":
times[name] = {"start": etime, "elapsed": None}
elif status == "CREATE_COMPLETE":
times[name]["elapsed"] = etime - times[name]["start"]
for name, data in sorted(
times.items(), key=lambda x: x[1]["elapsed"], reverse=True
):
elapsed = "Still in progress"
if times[name]["elapsed"] is not None:
elapsed = times[name]["elapsed"]
print("%s %s") % (name, elapsed)
if __name__ == "__main__":
stdin = sys.stdin.read()
all_events = json.loads(stdin)
events = sys.argv[1:]
if not events:
events = set()
for event in all_events:
events.add(event["resource_name"])
process_events(all_events, events)

View File

@ -1 +0,0 @@
../../plugins/modules

View File

@ -1,27 +0,0 @@
---
galaxy_info:
author: OpenStack
description: An Ansible role for aggregating logs from different nodes
company: Red Hat
license: Apache 2.0
min_ansible_version: 2.5
platforms:
- name: EL
versions:
- 7
- name: Fedora
versions:
- 28
galaxy_tags:
- docker
- buildah
- container
- openstack
- tripleo
- packaging
- system
dependencies: []

View File

@ -1,95 +0,0 @@
---
# vars are defined in molecule.yml to avoid repeating them for each play
- name: Create collection dir play
hosts: localhost
tasks:
- name: Create temp collection folder
file:
path: "{{ local_working_dir }}"
state: directory
mode: 0755
# Ensure we do not have leftovers from previous runs; not needed in production.
- name: Remove collected_files folder
file:
path: "{{ local_working_dir }}/collected_files"
state: absent
- name: "Converge collect play"
hosts: all
strategy: free
vars:
expected:
- 'cmd': 'cat /proc/cpuinfo'
'capture_file': '/var/log/extra/cpuinfo.txt'
'name': 'cpuinfo'
'group': 'system'
- 'cmd': 'cat /proc/meminfo'
'capture_file': '/var/log/extra/meminfo.txt'
'name': 'meminfo'
'group': 'system'
- 'cmd': 'cat /proc/swaps'
'capture_file': '/var/log/extra/swaps.txt'
'name': 'swaps'
'group': 'system'
tasks:
# The brief call uses a very short artcl_commands override, enough to validate
# that the combining of the commands works. Later we import the role with
# its default artcl_commands in order to test those commands, too.
- name: "Include collect_logs :: collect (brief)"
vars:
artcl_collect: true
artcl_commands:
system:
cpuinfo:
cmd: cat /proc/cpuinfo
capture_file: /var/log/extra/cpuinfo.txt
meminfo:
cmd: cat /proc/meminfo
capture_file: /var/log/extra/meminfo.txt
swaps:
cmd: cat /proc/swaps
capture_file: /var/log/extra/swaps.txt
include_role:
name: collect_logs
- name: Verify expected combined commands
assert:
that: artcl_commands_flatten['data'] == expected
fail_msg: |
artcl_commands_flatten had unexpected value {{ artcl_commands_flatten }}
success_msg: artcl_commands_flatten had correct value
- name: Verify that expected files were collected and are not empty
delegate_to: localhost
stat:
path: "{{ local_working_dir }}/collected_files/{{ inventory_hostname }}{{ item.capture_file }}"
register: st
failed_when: not st.stat.exists or st.stat.size == 0
loop: "{{ expected }}"
- name: "Include ansible-role-collect-logs :: collect (full)"
vars:
artcl_collect: true
include_role:
name: collect_logs
- name: "Converge publish play"
hosts: localhost
tasks:
- name: "Include ansible-role-collect-logs :: publish"
vars:
# disabling collect here is key for testing because collection needs
# sudo on targeted hosts, which is not available on molecule, zuul and
# some development environments.
artcl_collect: false
artcl_publish: true
include_role:
name: collect_logs
- name: Display stats
debug:
msg: |
Collected files should be under {{ local_working_dir }}/collected_files

View File

@ -1,44 +0,0 @@
---
driver:
name: podman
log: true
platforms:
# - name: centos7
# image: quay.io/pycontribs/centos:centos7
# pre_build_image: true
- name: centos8
# image below is based on official quay.io/centos/centos:stream8 but
# it has python preinstalled on it.
image: quay.io/pycontribs/centos:stream8
pre_build_image: true
# - name: debian
# image: quay.io/pycontribs/python:3.8-slim-buster
# pre_build_image: true
provisioner:
name: ansible
config_options:
defaults:
interpreter_python: auto
forks: 50
stdout_callback: yaml
timeout: 30
inventory:
group_vars:
all:
local_working_dir: "{{ lookup('env', 'TOX_ENV_DIR') or '~/.cache' }}/log"
artcl_ignore_errors: false
verifier:
name: ansible
scenario:
# custom because "idempotence" is not yet supported by the role
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
- verify
- cleanup
- destroy

View File

@ -1,17 +0,0 @@
---
- name: Mock some logs
hosts: all
become: true
tasks:
- name: Generate bootstrap-subnodes.log
copy:
content: |
2000-00-00T00:00:00.000Z | this is sparta
dest: /var/log/bootstrap-subnodes.log
mode: 0644
- name: Remove /var/log/extra/logstash.txt
file:
path: /var/log/extra/logstash.txt
state: absent

View File

@ -1,8 +0,0 @@
---
- name: Validate "/var/log/extra/logstash.txt
hosts: all
tasks:
- name: Check logstash.txt contains expected data
command: grep -q "this is sparta" /var/log/extra/logstash.txt
changed_when: false

View File

@ -1,99 +0,0 @@
---
- name: Converge
hosts: all
tasks:
- name: "Download Infrared"
git:
repo: "https://github.com/redhat-openstack/infrared.git"
version: "master"
dest: "{{ infrared_location }}"
update: true
- name: "Create Infrared venv"
pip:
name:
- pbr
- pip
- setuptools
virtualenv: "{{ infrared_venv }}"
- name: "Install Infrared"
# this task is always changed, the problem is on pip module side:
# https://github.com/ansible/ansible/issues/28952
pip:
name: "."
virtualenv: "{{ infrared_venv }}"
chdir: "{{ infrared_location }}"
- name: "Create infrared_plugin dir"
file:
path: "{{ infrared_location }}/infrared_plugin"
state: directory
mode: 0755
- name: "Copy ansible-role-collect-logs to test host"
synchronize:
src: "{{ playbook_dir }}/../../../../"
dest: "{{ ansible_env.HOME }}/artcl-src"
rsync_opts:
- "--exclude=.tox"
- name: "Install ansible-role-collect-logs plugin"
shell: |
export PATH=$PATH:/usr/local/sbin:/usr/sbin
source {{ infrared_venv }}/bin/activate
ir plugin add {{ ansible_env.HOME }}/artcl-src --src-path infrared_plugin
args:
executable: /bin/bash
register: plugin_install_output
changed_when: true
- name: "Debug: output from plugin installation task main playbook"
debug:
msg: "{{ plugin_install_output }}"
- name: "Create an empty dummy file"
file:
path: "{{ item }}"
state: touch
mode: 0644
with_items:
- "/tmp/dummy.log"
- "/tmp/append.log"
- "/tmp/exclude.log"
- "/tmp/exclude_append.log"
- "/tmp/config.conf"
- "/tmp/just_file"
changed_when: false
- name: "Create a dummy file of 1MB"
command: dd if=/dev/urandom of=/tmp/1MB_dummy.log bs=1MB count=1
args:
creates: "/tmp/1MB_dummy.log"
changed_when: false
- name: "Run infrared ansible-role-collect-logs"
become: false
shell: |
export PATH=$PATH:/usr/local/sbin:/usr/sbin
source {{ infrared_venv }}/bin/activate
ir ansible-role-collect-logs --openstack_nodes localhost \
--collect_log_types "testing" \
--artcl_collect_dir {{ infrared_location }}/collected_files_test \
--artcl_collect_list /tmp/*.log,/tmp/just_file \
--artcl_collect_list_append /tmp/config.conf \
--artcl_exclude_list /tmp/exclude.log \
--artcl_exclude_list_append /tmp/exclude_append.log \
--artcl_gzip true \
--artcl_rsync_collect_list false \
--local_working_dir "{{ infrared_location }}" \
--disable_artifacts_cleanup true
args:
executable: /bin/bash
register: output_collection
changed_when: true
- name: "Debug collection output"
debug:
msg: "{{ output_collection }}"

View File

@ -1,47 +0,0 @@
---
driver:
name: podman
log: true
platforms:
# - name: centos7
# image: quay.io/pycontribs/centos7
# pre_build_image: true
- name: centos8
# image below is based on official quay.io/centos/centos:stream8 but
# it has python preinstalled on it.
image: quay.io/pycontribs/centos:stream8
pre_build_image: true
# - name: debian
# image: quay.io/pycontribs/3.8-slim-buster
# pre_build_image: true
provisioner:
name: ansible
config_options:
defaults:
interpreter_python: auto
forks: 50
stdout_callback: yaml
timeout: 30
inventory:
group_vars:
all:
local_working_dir: "{{ lookup('env', 'TOX_ENV_DIR') or '~/.cache' }}/log"
artcl_gzip: true
artcl_min_size: 500000 # ~0.5mb
infrared_location: "~/ir"
infrared_venv: "{{ infrared_location }}/.infrared"
verifier:
name: ansible
scenario:
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- converge
- verify
- cleanup
- destroy
markers:
- xfail # https://projects.engineering.redhat.com/browse/RHOSINFRA-4174

View File

@ -1,84 +0,0 @@
---
- name: Converge
hosts: all
tasks:
- name: "List available plugins"
shell: |
export PATH=$PATH:/usr/local/sbin:/usr/sbin
source {{ infrared_venv }}/bin/activate
ir plugin list
args:
executable: /bin/bash
register: plugin_output
changed_when: false
- name: "Verify playbook list plugins output"
debug:
msg: "{{ plugin_output }}"
- name: "Check if ansible-role-collect-logs is present"
fail:
msg: "ansible-role-collect-logs not installed"
when: "'ansible-role-collect-logs' not in plugin_output.stdout"
- name: Get directory with collected log files
stat:
path: "{{ infrared_location }}/collected_files_test"
register: collected_files_dir
- name: Ensure directory with collected log files is created
assert:
that:
- collected_files_dir.stat.exists
- name: Get the stats of collected files
stat:
path: "{{ item }}"
register: collected_files_stats
loop:
- "{{ infrared_location }}/collected_files_test/localhost/tmp/dummy.log.gz"
- "{{ infrared_location }}/collected_files_test/localhost/tmp/1MB_dummy.log.gz"
- "{{ infrared_location }}/collected_files_test/localhost/tmp/just_file.gz"
- "{{ infrared_location }}/collected_files_test/localhost/tmp/config.conf.gz"
- name: Ensure all files were collected
assert:
that:
- item.stat.exists
loop: "{{ collected_files_stats.results }}"
- name: Get the stats of excluded files
stat:
path: "{{ item }}"
register: excluded_files_stats
loop:
- "{{ infrared_location }}/collected_files_test/localhost/tmp/exclude.log.gz"
- "{{ infrared_location }}/collected_files_test/localhost/tmp/exclude_append.log.gz"
- name: Ensure excluded files were not collected
assert:
that:
- not item.stat.exists
loop: "{{ excluded_files_stats.results }}"
- name: Get tar files with logs
find: paths={{ infrared_location }}/collected_files_test/ patterns='*.tar'
register: tar_files
- name: Ensure .tar files with logs are deleted
assert:
that:
- tar_files.matched == 0
- name: Get file generated by find
stat:
path: /tmp/localhost-rsync-list
register: rsync_list
- name: Ensure that find is used to generate list of files for rsync
assert:
that:
- rsync_list.stat.exists

View File

@ -1,12 +0,0 @@
---
- name: Converge
hosts: all
tasks:
- name: Include collect_logs
vars:
sova_config_file: "{{ ansible_user_dir }}/workspace/logs/sova_config.json"
include_role:
name: collect_logs
tasks_from: sova.yml
tags:
- molecule-idempotence-notest

View File

@ -1,15 +0,0 @@
---
driver:
name: delegated
options:
managed: false
ansible_connection_options:
ansible_connection: local
log: true
platforms:
- name: instance
provisioner:
name: ansible
verifier:
name: ansible

View File

@ -1,105 +0,0 @@
---
- name: Prepare
hosts: all
tasks:
- name: Prepare the console file directory
file:
path: '{{ ansible_user_dir }}/workspace/logs/'
state: directory
mode: 0755
- name: Create a sample console file
copy:
content: |
No valid host was found. There are not enough hosts
dest: '{{ ansible_user_dir }}/workspace/logs/quickstart_install.log'
mode: 0644
- name: Create a sample sova config file
copy:
content: |
{
"patterns": {
"bmc": [
{
"id": "Introspection_failed_cannot_get_IP_address",
"logstash": "",
"msg": "Introspection failed, cannot get IP address",
"pattern": "Introspection_failed_cannot_get_IP_address",
"tag": "infra"
}
],
"console": [
{
"id": "Not_enough_hosts",
"logstash": "",
"msg": "No valid host was found.",
"pattern": "Not_enough_hosts",
"tag": "info"
}
],
"errors": [
{
"id": "Buildah_pull_image_failed",
"logstash": "",
"msg": "Buildah pull image failed",
"pattern": "Buildah_pull_image_failed",
"tag": "info"
}
],
"ironic-conductor": [
{
"id": "Ironic_deployment_timeout",
"logstash": "",
"msg": "Ironic deployment timeout.",
"pattern": "Ironic_deployment_timeout",
"tag": "info"
}
],
"logstash": [
{
"id": "Ping_timeout_when_deploying_OC",
"logstash": "",
"msg": "Ping timeout when deploying OC.",
"pattern": "Ping_timeout_when_deploying_OC",
"tag": "infra"
}
],
"registry_log": [
{
"id": "Invalid_checksum_format",
"logstash": "",
"msg": "Invalid checksum format.",
"pattern": "Invalid_checksum_format",
"tag": "infra"
}
],
"selinux": [
{
"id": "selinux_denials_found",
"logstash": "",
"msg": "selinux denials found",
"pattern": "selinux_denials_found",
"tag": "code"
}
],
"syslog": [
{
"id": "service_FAIL",
"logstash": "",
"msg": "service FAIL",
"pattern": "service_FAIL",
"tag": "command_exe"
}
]
},
"regexes": [
{
"name": "Not_enough_hosts",
"regex": "No\\ valid\\ host\\ was\\ found\\.\\ There\\ are\\ not\\ enough\\ hosts"
}
]
}
dest: '{{ ansible_user_dir }}/workspace/logs/sova_config.json'
mode: 0644

View File

@ -1,33 +0,0 @@
---
- hosts: all
tasks:
- name: Ls {{ ansible_user_dir }}/workspace/logs/ # remove before merge
command: ls -la {{ ansible_user_dir }}/workspace/logs/
changed_when: false
register: test_output
- name: debug test # remove before merge
debug: var=test_output
- name: Ensure all files exist
stat:
path: "{{ item }}"
register: failures_file
loop:
- "{{ ansible_user_dir }}/workspace/logs/failures_file"
# TODO: Revert back after bug #1947133 is fixed
loop_control:
label: '{{ item.split("/")[-1] }}'
- name: Ensure all files exist - test
assert:
that:
- item.stat.exists
loop: "{{ failures_file.results }}"
- name: Check if we have strings in failures_file
command: grep 'No valid host was found' {{ ansible_user_dir }}/workspace/logs/failures_file
changed_when: false
# TODO: Revert back after bug #1947133 is fixed

View File

@ -1,42 +0,0 @@
# AWK script used to parse shell scripts, created during TripleO deployments,
# and convert them into rST files for digestion by Sphinx.
#
# General notes:
#
# - Only blocks between `### --start_docs` and `### --stop_docs` will be
# parsed
# - Lines ending with `#nodocs` will be excluded from rST output
# - Lines containing `## ::` indicate subsequent lines should be formatted
# as code blocks
# - Other lines beginning with `## <anything else>` will have the prepended
# `## ` removed. (This is how you would add general rST formatting)
# - All other lines (including shell comments) will be indented by four spaces
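# A minimal, hypothetical illustration of the rules above. Given this input:
#   ### --start_docs
#   ## Deploy the overcloud
#   ## ::
#   openstack overcloud deploy --templates
#   ### --stop_docs
# the script would emit roughly:
#   Deploy the overcloud
#
#   ::
#
#       openstack overcloud deploy --templates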
/^### --start_docs/ {
for (;;) {
if ((getline line) <= 0)
unexpected_eof()
if (line ~ /^### --stop_docs/)
break
if (match(line, ".* #nodocs$"))
continue
if (substr(line, 1, 5) == "## ::") {
line = "\n::\n"
} else if (substr(line, 1, 3) == "## ") {
line = substr(line, 4)
} else if (line != "") {
line = " "line
}
print line > "/dev/stdout"
}
}
function unexpected_eof() {
printf("%s:%d: unexpected EOF or error\n", FILENAME, FNR) > "/dev/stderr"
exit 1
}
END {
if (curfile)
close(curfile)
}

View File

@ -1,232 +0,0 @@
---
- become: true
ignore_errors: true
block:
- name: Ensure required rpms for logging are installed
package:
state: present
name: "{{ artcl_collect_pkg_list }}"
- name: Prepare directory with extra logs
file:
dest: /var/log/extra
state: directory
mode: 0755
- name: Create rsync filter file
template:
src: "odl_extra_logs.j2"
dest: "/tmp/odl_extra_logs.sh"
mode: 0644
- name: Determine commands to run
run_once: true
vars:
combined_cmds: "{{ artcl_commands | combine(artcl_commands_extras, recursive=True) }}"
# combines default dictionary with user defined one
# keeps only commands from groups mentioned in collect_log_types
flatten_nested_dict:
data: "{{ combined_cmds | dict2items|selectattr('key', 'in', collect_log_types) | list | items2dict }}"
register: artcl_commands_flatten
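# As an illustration (values are hypothetical): with collect_log_types set to
# ['system'] and a combined dict such as
#   {system: {cpuinfo: {cmd: cat /proc/cpuinfo}}, network: {...}}
# only the 'system' group survives the selectattr() filter, and the module
# then flattens it into a list like
#   [{name: cpuinfo, group: system, cmd: cat /proc/cpuinfo}]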
- name: install setools
ansible.builtin.package:
name:
- setools
- setroubleshoot
state: present
- name: install custom consolidation script
ansible.builtin.copy:
dest: /usr/local/bin/consolidate-avc.pl
src: consolidate-avc.pl
mode: 0555
- name: Run artcl_commands
# noqa 305
# noqa 102 :: No Jinja2 in when
vars:
capture_file: "{{ item.capture_file | default( item.name + '.txt') }}"
shell:
# redirection of output to log file, see https://ops.tips/gists/redirect-all-outputs-of-a-bash-script-to-a-file/
cmd: |
{% if not item.capture_disable | default(False) %}
exec >{% if not capture_file.startswith('/') %}/var/log/extra/{% endif %}{{ capture_file }} 2>&1
{% endif %}
{# do not put anything after the command #}
{{ item.cmd }}
warn: false
args:
chdir: /var/log/extra
executable: /bin/bash
changed_when: false
when: item.when | default(true)
loop: "{{ artcl_commands_flatten.data }}"
loop_control:
label: "{{ item.name }}"
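# For the default "cpuinfo" entry, for example, the template above renders to
# a bash snippet roughly like:
#   exec >/var/log/extra/cpuinfo.txt 2>&1
#   cat /proc/cpuinfo
# i.e. all output of the command is captured into its capture_file.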
# Change the collect_log_types if you don't want to collect
# some specific logs
- import_tasks: collect/container.yml
when: "'container' in collect_log_types"
- import_tasks: collect/system.yml
when: "'system' in collect_log_types"
- import_tasks: collect/network.yml
when: "'network' in collect_log_types"
- import_tasks: collect/monitoring.yml
when: "'monitoring' in collect_log_types"
- name: Set default collect list
set_fact:
collect_list: "{{ artcl_collect_list }} + {{ artcl_collect_list_append|default([]) }}"
- name: Override collect list
set_fact:
collect_list: "{{ artcl_collect_override[inventory_hostname] }}"
when:
- artcl_collect_override is defined
- artcl_collect_override[inventory_hostname] is defined
- name: Set default exclude list
set_fact:
artcl_exclude_list: "{{ artcl_exclude_list|default([]) }} + {{ artcl_exclude_list_append|default([]) }}"
- name: Create temp directory before gathering logs
file:
dest: "/tmp/{{ inventory_hostname }}"
state: directory
mode: 0755
- name: Create rsync filter file
template:
src: "rsync-filter.j2"
dest: "/tmp/{{ inventory_hostname }}-rsync-filter"
mode: 0644
when: artcl_rsync_collect_list|bool
# This task needs to finish before we generate the find list of files to
# collect (the "Create find list file" task); otherwise not all container
# log files may be found and thus not collected later
- name: Wait for container logs collection if not finished yet
become: true
async_status:
jid: "{{ container_collection.ansible_job_id }}"
register: container_collection_result
until: container_collection_result.finished
delay: 10
retries: "{{ ((artcl_container_collect_timeout|int) / 10)|int }}"
when: "'container' in collect_log_types"
- name: Find and move logfiles generic case (typically without compression)
when: not (artcl_gzip | bool) or ( sanitize_lines is defined and sanitize_lines|length ) or ( artcl_rsync_collect_list|bool )
block:
- name: Create find list file
become: true
shell: >
find {{ collect_list|join(' ') }}
-maxdepth {{ artcl_find_maxdepth }}
-type f
-size -{{ artcl_find_max_size }}M
{% if artcl_exclude_list is defined %}
-not -path {{ artcl_exclude_list|map('quote')|join(' -not -path ') }}
{% endif %}
-print0 > /tmp/{{ inventory_hostname }}-rsync-list
failed_when: false
when: not artcl_rsync_collect_list|bool
- name: Gather the logs to /tmp
become: true
shell: >
set -o pipefail &&
rsync --quiet --recursive --copy-links --prune-empty-dirs --ignore-errors
{% if artcl_rsync_collect_list|bool %}
--filter '. /tmp/{{ inventory_hostname }}-rsync-filter'
{% else %}
--from0 --files-from=/tmp/{{ inventory_hostname }}-rsync-list
{% endif %}
/ /tmp/{{ inventory_hostname }};
find /tmp/{{ inventory_hostname }} -type d -print0 | xargs -0 chmod 755;
find /tmp/{{ inventory_hostname }} -type f -print0 | xargs -0 chmod 644;
find /tmp/{{ inventory_hostname }} -not -type f -not -type d -delete;
{# chown can fail with: chown: invalid spec: '0:' #}
chown -R {{ ansible_user | default(ansible_effective_user_id) }}: /tmp/{{ inventory_hostname }} || true;
args:
executable: /bin/bash
changed_when: true
# See README section 'Sanitizing Log Strings'
- name: Sanitize logs to remove sensitive details
include_tasks: sanitize_log_strings.yaml
loop: "{{ sanitize_lines }}"
loop_control:
loop_var: outer_item
when: sanitize_lines is defined and sanitize_lines|length
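# A hypothetical sanitize_lines entry (keys as consumed by
# sanitize_log_strings.yaml; the values are only an illustration):
# sanitize_lines:
#   - dir_path: "/tmp/{{ inventory_hostname }}/var/log"
#     file_pattern: "*"
#     orig_string: "password=.*"
#     sanitized_string: "password=***"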
# It makes sense to compress the logs, regardless of file size, prior to
# sending them over the wire to the node where they are collected by infra.
- name: Compress the collected files if configured
when: artcl_gzip | bool
shell: gzip -r ./{{ inventory_hostname }}
args:
chdir: /tmp
warn: false
changed_when: true
tags:
- skip_ansible_lint
- name: Create gz compressed log files to the /tmp (special case)
when:
- artcl_gzip | bool
- not ( sanitize_lines is defined and sanitize_lines|length )
- not ( artcl_rsync_collect_list|bool )
block:
- name: On the fly compress copy
become: true
shell: >
find {{ collect_list|join(' ') }}
-maxdepth {{ artcl_find_maxdepth }}
-type f
-size -{{ artcl_find_max_size }}M
{% if artcl_exclude_list is defined %}
-not -path {{ artcl_exclude_list|map('quote')|join(' -not -path ') }}
{% endif %}
-print0 |
xargs -0 -P 8 -I ITER sh -c 'mkdir -p "/tmp/{{ inventory_hostname }}$(dirname ITER)"; gzip -c "ITER" > "/tmp/{{ inventory_hostname }}/ITER.gz"'
failed_when: false
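# For a file such as /var/log/messages, the xargs invocation above expands
# (per host) roughly to:
#   mkdir -p "/tmp/<inventory_hostname>/var/log"
#   gzip -c "/var/log/messages" > "/tmp/<inventory_hostname>//var/log/messages.gz"
# (the doubled slash is harmless).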
- name: Create tar archive of logs for faster copying # noqa: command-instead-of-module
shell:
cmd: tar cf {{ inventory_hostname }}.tar {{ inventory_hostname }};
chdir: /tmp
changed_when: true
- name: Fetch log archive (tar)
fetch:
src: "/tmp/{{ inventory_hostname }}.tar"
dest: "{{ artcl_collect_dir }}/{{ inventory_hostname }}.tar"
flat: true
validate_checksum: false
- name: Delete temporary log directory after collection
file:
path: "/tmp/{{ inventory_hostname }}"
state: absent
ignore_errors: true # noqa ignore-errors
- name: Extract the logs archive
unarchive:
src: "{{ artcl_collect_dir }}/{{ inventory_hostname }}.tar"
dest: "{{ artcl_collect_dir }}"
remote_src: true
delegate_to: localhost
- name: Remove logs archive
file:
path: "{{ artcl_collect_dir }}/{{ inventory_hostname }}.tar"
state: absent
delegate_to: localhost

View File

@ -1,49 +0,0 @@
---
- become: true
ignore_errors: true
block:
- name: check if ODL is enabled via docker
shell: docker ps | grep opendaylight_api
register: odl_container_enabled
- name: check if ODL is enabled via podman
shell: podman ps | grep opendaylight_api
register: odl_container_enabled
when: odl_container_enabled.rc != 0
- name: check if ODL is enabled via rpm # noqa: command-instead-of-module
shell: rpm -qa | grep opendaylight
register: odl_rpm_enabled
- name: Create ODL log directory
file:
dest: "{{ odl_extra_log_dir }}"
state: directory
mode: 0755
when: (odl_rpm_enabled.rc == 0) or (odl_container_enabled.rc == 0)
- name: Collect OVS outputs for ODL
shell: "bash /tmp/odl_extra_logs.sh" # noqa 305
when: (odl_rpm_enabled.rc == 0) or (odl_container_enabled.rc == 0)
- name: Collect ODL info and logs (RPM deployment)
shell: >
cp /opt/opendaylight/data/log/* /var/log/extra/odl/;
journalctl -u opendaylight > /var/log/extra/odl/odl_journal.log
when: odl_rpm_enabled.rc == 0
- name: Copy collection logs script for containers
copy:
src: collect-container-logs.sh
dest: /tmp/collect-container-logs.sh
mode: 0755
- name: Run container logs collection with timeout
command: >-
timeout --preserve-status -s 15 -k {{ (artcl_container_collect_timeout|int + 30)|string }}
{{ artcl_container_collect_timeout|string }} bash -x /tmp/collect-container-logs.sh
changed_when: true
async: "{{ artcl_container_collect_timeout }}"
poll: 0
register: container_collection

View File

@ -1,39 +0,0 @@
---
- become: true
ignore_errors: true
block:
- name: check for dstat log file
stat: path=/var/log/extra/dstat-csv.log
register: dstat_logfile
- name: kill dstat
shell: "pkill dstat" # noqa 305
become: true
when: dstat_logfile.stat.exists
- name: Get dstat_graph tool
git:
repo: "https://github.com/Dabz/dstat_graph.git"
dest: "/tmp/dstat_graph"
version: master
when: dstat_logfile.stat.exists
- name: Generate HTML dstat graphs if it exists
shell: "/tmp/dstat_graph/generate_page.sh /var/log/extra/dstat-csv.log > /var/log/extra/dstat.html"
when: dstat_logfile.stat.exists
args:
chdir: "/tmp/dstat_graph"
- name: Generate human-readable SAR logs
shell: "[[ -f /usr/lib64/sa/sa2 ]] && /usr/lib64/sa/sa2 -A"
- name: Ensure sos package is installed when collect sosreport(s)
package:
name: sos
state: present
when: artcl_collect_sosreport|bool
- name: Collect sosreport
command: >
sosreport {{ artcl_sosreport_options }}
when: artcl_collect_sosreport|bool

View File

@ -1,49 +0,0 @@
---
- become: true
ignore_errors: true
block:
- name: netstat -laputen
shell: "netstat -laputen &> /var/log/extra/netstat.txt"
- name: Collect network status info
shell: >
echo "netstat" > /var/log/extra/network.txt;
netstat -i &> /var/log/extra/network.txt;
for ipv in 4 6; do
echo "### IPv${ipv} addresses" >> /var/log/extra/network.txt;
ip -${ipv} a &>> /var/log/extra/network.txt;
echo "### IPv${ipv} routing" >> /var/log/extra/network.txt;
ip -${ipv} r &>> /var/log/extra/network.txt;
if [[ ! $(command -v nft) ]]; then
echo "### IPTables (IPv${ipv})" &>> /var/log/extra/network.txt;
test $ipv -eq 4 && iptables-save &>> /var/log/extra/network.txt;
test $ipv -eq 6 && ip6tables-save &>> /var/log/extra/network.txt;
echo "### IPTables Stats (IPv${ipv})" &>> /var/log/extra/network.txt;
test $ipv -eq 4 && iptables -vnL &>> /var/log/extra/network.txt;
test $ipv -eq 6 && ip6tables -vnL &>> /var/log/extra/network.txt;
fi
done;
command -v nft && nft list ruleset &>/var/log/extra/nftables.txt;
journalctl -p warning -t kernel -o short -g DROPPING --no-pager &> /var/log/extra/dropped-packets.txt;
(for NS in $(ip netns list | cut -f 1 -d " "); do
for ipv in 4 6; do
echo "==== $NS (${ipv})====";
echo "### IPv${ipv} addresses";
ip netns exec $NS ip -${ipv} a;
echo "### IPv${ipv} routing";
ip netns exec $NS ip -${ipv} r;
echo "### IPTables (IPv${ipv})";
test $ipv -eq 4 && ip netns exec $NS iptables-save;
test $ipv -eq 6 && ip netns exec $NS ip6tables-save;
done
PIDS="$(ip netns pids $NS)";
[[ ! -z "$PIDS" ]] && ps --no-headers -f --pids "$PIDS";
echo "";
done) &>> /var/log/extra/network-netns;
(for NB in $(ovs-vsctl show | grep Bridge |awk '{print $2}'); do
echo "==== Bridge name - $NB ====";
ovs-ofctl show $NB;
ovs-ofctl dump-flows $NB;
echo "";
done;
ovsdb-client dump) &> /var/log/extra/network-bridges;

View File

@ -1,39 +0,0 @@
---
- become: true
ignore_errors: "{{ artcl_ignore_errors }}"
block:
- name: Collect errors and rename if more than 10 MB
shell: >
grep -rE '^[-0-9]+ [0-9:\.]+ [0-9 ]*ERROR ' /var/log/ |
sed "s/\(.*\)\(20[0-9][0-9]-[0-9][0-9]-[0-9][0-9] [0-9][0-9]:[0-9][0-9]:[0-9][0-9]\.[0-9]\+\)\(.*\)/\2 ERROR \1\3/g" > /tmp/errors.txt;
if (( $(stat -c "%s" /tmp/errors.txt) > 10485760 )); then
ERR_NAME=big-errors.txt;
else
ERR_NAME=errors.txt;
fi;
mv /tmp/errors.txt /var/log/extra/${ERR_NAME}
# The logstash.txt file is expected to follow a strict (console-like) format:
# TIMESTAMP_ISO8601 | message
# If timestamp is missing on a line, previous value will be used.
# https://opendev.org/openstack/logstash-filters/src/branch/master/filters/openstack-filters.conf#L6-L20
# https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns#L71
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString
# Valid examples:
# 2011-10-05T14:48:00.000Z | foo
# 2011-10-05T14:48:00Z | foo
# 2011-10-05 14:48:00 | foo
- name: Create an index file for logstash
# This strips any existing timestamp and prepends one in a known valid format,
# but we should improve the code to keep the original when valid.
vars:
suffix: "{{ ansible_date_time.iso8601_micro }} | "
shell: >
find {{ artcl_logstash_files | default([]) | join(" ") }} 2>/dev/null |
xargs -r sed
-E "s/^[0-9[:space:].:TZ|-]+ //g; s/^/{{ suffix }}/"
>> /var/log/extra/logstash.txt

View File

@ -1,45 +0,0 @@
---
- name: Ensure required python packages are installed
pip:
requirements: "{{ local_working_dir }}/share/ansible/roles/collect-logs/docs/doc-requirements.txt"
executable: "{{ local_working_dir }}/bin/pip"
- name: Generate rST docs from scripts and move to Sphinx src dir
shell: >
awk -f "{{ local_working_dir }}/share/ansible/roles/collect-logs/scripts/doc_extrapolation.awk" \
"{{ artcl_collect_dir }}/undercloud/home/{{ undercloud_user }}/{{ item }}.sh" > \
"{{ artcl_docs_source_dir }}/{{ item }}.rst"
with_items: "{{ artcl_create_docs_payload.included_deployment_scripts }}"
ignore_errors: true # noqa: ignore-errors
changed_when: true
- name: Fetch static rST docs to include in output docs
shell: >
cp "{{ artcl_docs_source_dir }}/../static/{{ item }}.rst" "{{ artcl_docs_source_dir }}"
with_items: "{{ artcl_create_docs_payload.included_static_docs }}"
ignore_errors: true # noqa: ignore-errors
changed_when: true
- name: Generate fresh index.rst for Sphinx
template:
src: index.rst.j2
dest: "{{ artcl_docs_source_dir }}/index.rst"
force: true
mode: 0644
- name: Ensure docs dir exists
file:
path: "{{ artcl_collect_dir }}/docs"
state: directory
mode: 0755
- name: Build docs with Sphinx
shell: >
set -o pipefail &&
sphinx-build -b html "{{ artcl_docs_source_dir }}" "{{ artcl_docs_build_dir }}"
2>&1 {{ timestamper_cmd }} > {{ artcl_collect_dir }}/docs/sphinx_build.log
args:
executable: /bin/bash
ignore_errors: true # noqa: ignore-errors
changed_when: true

View File

@ -1,48 +0,0 @@
---
- name: gather facts used by role
setup:
gather_subset: "!min,pkg_mgr,python"
when: "['pkg_mgr', 'python'] | difference(ansible_facts.keys()|list)"
- name: Load unsecure.yml defaults when outside zuul
when: zuul is not defined
include_vars: unsecure.yml
- name: Load operating system specific variables
include_vars: "{{ item }}"
failed_when: false
# pattern: v3
loop:
- "family-{{ ansible_os_family | lower }}.yml"
- "family-{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml"
- "{{ ansible_distribution | lower | replace(' ', '-') }}.yml"
- "{{ ansible_distribution | lower | replace(' ', '-') }}-{{ ansible_distribution_major_version | lower }}.yml"
- "{{ ansible_distribution | lower | replace(' ', '-') }}-{{ ansible_distribution_version.split('.')[0:2] | join('-') | lower }}.yml"
tags:
- always
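# On a CentOS 8 host, for example, the loop above would look for (in order):
# family-redhat.yml, family-redhat-8.yml, centos.yml, centos-8.yml and
# e.g. centos-8-4.yml (the exact names depend on the gathered facts);
# missing files are skipped because failed_when is false.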
- name: Collect logs
include: collect.yml
when: artcl_collect|bool
- name: Generate docs
include: create-docs.yml
when:
- artcl_gen_docs|bool
- not artcl_collect|bool
- name: Publish logs
include: publish.yml
when:
- artcl_publish|bool
- not artcl_collect|bool
- name: Verify Sphinx build
shell: | # noqa 305
grep -q "{{ item }}" "{{ artcl_collect_dir }}/docs/build/index.html"
with_items: "{{ artcl_create_docs_payload.table_of_contents }}"
changed_when: false
when:
- artcl_gen_docs|bool
- artcl_verify_sphinx_build|bool
- not artcl_collect|bool

View File

@ -1,150 +0,0 @@
---
# The collection dir could be either a directory or a symlink.
# The file module cannot be used here because it converts a link into a
# directory when called with state: directory.
- name: Ensure the collection directory exists
shell: |
if [[ ! -d "{{ artcl_collect_dir }}" && ! -h "{{ artcl_collect_dir }}" ]]; then
mkdir -p "{{ artcl_collect_dir }}"
fi
changed_when: true
- name: Fetch console log
shell: >
set -o pipefail &&
curl -k "{{ artcl_build_url }}/timestamps/?time=yyyy-MM-dd%20HH:mm:ss.SSS%20|&appendLog&locale=en_GB"
> {{ artcl_collect_dir }}/console.log
args:
executable: /bin/bash
when:
- artcl_build_url is defined
- artcl_build_url|length > 0
- include: sova.yml
ignore_errors: true
- import_tasks: publish_ara.yml
when: ara_enabled|bool
ignore_errors: true
- name: fetch stackviz results to the root of the collect_dir
shell: >
if [ -d {{ artcl_collect_dir }}/undercloud/var/log/extra/stackviz/data ]; then
cp -r {{ artcl_collect_dir }}/undercloud/var/log/extra/stackviz {{ artcl_collect_dir }};
gunzip -fr {{ artcl_collect_dir }}/stackviz;
fi;
changed_when: true
- name: fetch stackviz results to the root of the collect_dir for os_tempest
shell: >
if [ -d {{ artcl_collect_dir }}/undercloud/var/log/tempest/stackviz/data ]; then
cp -r {{ artcl_collect_dir }}/undercloud/var/log/tempest/stackviz {{ artcl_collect_dir }};
gunzip -fr {{ artcl_collect_dir }}/stackviz;
fi;
when: use_os_tempest is defined
changed_when: true
- name: tempest results to the root of the collect_dir
shell: >
cp {{ artcl_collect_dir }}/undercloud/home/stack/tempest/tempest.{xml,html}{,.gz} {{ artcl_collect_dir }} || true;
gunzip {{ artcl_collect_dir }}/tempest.{xml,html}.gz || true;
changed_when: true
- name: Copy and unzip testrepository.subunit file to the root of {{ artcl_collect_dir }} for os_tempest
shell: >
cp {{ artcl_collect_dir }}/undercloud/var/log/tempest/testrepository.subunit* {{ artcl_collect_dir }} || true;
gunzip {{ artcl_collect_dir }}/testrepository.subunit.gz || true;
changed_when: true
- name: Fetch .sh and .log files from local working directory on localhost
shell: >
cp {{ item }} {{ artcl_collect_dir }}/
with_items:
- "{{ local_working_dir }}/*.sh"
- "{{ local_working_dir }}/*.log"
ignore_errors: true # noqa: ignore-errors
changed_when: true
# The doc footer for logging has been removed.
# Copy the log README into the base directory.
- name: copy in the logs README.html
shell: >
if [ -f {{ artcl_readme_path }} ]; then
cp {{ artcl_readme_path }} {{ artcl_readme_file }};
fi;
ignore_errors: true # noqa: ignore-errors
changed_when: true
- name: Rename compressed text based files to end with txt.gz extension
shell: >
set -o pipefail &&
find {{ artcl_collect_dir }}/ -type f |
awk 'function rename(orig)
{ new=orig; sub(/\.gz$/, ".txt.gz", new); system("mv " orig " " new) }
/\.(conf|ini|json|sh|log|yaml|yml|repo|cfg|j2|py)\.gz$/ { rename($0) }
/(\/var\/log\/|\/etc\/)[^ \/\.]+\.gz$/ { rename($0) }';
args:
executable: /bin/bash
when: artcl_txt_rename|bool
- name: Create the zuul-based reproducer script if we are running on zuul
include_role:
name: create-zuul-based-reproducer
when: zuul is defined
# This is where the upload starts; there are no local changes after this line.
# All tasks after this line are tagged "notest" to allow testing (molecule) without access to log servers.
- name: upload to the artifact server using pubkey auth # noqa: command-instead-of-module
command: >
rsync -av
--quiet -e "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {{ artcl_report_server_key|default('') }}"
{{ artcl_collect_dir }}/ {{ artcl_rsync_path }}/{{ artcl_build_tag }}
async: "{{ artcl_publish_timeout }}"
poll: 15
retries: 5
delay: 60
when: artcl_use_rsync|bool and not artcl_rsync_use_daemon|bool
tags: notest
- name: upload to the artifact server using password auth # noqa: command-instead-of-module
environment:
RSYNC_PASSWORD: "{{ artcl_rsync_password | default(omit) }}"
command: rsync -av --quiet {{ artcl_collect_dir }}/ {{ artcl_rsync_path }}/{{ artcl_build_tag }}
async: "{{ artcl_publish_timeout }}"
poll: 15
retries: 5
delay: 60
when: artcl_use_rsync|bool and artcl_rsync_use_daemon|bool
tags: notest
- name: upload to swift based artifact server
shell: swift upload --quiet --header "X-Delete-After:{{ artcl_swift_delete_after }}" {{ artcl_swift_container }}/{{ artcl_build_tag }} *
args:
chdir: "{{ artcl_collect_dir }}"
changed_when: true
environment:
OS_AUTH_URL: "{{ artcl_swift_auth_url }}"
OS_USERNAME: "{{ artcl_swift_username }}"
OS_PASSWORD: "{{ artcl_swift_password }}"
OS_TENANT_NAME: "{{ artcl_swift_tenant_name }}"
async: "{{ artcl_publish_timeout }}"
poll: 15
when: artcl_use_swift|bool
tags: notest
- name: use zuul_swift_upload.py to publish the files
shell: >
"{{ artcl_zuul_swift_upload_path }}/zuul_swift_upload.py --name {{ artcl_swift_container }}
--delete-after {{ artcl_swift_delete_after }} {{ artcl_collect_dir }}"
async: "{{ artcl_publish_timeout }}"
poll: 15
when: artcl_use_zuul_swift_upload|bool
tags: notest
- name: create the artifact location redirect file
template:
src: full_logs.html.j2
dest: "{{ artcl_collect_dir }}/full_logs.html"
mode: 0644
when: artcl_env != 'tripleo-ci'
tags: notest

View File

@ -1,43 +0,0 @@
---
- when: ara_generate_html|bool
block:
- name: Generate and retrieve the ARA static playbook report
shell: >
{{ local_working_dir }}/bin/ara generate html {{ local_working_dir }}/ara_oooq;
{{ local_working_dir }}/bin/ara task list --all -f json > {{ artcl_collect_dir }}/ara.json;
cp -r {{ local_working_dir }}/ara_oooq {{ artcl_collect_dir }}/;
- name: Generate and retrieve the ARA static playbook report for undercloud
shell: >
{{ local_working_dir }}/bin/ara generate html {{ local_working_dir }}/ara_oooq_uc;
{{ local_working_dir }}/bin/ara task list --all -f json > {{ artcl_collect_dir }}/ara.oooq.uc.json;
cp -r {{ local_working_dir }}/ara_oooq_uc {{ artcl_collect_dir }}/;
environment:
ARA_DATABASE: 'sqlite:///{{ working_dir }}/ara_db.sql'
- name: Generate and retrieve the ARA static playbook report for OC deploy
become: true
shell: >
{{ local_working_dir }}/bin/ara generate html {{ local_working_dir }}/ara_oooq_oc;
{{ local_working_dir }}/bin/ara task list --all -f json > {{ artcl_collect_dir }}/ara.oooq.oc.json;
cp -r {{ local_working_dir }}/ara_oooq_oc {{ artcl_collect_dir }}/;
ignore_errors: true # noqa: ignore-errors
environment:
ARA_DATABASE: 'sqlite:///{{ ara_overcloud_db_path }}'
- name: Copy ara files to ara-report directories # noqa: deprecated-command-syntax
shell: |
mkdir -p {{ artcl_collect_dir }}/{{ item.dir }}/ara-report;
cp {{ item.file }} {{ artcl_collect_dir }}/{{ item.dir }}/ara-report/ansible.sqlite;
loop:
- dir: ara_oooq
file: "{{ local_working_dir }}/ara.sqlite"
- dir: ara_oooq_overcloud
file: "{{ ara_overcloud_db_path }}"
when: not ara_generate_html|bool
- import_tasks: publish_ara_graphite.yml
when: ara_graphite_server is defined
- import_tasks: publish_ara_influxdb.yml
when: influxdb_url is defined or influxdb_create_data_file|bool

View File

@ -1,14 +0,0 @@
---
- name: Get ARA json data
shell: | # noqa 305
{{ local_working_dir }}/bin/ara result list --all -f json
register: ara_data
changed_when: false
- name: Send to graphite
ara_graphite:
graphite_host: "{{ ara_graphite_server }}"
ara_mapping: "{{ ara_tasks_map }}"
ara_data: "{{ ara_data.stdout|to_json }}"
graphite_prefix: "{{ ara_graphite_prefix | default('') }}"
only_successful_tasks: "{{ ara_only_successful_tasks }}"

View File

@ -1,69 +0,0 @@
---
- name: Get ARA json data
shell: | # noqa 305
{{ local_working_dir }}/bin/ara result list --all -f json
register: ara_data
no_log: true
changed_when: false
- name: Collect and send data to InfluxDB
ara_influxdb:
influxdb_url: "{{ influxdb_url|default('') }}"
influxdb_port: "{{ influxdb_port }}"
influxdb_user: "{{ influxdb_user }}"
influxdb_password: "{{ influxdb_password }}"
influxdb_db: "{{ influxdb_dbname }}"
ara_data: "{{ ara_data.stdout|to_json }}"
measurement: "{{ influxdb_measurement }}"
data_file: "{{ influxdb_data_file_path }}"
only_successful_tasks: "{{ influxdb_only_successful_tasks }}"
no_log: true
- name: Get ARA json data for undercloud
become: true
shell: "{{ local_working_dir }}/bin/ara result list --all -f json" # noqa 305
register: ara_root_data
no_log: true
changed_when: false
- name: Collect and send data to InfluxDB
ara_influxdb:
influxdb_url: "{{ influxdb_url|default('') }}"
influxdb_port: "{{ influxdb_port }}"
influxdb_user: "{{ influxdb_user }}"
influxdb_password: "{{ influxdb_password }}"
influxdb_db: "{{ influxdb_dbname }}"
ara_data: "{{ ara_root_data.stdout|to_json }}"
measurement: "undercloud"
data_file: "{{ influxdb_data_file_path }}"
only_successful_tasks: "{{ influxdb_only_successful_tasks }}"
mapped_fields: false
standard_fields: false
longest_tasks: 15
when: ara_root_data.stdout != "[]"
no_log: true
- name: Get ARA json data for overcloud
shell: "{{ local_working_dir }}/bin/ara result list --all -f json" # noqa 305
register: ara_oc_data
environment:
ARA_DATABASE: 'sqlite:///{{ ara_overcloud_db_path }}'
no_log: true
changed_when: false
- name: Collect and send data to InfluxDB
ara_influxdb:
influxdb_url: "{{ influxdb_url|default('') }}"
influxdb_port: "{{ influxdb_port }}"
influxdb_user: "{{ influxdb_user }}"
influxdb_password: "{{ influxdb_password }}"
influxdb_db: "{{ influxdb_dbname }}"
ara_data: "{{ ara_oc_data.stdout|to_json }}"
measurement: "overcloud"
data_file: "{{ influxdb_data_file_path }}"
only_successful_tasks: "{{ influxdb_only_successful_tasks }}"
mapped_fields: false
standard_fields: false
longest_tasks: 15
when: ara_oc_data.stdout != "[]"
no_log: true

View File

@ -1,14 +0,0 @@
---
# See README section 'Sanitizing Log Strings'
- name: Sanitize logs to remove sensitive details
find:
paths: "{{ outer_item.dir_path }}"
patterns: "{{ outer_item.file_pattern }}"
register: files_with_pattern
- name: Replace orig_string with sanitized_string in the files
replace:
path: "{{ item.path }}"
regexp: "{{ outer_item.orig_string }}"
replace: "{{ outer_item.sanitized_string }}"
with_items: "{{ files_with_pattern.files }}"

View File

@ -1,47 +0,0 @@
---
- block:
- name: Load sova patterns from URL
uri:
url: https://opendev.org/openstack/tripleo-ci-health-queries/raw/branch/master/output/sova-pattern-generated.json
method: GET
return_content: true
status_code: 200
body_format: json
retries: 3 # to avoid accidental failures due to networking or rate limiting
delay: 60
register: pattern_config
- name: Set sova_config from URL content
set_fact:
sova_config: "{{ pattern_config.json }}"
when: sova_config_file is not defined
- block:
- name: Load sova patterns from local file
command: cat "{{ sova_config_file }}"
register: sova_config_file_output
- name: Set sova_config from local file
set_fact:
sova_config: "{{ sova_config_file_output.stdout | from_json }}"
when: sova_config_file is defined
- name: Run sova task
sova:
config: "{{ sova_config }}"
files:
console: "{{ ansible_user_dir }}/workspace/logs/quickstart_install.log"
errors: "/var/log/errors.txt"
"ironic-conductor": "/var/log/containers/ironic/ironic-conductor.log"
syslog: "/var/log/journal.txt"
logstash: "/var/log/extra/logstash.txt"
registry_log: "/var/log/extra/podman/containers/docker_registry/stdout.log"
result: "{{ ansible_user_dir }}/workspace/logs/failures_file"
result_file_dir: "{{ ansible_user_dir }}/workspace/logs"
- name: Run sova task (denials)
sova:
config: "{{ sova_config }}"
files:
selinux: "/var/log/extra/denials.txt"
result: "{{ ansible_user_dir }}/workspace/logs/selinux_denials.log"

View File

@ -1,14 +0,0 @@
<!DOCTYPE HTML>
<html lang="en-US">
<head>
<meta charset="UTF-8">
<meta http-equiv="refresh" content="1; url={{ artcl_full_artifact_url }}">
<script type="text/javascript">
window.location.href = "{{ artcl_full_artifact_url }}"
</script>
<title>Redirection to logs</title>
</head>
<body>
If you are not redirected automatically, follow the <a href='{{ artcl_full_artifact_url }}'>link to the logs</a>.
</body>
</html>

View File

@ -1,22 +0,0 @@
Welcome to collect-logs Documentation:
======================================
.. note:: This documentation was generated by the collect-logs_ role. If you
find any problems, please note the TripleO-Quickstart call (if available)
that was used to deploy the environment, and create a bug on Launchpad for
tripleo-quickstart_.
.. _collect-logs: https://github.com/openstack/tripleo-quickstart-extras/tree/master/roles/collect-logs#documentation-generation-related
.. _tripleo-quickstart: https://bugs.launchpad.net/tripleo-quickstart/+filebug
--------
Contents
--------
.. toctree::
:maxdepth: 2
:numbered:
{% for doc in artcl_create_docs_payload.table_of_contents %}
{{ doc }}
{% endfor %}

View File

@ -1,20 +0,0 @@
echo "+ ip -o link" > {{ odl_extra_info_log }};
ip -o link &>> {{ odl_extra_info_log }};
echo "+ ip -o addr" >> {{ odl_extra_info_log }};
ip -o addr &>> {{ odl_extra_info_log }};
echo "+ arp -an" >> {{ odl_extra_info_log }};
arp -an &>> {{ odl_extra_info_log }};
echo "+ ip netns list" >> {{ odl_extra_info_log }};
ip netns list &>> {{ odl_extra_info_log }};
echo "+ ovs-ofctl -OOpenFlow13 show br-int" >> {{ odl_extra_info_log }};
ovs-ofctl -OOpenFlow13 show br-int &>> {{ odl_extra_info_log }};
echo "+ ovs-ofctl -OOpenFlow13 dump-flows br-int" >> {{ odl_extra_info_log }};
ovs-ofctl -OOpenFlow13 dump-flows br-int &>> {{ odl_extra_info_log }};
echo "+ ovs-ofctl -OOpenFlow13 dump-groups br-int" >> {{ odl_extra_info_log }};
ovs-ofctl -OOpenFlow13 dump-groups br-int &>> {{ odl_extra_info_log }};
echo "+ ovs-ofctl -OOpenFlow13 dump-group-stats br-int" >> {{ odl_extra_info_log }};
ovs-ofctl -OOpenFlow13 dump-group-stats br-int &>> {{ odl_extra_info_log }};
echo "+ ovs-vsctl list Open_vSwitch" >> {{ odl_extra_info_log }};
ovs-vsctl list Open_vSwitch &>> {{ odl_extra_info_log }};
echo "+ ovs-vsctl show" >> {{ odl_extra_info_log }};
ovs-vsctl show &>> {{ odl_extra_info_log }};

View File

@ -1,30 +0,0 @@
# Exclude these paths to speed up the filtering
# These need to be removed/made more specific if we decide to collect
# anything under these paths
- /dev
- /proc
- /run
- /sys
# Exclude paths
{% for exclude_path in artcl_exclude_list|default([]) %}
- {{ exclude_path }}
{% endfor %}
# Include all subdirectories and log files in the check
# See "INCLUDE/EXCLUDE PATTERN RULES" section about --recursive
# in the rsync man page
+ */
+ *.log
# Include paths
{% for include_path in collect_list|default([]) %}
{% if include_path|list|last == "/" %}
+ {{ include_path }}**
{% else %}
+ {{ include_path }}
{% endif %}
{% endfor %}
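# As an illustration (not itself part of the template): a collect_list entry
# of "/var/log/" renders as "+ /var/log/**", while "/home/*/*.log" is emitted
# verbatim as "+ /home/*/*.log".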
# Exclude everything else
- *

View File

@ -1,7 +0,0 @@
---
# list of packages to be installed for collection
artcl_collect_pkg_list:
- gzip
- rsync
- socat
- tar

View File

@ -1,66 +0,0 @@
---
artcl_collect_list:
- /etc/
- /etc/neutron
- /etc/tempest/saved_state.json
- /etc/tempest/tempest.conf
- /etc/tempest/tempest.conf.sample
- /etc/tempest/*.xml
- /home/*/*/black_list_*
- /home/*/composable_roles/network/nic-configs/
- /home/*/composable_roles/*/*.yaml
- /home/*/composable_roles/*.yaml
- /home/*/*.conf
- /home/*/deploy-overcloudrc
- /home/*/.instack/install-undercloud.log
- /home/*/inventory/group_vars/*.yml
- /home/*/*.json
- /home/*/*.log
- /home/*/openshift_deploy_logs/*.log
- /home/*/ovb
- /home/*/overcloud_deploy.sh
- /home/*/overcloudrc*
- /home/*/robot/
- /home/*/*.sh
- /home/*/shiftstackrc*
- /home/*/stackrc
- /home/*/tempest*/etc/*.conf
- /home/*/tempest*/*.log
- /home/*/tempest*/saved_state.json
- /home/*/tempest*/*.xml
- /home/*/.tripleo
- /home/*/undercloud-install-*.tar.bzip2
- /home/*/virt
- /home/*/*/white_list_*
- /home/*/*.yaml
- /home/*/*.yml
- /root/
- /usr/share/ceph-osd-run.sh
- /usr/share/openstack-tripleo-heat-templates
- /var/lib/config-data/
- /var/lib/config-data/puppet-generated/
- /var/lib/config-data/puppet-generated/neutron/etc/neutron/plugins/
- /var/lib/docker-puppet
- /var/lib/heat-config
- /var/lib/mistral/
- /var/lib/openvswitch/ovn/*.db
- /var/lib/tripleo-config
- /var/log/
- /var/log/containers/opendaylight
- /var/log/extra/containers/
- /var/log/extra/podman/containers
- /var/run/heat-config
- /var/tmp/packstack
artcl_exclude_list:
- '.*'
- /etc/pki/*
- /etc/selinux/targeted/*
- /root/*.initrd*
- /root/*.tar*
- /root/*.vmlinuz*
- /root/*.qcow*
- /udev/hwdb.bin
- /var/lib/config-data/*/etc/puppet/modules
- /var/log/journal/*

View File

@ -1,10 +0,0 @@
---
# These vars are defaults overrides which are loaded only when the role is
# running in an unsecured environment (outside Zuul).
# * any call to the env lookup plugin under Zuul would generate a runtime exception
ara_graphite_prefix: "tripleo.{{ lookup('env', 'STABLE_RELEASE')|default('master', true) }}.{{ lookup('env', 'TOCI_JOBTYPE') }}."
artcl_build_url: "{{ lookup('env', 'BUILD_URL') }}"
artcl_build_tag: "{{ lookup('env', 'BUILD_TAG') }}"
artcl_rsync_password: "{{ lookup('env', 'RSYNC_PASSWORD') }}"
influxdb_data_file_path: "{{ lookup('env', 'LOCAL_WORKING_DIR')|default('/tmp', true) }}/influxdb_data"
local_working_dir: "{{ zuul_work_dir | default(lookup('env', 'HOME')) }}/.quickstart"

View File

@ -1,45 +0,0 @@
[metadata]
name = ansible-role-collect-logs
summary = ansible-role-collect-logs - An Ansible role for aggregating logs from different nodes.
description_file =
README.rst
author = TripleO Team
author_email = openstack-discuss@lists.openstack.org
home_page = https://opendev.org/openstack/ansible-role-collect-logs
classifier =
License :: OSI Approved :: Apache Software License
Development Status :: 4 - Beta
Intended Audience :: Developers
Intended Audience :: System Administrators
Intended Audience :: Information Technology
Topic :: Utilities
[global]
setup_hooks =
pbr.hooks.setup_hook
[files]
# Allows us to install the role using pip so Ansible can find it.
data_files =
share/ansible/roles/collect-logs = roles/collect_logs/*
share/ansible/roles/collect-logs/library = plugins/modules/*
share/ansible/roles/collect-logs/module_utils = plugins/module_utils/*
share/ansible/roles/collect-logs/docs = docs/*
[wheel]
universal = 1
[pbr]
skip_authors = True
skip_changelog = True
[flake8]
# black compatible settings
# https://black.readthedocs.io/en/stable/the_black_code_style.html
max_line_length = 88
extend_ignore = E203,E501,W503,BLK100
show_source = True
builtins = _
[isort]
profile=black

View File

@ -1,22 +0,0 @@
# Copyright Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
import setuptools
__metaclass__ = type
setuptools.setup(setup_requires=["pbr"], pbr=True, py_modules=[])

View File

@ -1,9 +0,0 @@
---
- hosts: all
tasks:
- name: include ansible-role-collect-logs role
vars:
artcl_collect: true
artcl_publish: true
include_role:
name: collect_logs

View File

@ -1,4 +0,0 @@
pre-commit>=1.20.0 # MIT
pytest
pytest-mock
pyyaml

View File

@ -1,4 +0,0 @@
plugins/modules/ara_graphite.py validate-modules:missing-gplv3-license
plugins/modules/ara_influxdb.py validate-modules:missing-gplv3-license
plugins/modules/flatten_nested_dict.py validate-modules:missing-gplv3-license
plugins/modules/sova.py validate-modules:missing-gplv3-license

View File

@ -1 +0,0 @@
ignore-2.10.txt

View File

@ -1 +0,0 @@
ignore-2.10.txt

View File

@ -1 +0,0 @@
ignore-2.10.txt

View File

@ -1 +0,0 @@
ignore-2.10.txt

View File

@ -1,2 +0,0 @@
pyyaml
requests

View File

@ -1,3 +0,0 @@
pytest
pytest-mock
mock; python_version < '3.3'

View File

@ -1,76 +0,0 @@
from __future__ import absolute_import, division, print_function
import pytest
import yaml
try:
# ansible-test style imports
from ansible_collections.tripleo.collect_logs.plugins.module_utils.test_utils import (
AnsibleExitJson,
AnsibleFailJson,
ModuleTestCase,
set_module_args,
)
from ansible_collections.tripleo.collect_logs.plugins.modules import (
flatten_nested_dict,
)
except ImportError:
# avoid collection errors running: pytest --collect-only
import os
import sys
plugins_path = os.path.join(os.path.dirname(__file__), "../../plugins/")
plugins_path = os.path.realpath(plugins_path)
sys.path.append("%s/%s" % (plugins_path, "module_utils"))
sys.path.append("%s/%s" % (plugins_path, "modules"))
import flatten_nested_dict
from test_utils import (
AnsibleExitJson,
AnsibleFailJson,
ModuleTestCase,
set_module_args,
)
__metaclass__ = type
SAMPLE_INPUT_1 = """
data:
system:
cpuinfo:
cmd: cat /proc/cpuinfo
capture_file: /var/log/extra/cpuinfo.txt
"""
SAMPLE_OUTPUT_1 = """
data:
- cmd: cat /proc/cpuinfo
capture_file: /var/log/extra/cpuinfo.txt
name: cpuinfo
group: system
"""
class TestFlattenNestedDict(ModuleTestCase):
def test_invalid_args(self):
set_module_args(
data="invalid",
)
with pytest.raises(AnsibleFailJson) as context:
flatten_nested_dict.main()
assert context.value.args[0]["failed"] is True
assert "msg" in context.value.args[0]
def test_empty(self):
set_module_args(
data={},
)
with pytest.raises(AnsibleExitJson) as context:
flatten_nested_dict.main()
assert context.value.args[0] == {"data": [], "changed": False}
def test_one(self):
set_module_args(data=yaml.safe_load(SAMPLE_INPUT_1)["data"])
with pytest.raises(AnsibleExitJson) as context:
flatten_nested_dict.main()
assert context.value.args[0]["changed"] is False
assert context.value.args[0]["data"] == yaml.safe_load(SAMPLE_OUTPUT_1)["data"]
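
SAMPLE_INPUT_1 and SAMPLE_OUTPUT_1 above pin down the transformation under test. As a minimal stand-in only, assuming nothing beyond those samples, the flattening can be sketched like this; the deleted plugins/modules/flatten_nested_dict.py wraps the same idea in an Ansible module.

def flatten(data):
    # Turn {group: {name: {attrs...}}} into a flat list of dicts,
    # mirroring the SAMPLE_INPUT_1 -> SAMPLE_OUTPUT_1 pair above.
    result = []
    for group, entries in data.items():
        for name, attrs in entries.items():
            item = dict(attrs)
            item["name"] = name
            item["group"] = group
            result.append(item)
    return result

flatten({"system": {"cpuinfo": {"cmd": "cat /proc/cpuinfo",
                                "capture_file": "/var/log/extra/cpuinfo.txt"}}})
# -> [{"cmd": "cat /proc/cpuinfo",
#      "capture_file": "/var/log/extra/cpuinfo.txt",
#      "name": "cpuinfo", "group": "system"}]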

View File

@ -1,68 +0,0 @@
from __future__ import absolute_import, division, print_function
import pytest
try:
# ansible-test style imports
from ansible_collections.tripleo.collect_logs.plugins.module_utils.test_utils import (
AnsibleExitJson,
AnsibleFailJson,
ModuleTestCase,
set_module_args,
)
from ansible_collections.tripleo.collect_logs.plugins.modules import sova
except ImportError:
# avoid collection errors running: pytest --collect-only
import os
import sys
plugins_path = os.path.join(os.path.dirname(__file__), "../../plugins/")
plugins_path = os.path.realpath(plugins_path)
sys.path.append("%s/%s" % (plugins_path, "module_utils"))
sys.path.append("%s/%s" % (plugins_path, "modules"))
import sova
from test_utils import (
AnsibleExitJson,
AnsibleFailJson,
ModuleTestCase,
set_module_args,
)
__metaclass__ = type
class TestSova(ModuleTestCase):
def test_invalid_args(self):
set_module_args(
data="invalid",
)
with pytest.raises(AnsibleFailJson) as context:
sova.main()
assert context.value.args[0]["failed"] is True
assert "msg" in context.value.args[0]
def test_min(self):
set_module_args(
# just a file that exists on almost any platform
config={
"regexes": [{"regex": "127.0.0.1", "name": "hosts"}],
"patterns": {
"console": [
{
"id": 1,
"logstash": "",
"msg": "Overcloud stack installation: SUCCESS.",
"pattern": "Stack overcloud CREATE_COMPLETE",
"tag": "info",
}
]
},
},
files={"console": "/etc/hosts"},
)
with pytest.raises(AnsibleExitJson) as context:
sova.main()
assert context.value.args[0]["changed"] is True
assert context.value.args[0]["processed_files"] == ["/etc/hosts"]
assert "message" in context.value.args[0]
assert context.value.args[0]["tags"] == []
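
test_min above feeds sova a pattern table plus a file map and checks the returned tags. The sketch below is only a rough stand-in for that matching loop, under the assumption that a pattern counts as matched when its string appears in the file; the deleted plugins/modules/sova.py module is more involved and also builds the "message" field the test checks for.

def scan(config, files):
    # Minimal stand-in: report which configured patterns appear in which files.
    tags, processed = [], []
    for kind, path in files.items():
        with open(path) as handle:
            text = handle.read()
        processed.append(path)
        for pattern in config.get("patterns", {}).get(kind, []):
            if pattern["pattern"] in text:
                tags.append(pattern["tag"])
    return {"processed_files": processed, "tags": tags}

# With the test's config and files={"console": "/etc/hosts"}, the pattern
# "Stack overcloud CREATE_COMPLETE" is not found, so tags stays [] and
# processed_files is ["/etc/hosts"], matching the assertions in test_min.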

tox.ini
View File

@ -1,83 +0,0 @@
[tox]
# for tty option
minversion = 3.4.0
envlist = docs, linters, molecule
skipsdist = True
requires =
tox-ansible >= 1.5.3
tox<4
[testenv]
usedevelop = True
# Do not use openstack constraint files here, this repository is used for CI
# and should not be subject to these.
passenv =
ANSIBLE_*
CURL_CA_BUNDLE
PODMAN_*
DOCKER_*
HOME
REQUESTS_CA_BUNDLE
SSH_AUTH_SOCK
SSL_CERT_FILE
TERM
setenv = VIRTUAL_ENV={envdir}
deps = -r{toxinidir}/test-requirements.txt
whitelist_externals = bash
[testenv:bindep]
basepython = python3
# Do not install any requirements. We want this to be fast and work even if
# system dependencies are missing, since it's used to tell you what system
# dependencies are missing! This also means that bindep must be installed
# separately, outside of the requirements files.
deps = bindep
commands = bindep test
[testenv:linters]
basepython = python3
setenv =
ANSIBLE_LIBRARY=./library
commands =
# check only modified files:
python -m pre_commit run -a
[testenv:releasenotes]
basepython = python3
whitelist_externals = bash
commands = bash -c ci-scripts/releasenotes_tox.sh
[testenv:venv]
basepython = python3
commands = {posargs}
[testenv:molecule]
setenv =
ANSIBLE_FORCE_COLOR=1
ANSIBLE_CALLBACK_WHITELIST=profile_tasks
ANSIBLE_SHOW_CUSTOM_STATS=1
PYTHONPATH={env:PYTHONPATH:}:library
# failsafe mechanism to avoid accidental disablement of tests
# 2/3 molecule scenarios are expected to pass
PYTEST_REQPASS=2
deps =
ansible-core>=2.11,<2.12
molecule[test,podman]>=3.3.2,<3.4 # MIT
pytest-molecule>=1.4.0
pytest-plus # provides support for PYTEST_REQPASS
commands =
ansible-galaxy collection install --force containers.podman:=1.8.2 community.general:=4.0.2
python -m pytest --color=yes --html={envlogdir}/reports.html --self-contained-html {tty:-s} -k molecule {posargs}
[testenv:ansible]
description = Used as base for all tox-ansible environments
# https://github.com/ansible-community/tox-ansible/issues/96
[testenv:sanity]
usedevelop = False
deps =
ansible-core>=2.11,<2.14
[testenv:units]
usedevelop = False
deps =

View File

@ -1,101 +0,0 @@
---
- job:
name: tox-ansible-test-sanity
description: Runs ansible-test sanity (tox -e sanity)
parent: tox
vars:
tox_envlist: sanity # dynamic tox env added by tox-ansible
# we want to run sanity only on py3.10 instead of implicit 2.6-3.9 range
tox_extra_args: -- --python 3.10
- job:
name: tox-ansible-test-units
description: Runs ansible-test units (tox -e units)
parent: tox
vars:
tox_envlist: units # dynamic tox env added by tox-ansible
# we want to run units only on py3.10 instead of implicit 2.6-3.9 range
tox_extra_args: -- --python 3.10
- job:
name: zuul-ansible-role-collect-logs
description: Validate that zuul can use that role.
parent: base
run: test-playbooks/zuul-ansible-role-collect-logs.yaml
roles:
- zuul: opendev.org/openstack/ansible-role-collect-logs
irrelevant-files:
- ^vars/sova-patterns.yml$
- job:
description: Molecule job
name: tripleo-logs-tox-molecule
parent: tripleo-tox-molecule
pre-run:
- zuul.d/playbooks/pre.yml
timeout: 3600
- project:
vars:
fail_logs_collection: true
queue: tripleo
check:
jobs: &jobs
- openstack-tox-linters
- tripleo-logs-tox-molecule
- tox-ansible-test-sanity
- tox-ansible-test-units
- zuul-ansible-role-collect-logs
experimental:
jobs:
# Limit the number of jobs executed while still assuring a relevant
# level of coverage. If specific tasks are to be tested we should
# consider implementing functional tests for them, especially as
# running full integration does not effectively validate that the
# outcome was produced (they still rely on manual verification).
- tripleo-ci-centos-8-scenario001-standalone:
irrelevant-files: &irrelevant_sa
# do not put requirements.txt here, as it can have a huge impact
- ^.*\.md$
- ^.*\.rst$
- ^.ansible-lint$
- ^.pre-commit-config.yaml$
- ^.yamllint$
- ^Puppetfile.*$
- ^doc/.*$
- ^etc/.*$
- ^lower-constraints.txt$
- ^metadata.json$
- ^releasenotes/.*$
- ^spec/.*$
- ^test-requirements.txt$
- ^vars/sova-patterns.yml$
- ^zuul.d/tox\.yaml$
- tox.ini
vars:
consumer_job: false
build_container_images: true
tags:
- standalone
- tripleo-ci-centos-8-containers-multinode:
irrelevant-files: *irrelevant_sa
vars:
consumer_job: false
build_container_images: true
tags:
- undercloud-setup
- undercloud-scripts
- undercloud-install
- undercloud-post-install
- tripleo-validations
- overcloud-scripts
- overcloud-prep-config
- overcloud-prep-containers
- overcloud-deploy
- overcloud-post-deploy
- overcloud-validate
gate:
jobs: *jobs
post:
jobs:
- publish-openstack-python-branch-tarball

View File

@ -1,25 +0,0 @@
---
- hosts: all
tasks:
- name: Remove docker from molecule
become: true
package:
name:
- docker-ce
- docker-ce-cli
- containerd.io
state: absent
- name: Set containers module to 3.0
become: true
shell: |
dnf module disable container-tools:rhel8 -y
dnf module enable container-tools:3.0 -y
dnf clean metadata
tags:
- skip_ansible_lint
- name: Install podman
include_role:
name: ensure-podman