Add common logging with filebeat

This commit
1. Provides a playbook to install the filebeat agent on all
   undercloud/overcloud nodes
2. Provides another playbook that adds the browbeat uuid to the
   filebeat config file and starts filebeat during browbeat run
3. Corresponding changes in browbeat.py and browbeat/tools.py
   to run the playbook to insert custom browbeat uuid in the
   filebeat configuration.

Change-Id: Idd2efaf931f4ff581db715a04adef738f81d281c
This commit is contained in:
Sai Sindhur Malleni 2020-02-27 12:23:21 -05:00
parent 3e4d4312b0
commit fbf309baee
36 changed files with 374 additions and 12 deletions

View File

@ -0,0 +1,21 @@
---
# Applies the browbeat_logging role to every undercloud/overcloud node:
# stamps the current browbeat UUID into the filebeat configuration and
# starts/stops filebeat via the role's handlers.
- hosts: undercloud
  remote_user: "{{ local_remote_user }}"
  vars_files:
    - ../install/group_vars/all.yml
  roles:
    - browbeat_logging

- hosts: controller
  remote_user: "{{ host_remote_user }}"
  vars_files:
    - ../install/group_vars/all.yml
  roles:
    - browbeat_logging

- hosts: compute
  remote_user: "{{ host_remote_user }}"
  vars_files:
    - ../install/group_vars/all.yml
  roles:
    - browbeat_logging

View File

@ -0,0 +1,30 @@
---
# Installs and configures the filebeat agent on all undercloud/overcloud
# nodes. config_type selects the per-node-type log list used by the
# filebeat_setup role; osp_version discovers the RHOSP release first.
- hosts: undercloud
  remote_user: "{{ local_remote_user }}"
  vars_files:
    - ../install/group_vars/all.yml
  vars:
    config_type: undercloud
  roles:
    - osp_version
    - filebeat_setup

- hosts: controller
  remote_user: "{{ host_remote_user }}"
  vars_files:
    - ../install/group_vars/all.yml
  vars:
    config_type: controller
  roles:
    - osp_version
    - filebeat_setup

- hosts: compute
  remote_user: "{{ host_remote_user }}"
  vars_files:
    - ../install/group_vars/all.yml
  vars:
    config_type: compute
  roles:
    - osp_version
    - filebeat_setup

View File

@ -0,0 +1,13 @@
---
# Handlers for the browbeat_logging role. Both are notified by the
# "insert browbeat uuid" task; the logging_status extra-var (passed on the
# ansible-playbook command line) decides which one actually acts.
- name: restart filebeat
  service:
    name: filebeat
    state: restarted
  become: true
  # Fixed: previously this handler had no condition, so when logging was
  # being disabled filebeat was restarted and then immediately stopped by
  # the handler below. Only restart when logging is being enabled.
  when: logging_status|bool

- name: stop filebeat
  service:
    name: filebeat
    state: stopped
  become: true
  when: not logging_status|bool

View File

@ -0,0 +1,15 @@
---
# Stamp the current browbeat UUID into filebeat's configuration so every
# shipped log event can be correlated with a specific browbeat run.
- name: check if filebeat config is present
  stat:
    path: /etc/filebeat/filebeat.yml
  register: filebeat_config

- name: insert browbeat uuid
  lineinfile:
    path: /etc/filebeat/filebeat.yml
    regexp: '^\s+browbeat_uuid'
    # Indented to sit under the "fields:" section of filebeat.yml.
    line: '    browbeat_uuid: "{{ browbeat_uuid }}"'
  become: true
  when: filebeat_config.stat.exists
  notify:
    - restart filebeat
    - stop filebeat

View File

@ -0,0 +1,27 @@
---
# Install and configure the filebeat agent. Run the playbook with
#   -e "start_filebeat=True"
# to also start and enable the service after configuration.
- name: Install filebeat RPM
  yum:
    name: "{{ filebeat_url }}"
    state: present
  become: true

- name: include vars
  # Per-release log lists (filebeat_logs) live in vars/<rhosp_major>.yml.
  include_vars:
    file: "vars/{{ rhosp_major }}.yml"

- name: Template the filebeat configuration file
  template:
    src: filebeat.yml.j2
    dest: /etc/filebeat/filebeat.yml
    owner: root
    group: root
    # Quoted so YAML does not reinterpret the octal literal.
    mode: "0644"
  become: true

- name: Start and enable filebeat
  service:
    name: filebeat
    # Fixed: the task name, the start_filebeat gate below, and the install
    # docs all promise to start/enable the service here, but it previously
    # used state: stopped / enabled: no.
    state: started
    enabled: true
  become: true
  when: start_filebeat is defined and start_filebeat|bool

View File

@ -0,0 +1,44 @@
#=========================== Filebeat inputs =============================
filebeat.inputs:
- type: log
  enabled: true
  # Paths that should be crawled and fetched. Glob based paths.
  paths:
{% for log in filebeat_logs[config_type] %}
    - {{ log }}
{% endfor %}
  # Attached to every event so logs can be filtered per cloud and per run.
  fields:
    cloud_name: {{ cloud_prefix }}
    # Populated at run time by the browbeat_logging role.
    browbeat_uuid: ""
setup.ilm.enabled: false
setup.template.name: "browbeat-logs"
setup.template.pattern: "browbeat-logs-*"

#============================= Filebeat modules ===============================
filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml
  # Set to true to enable config reloading
  reload.enabled: false
  # Period on which files under path should be checked for changes
  #reload.period: 10s

#==================== Elasticsearch template setting ==========================
setup.template.settings:
  index.number_of_shards: 1

#-------------------------- Elasticsearch output ------------------------------
output.elasticsearch:
  # Array of hosts to connect to.
  hosts: ["{{ es_ip }}:{{ es_local_port }}"]
  index: "browbeat-logs-{{ cloud_prefix }}"

View File

@ -0,0 +1,75 @@
---
# Log files shipped by filebeat, grouped by the node type the
# filebeat_setup role runs against (selected via config_type).
filebeat_logs:
  undercloud:
    - /var/log/containers/nova/nova-api.log
    - /var/log/containers/nova/nova-compute.log
    - /var/log/containers/nova/nova-conductor.log
    - /var/log/containers/nova/nova-scheduler.log
    - /var/log/containers/glance/api.log
    - /var/log/containers/heat/heat-engine.log
    - /var/log/containers/heat/heat_api.log
    - /var/log/containers/ironic/ironic-conductor.log
    - /var/log/containers/ironic-inspector/ironic-inspector.log
    - /var/log/containers/keystone/keystone.log
    - /var/log/containers/mistral/api.log
    - /var/log/containers/mistral/engine.log
    - /var/log/containers/mistral/event-engine.log
    - /var/log/containers/mistral/executor.log
    - /var/log/containers/mysql/mariadb.log
    - /var/log/containers/neutron/dhcp-agent.log
    - /var/log/containers/neutron/ironic-neutron-agent.log
    - /var/log/containers/neutron/l3-agent.log
    - /var/log/containers/neutron/openvswitch-agent.log
    - /var/log/containers/neutron/server.log
    - /var/log/containers/placement/placement.log
    - /var/log/containers/zaqar/zaqar-server.log
    - /var/log/containers/httpd/heat-api/heat_api_wsgi_error.log
    - /var/log/containers/httpd/ironic-api/ironic_wsgi_error.log
    - /var/log/containers/httpd/ironic-pxe/ipxe_vhost_error.log
    - /var/log/containers/httpd/keystone/keystone_wsgi_error.log
    - /var/log/containers/httpd/nova-api/nova_api_wsgi_error.log
    - /var/log/containers/httpd/placement/placement_wsgi_error.log
    - /var/log/containers/httpd/zaqar/zaqar_wsgi_error.log
  controller:
    - /var/log/containers/cinder/cinder-api.log
    - /var/log/containers/cinder/cinder-manage.log
    - /var/log/containers/cinder/cinder-rowsflush.log
    - /var/log/containers/cinder/cinder-scheduler.log
    - /var/log/containers/cinder/cinder-volume.log
    - /var/log/containers/glance/api.log
    - /var/log/containers/heat/heat_api_cfn.log
    - /var/log/containers/heat/heat_api.log
    - /var/log/containers/heat/heat-engine.log
    - /var/log/containers/keystone/keystone.log
    - /var/log/containers/mysql/mysqld.log
    - /var/log/containers/neutron/server.log
    - /var/log/containers/neutron/l3-agent.log
    - /var/log/containers/neutron/dhcp-agent.log
    - /var/log/containers/neutron/metadata-agent.log
    - /var/log/containers/neutron/openvswitch-agent.log
    - /var/log/containers/nova/nova-api.log
    - /var/log/containers/nova/nova-conductor.log
    - /var/log/containers/nova/nova-scheduler.log
    - /var/log/containers/nova/nova-metadata-api.log
    - /var/log/containers/nova/nova-novncproxy.log
    - /var/log/containers/openvswitch/ovn-controller.log
    - /var/log/containers/openvswitch/ovn-northd.log
    - /var/log/containers/openvswitch/ovsdb-server-nb.log
    - /var/log/containers/openvswitch/ovsdb-server-sb.log
    - /var/log/containers/placement/placement.log
    - /var/log/containers/rabbitmq/rabbit@{{ inventory_hostname }}.log
    - /var/log/containers/httpd/heat-api/heat_api_wsgi_error.log
    - /var/log/containers/httpd/ironic-api/ironic_wsgi_error.log
    - /var/log/containers/httpd/ironic-pxe/ipxe_vhost_error.log
    - /var/log/containers/httpd/keystone/keystone_wsgi_error.log
    - /var/log/containers/httpd/nova-api/nova_api_wsgi_error.log
    - /var/log/containers/httpd/placement/placement_wsgi_error.log
    - /var/log/containers/httpd/cinder-api/cinder_wsgi_error.log
    - /var/log/containers/httpd/heat-api-cfn/heat_api_cfn_wsgi_error.log
    - /var/log/containers/httpd/nova-metadata/nova_metadata_wsgi_error.log
    - /var/log/containers/swift/swift.log
  compute:
    - /var/log/containers/libvirt/libvirtd.log
    - /var/log/containers/neutron/ovn-metadata-agent.log
    - /var/log/containers/nova/nova-compute.log
    - /var/log/containers/openvswitch/ovn-controller.log

View File

@ -45,6 +45,9 @@ shaker_venv: "{{browbeat_path}}/.shaker-venv"
# Shaker version to Install
shaker_version: 1.3.2
# Custom name to use for the cloud
cloud_prefix: openstack
# Configuration items to adjust browbeat results served through httpd
browbeat_results_port: 9001
browbeat_results_in_httpd: true
@ -324,7 +327,7 @@ graphite_host:
graphite_port: 80
carbon_cache_port: 2003
# Graphite prefix / Cloud name used both with graphite and grafana dashboards
graphite_prefix:
graphite_prefix: "{{cloud_prefix}}"
# Graphite username and password for login on the dashboard
# credential aren't created when you deploy graphite, use manage.py
graphite_username: root
@ -404,7 +407,7 @@ rsyslog_elasticsearch_server: "{{es_ip}}"
rsyslog_elasticsearch_port: "{{es_local_port}}"
rsyslog_aggregator_server: "{{es_ip}}"
rsyslog_aggregator_port: "7894"
rsyslog_cloud_name: "{{graphite_prefix}}"
rsyslog_cloud_name: "{{cloud_prefix}}"
disk_backed_rsyslog: false
rsyslog_forwarding: true
# If true up to 2gb of messages will be logged
@ -415,10 +418,16 @@ rsyslog_forwarding: true
# due to utilizing elasticsearch connectors, general
# usage may want to disable this option due to security reasons
# in which case you should set this to false
es_ip:
## Elasticsearch options ##
es_ip: 1.1.1.1
es_local_port: 9200
es_listen_external: true
elastic5: false
## Filebeat options ##
filebeat_url: https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.6.0-x86_64.rpm
### kibana options ###
# change this to affect nginx-wrapped htpasswd authentication
kibana_user: admin

View File

@ -6,6 +6,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: true
@ -27,6 +28,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 5
sleep_after: 5

View File

@ -10,6 +10,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -31,6 +32,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 5
sleep_after: 5

View File

@ -161,9 +161,10 @@ def main():
"If you meant 'all' use: './browbeat.py all' or './browbeat.py'")
exit(1)
browbeat_uuid = browbeat.elastic.browbeat_uuid
result_dir_ts = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
_logger.info("Browbeat test suite kicked off")
_logger.info("Browbeat UUID: {}".format(browbeat.elastic.browbeat_uuid))
_logger.info("Browbeat UUID: {}".format(browbeat_uuid))
if _config['elasticsearch']['enabled']:
_logger.info("Checking for Metadata")
metadata_exists = tools.check_metadata()
@ -176,6 +177,11 @@ def main():
_logger.info("Regathering Metadata")
tools.gather_metadata()
if _config['filebeat']['enabled']:
_logger.info("Enabling filebeat for log collection"
" with browbeat_uuid {}".format(browbeat_uuid))
tools.common_logging(browbeat_uuid, logging_status=True)
_logger.info("Running workload(s): {}".format(','.join(_cli_args.workloads)))
# Iteration rerun_type pushes rerun logic down to the workload itself. This allows the workload
@ -194,15 +200,20 @@ def main():
if base.WorkloadBase.failure > 0:
_logger.info(
"Browbeat finished with test failures, UUID: {}".format(browbeat.elastic.browbeat_uuid))
"Browbeat finished with test failures, UUID: {}".format(browbeat_uuid))
sys.exit(1)
if base.WorkloadBase.index_failures > 0:
_logger.info("Browbeat finished with Elasticsearch indexing failures, UUID: {}"
.format(browbeat.elastic.browbeat_uuid))
.format(browbeat_uuid))
sys.exit(2)
_logger.info("Browbeat finished successfully, UUID: {}".format(browbeat.elastic.browbeat_uuid))
if _config['filebeat']['enabled']:
_logger.info("Disabling filebeat for log collection"
" for browbeat_uuid {}".format(browbeat_uuid))
tools.common_logging("", logging_status=False)
_logger.info("Browbeat finished successfully, UUID: {}".format(browbeat_uuid))
sys.exit(0)

View File

@ -29,6 +29,9 @@ mapping:
metadata_playbook:
type: str
required: True
logging_playbook:
type: str
required: True
ssh_config:
type: str
required: True
@ -78,6 +81,13 @@ mapping:
port:
type: int
required: True
filebeat:
required: True
type: map
mapping:
enabled:
type: bool
required: True
rally:
required: True
type: map

View File

@ -92,6 +92,15 @@ class Tools(object):
self.logger.info("Metadata about cloud has been gathered")
return True
def common_logging(self, browbeat_uuid, logging_status):
    """Run the common-logging ansible playbook.

    Inserts (or clears) the given browbeat UUID in the filebeat
    configuration on all hosts; the playbook's handlers start or stop
    filebeat according to ``logging_status``.

    :param browbeat_uuid: UUID to stamp into filebeat's config ("" clears it)
    :param logging_status: truthy to enable log shipping, falsy to disable
    """
    ansible_cfg = self.config['ansible']
    # Make ansible use browbeat's generated ssh config to reach the hosts.
    os.putenv("ANSIBLE_SSH_ARGS", " -F {}".format(ansible_cfg['ssh_config']))
    extra_vars = 'browbeat_uuid={} logging_status={}'.format(
        browbeat_uuid, logging_status)
    ansible_cmd = 'ansible-playbook -i {} {} -e "{}"'.format(
        ansible_cfg['hosts'], ansible_cfg['logging_playbook'], extra_vars)
    self.run_cmd(ansible_cmd)
def post_process(self, cli):
workloads = {}
workloads['shaker'] = re.compile("shaker")

View File

@ -6,6 +6,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -27,6 +28,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 5
sleep_after: 5

View File

@ -7,6 +7,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -28,6 +29,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 5
sleep_after: 5

View File

@ -7,6 +7,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -28,6 +29,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 5
sleep_after: 5

View File

@ -6,6 +6,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -27,6 +28,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 5
sleep_after: 5

View File

@ -6,6 +6,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -27,6 +28,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 5
sleep_after: 5

View File

@ -9,6 +9,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -30,6 +31,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 5
sleep_after: 5

View File

@ -7,6 +7,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -28,6 +29,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 5
sleep_after: 5

View File

@ -6,6 +6,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -27,6 +28,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 5
sleep_after: 5

View File

@ -13,6 +13,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -34,6 +35,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 5
sleep_after: 5

View File

@ -13,6 +13,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -34,6 +35,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 5
sleep_after: 5

View File

@ -13,6 +13,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -34,6 +35,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 5
sleep_after: 5

View File

@ -13,6 +13,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -34,6 +35,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 5
sleep_after: 5

View File

@ -10,6 +10,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -31,6 +32,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 0
sleep_after: 0

View File

@ -10,6 +10,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -31,6 +32,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 0
sleep_after: 0

View File

@ -9,6 +9,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -30,6 +31,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 0
sleep_after: 0

View File

@ -11,6 +11,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -32,6 +33,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 0
sleep_after: 0

View File

@ -9,6 +9,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -30,6 +31,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 0
sleep_after: 0

View File

@ -10,6 +10,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/stockpile.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -31,6 +32,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: false
rally:
sleep_before: 0
sleep_after: 0

View File

@ -89,8 +89,8 @@ can also work with Queens release but it is not recommended.
[stack@ospd ansible]$ ansible-playbook -i hosts install/collectd.yml
(Optional) Install Rsyslogd logging with aggregation
~~~~~~~~~~~~~~~~~~~~~~~~~~~
(Optional) Install Rsyslogd logging with aggregation (not maintained)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
First configure the values rsyslog values and elasticsearch parameters in
`ansible/install/group_vars/all.yml`. If you have a large number of hosts
@ -125,6 +125,22 @@ http://docs.grafana.org/http_api/auth/#create-api-token
[stack@ospd ansible]$ ansible-playbook -i hosts install/browbeat.yml # if not run before.
[stack@ospd ansible]$ ansible-playbook -i hosts install/grafana-dashboards.yml
(Optional) Install Browbeat Common Logging through filebeat
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Browbeat can be used to setup common logging on your OpenStack Cluster using Filebeat on the
client side and Elasticsearch on the server side. Set the `cloud_prefix` and `es_ip` in
`install/group_vars/all.yml` before running the playbook to setup common logging for your cloud.
::
[stack@ospd ansible]$ # update the vars
[stack@ospd ansible]$ vi install/group_vars/all.yml
[stack@ospd ansible]$ # install filebeat
[stack@ospd ansible]$ ansible-playbook -i hosts common_logging/install_logging.yml
[stack@ospd ansible]$ # install and start filebeat
[stack@ospd ansible]$ ansible-playbook -i hosts common_logging/install_logging.yml -e "start_filebeat=True"
Not maintained (Pre-Pike): Run Overcloud checks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -604,9 +620,8 @@ Graphite dashboard included and it is recommended to install collectd on your
monitoring host such that you can see if you hit resource issues with your
monitoring host.
Install Kibana Visualizations
-------------------------------
(Optional) Install Kibana Visualizations
----------------------------------------
1. Update install/group_vars/all.yml (es_ip) to identify your ELK host.
2. Install Kibana Visualizations via Ansible playbook

View File

@ -87,6 +87,24 @@ using some simple searches such as:
shaker_uuid: 97092334-34e8-446c-87d6-6a0f361b9aa8 AND record.concurrency: 1 AND result.result_type: bandwidth
shaker_uuid: c918a263-3b0b-409b-8cf8-22dfaeeaf33e AND record.concurrency:1 AND record.test:Bi-Directional
Correlating test run with logs
------------------------------
If filebeat is enabled in the browbeat configuration file and filebeat was previously installed by running:
::
$ ansible-playbook -i hosts common_logging/install_logging.yml
as explained in the installation documentation, then the playbook
`ansible/common_logging/browbeat_logging.yml` is run automatically around each Browbeat run.
Before the workloads start, it inserts the browbeat_uuid into the filebeat configuration and
starts filebeat, so every shipped log message is tagged with the UUID of the run; after the
workloads finish, it clears the UUID from the configuration and stops filebeat so that no
further logs are sent.
Interpreting Browbeat Results
-----------------------------

View File

@ -7,6 +7,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/site.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false

View File

@ -6,6 +6,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/site.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false

View File

@ -6,6 +6,7 @@ browbeat:
ansible:
hosts: ansible/hosts
metadata_playbook: ansible/gather/site.yml
logging_playbook: ansible/common_logging/browbeat_logging.yml
ssh_config: ansible/ssh-config
elasticsearch:
enabled: false
@ -27,6 +28,8 @@ grafana:
port: 3000
dashboards:
- openstack-general-system-performance
filebeat:
enabled: true
rally:
sleep_before: 0
sleep_after: 0