Update conditionals and namespaced options

This change implements namespaced variables and conditionals in the needed
services. This will ensure systems running these playbooks are able to
be deployed in isolation without making OSA-specific assumptions.

Change-Id: Ia20b8514144f0b0bf925d405f06ef2ddc28f1003
Signed-off-by: Kevin Carter <kevin.carter@rackspace.com>
This commit is contained in:
Kevin Carter 2019-01-22 23:24:54 -06:00
parent 83bbe07057
commit abd6661b4e
No known key found for this signature in database
GPG Key ID: 9443251A787B9FB3
8 changed files with 126 additions and 93 deletions

View File

@ -31,7 +31,7 @@
state: "started"
options:
OnBootSec: 30min
OnUnitActiveSec: 24h
OnUnitActiveSec: 12h
Persistent: true
- service_name: "curator-size"
execstarts:
@ -42,5 +42,5 @@
state: "started"
options:
OnBootSec: 30min
OnUnitActiveSec: 5h
OnUnitActiveSec: 1h
Persistent: true

View File

@ -76,6 +76,41 @@
tags:
- sysctl
- name: Create the system group
group:
name: "{{ service_group }}"
gid: "{{ service_group_gid | default(omit) }}"
state: "present"
system: "yes"
- name: Create the system user
block:
- name: Create the system user
user:
name: "{{ service_owner }}"
uid: "{{ service_owner_uid | default(omit) }}"
group: "{{ service_group }}"
shell: "/bin/false"
system: "yes"
createhome: "no"
home: "/var/lib/{{ service_name }}"
rescue:
- name: Check for system user
debug:
msg: >-
The general user creation task failed. This typically means that the
user already exists and something in the user configuration provided
is changing the system user in a way that is simply not possible at this
time. The playbooks will now simply ensure the user exists before
carrying on to the next task. While it's not required, it may be
beneficial to schedule a maintenance window where the elastic services are
stopped.
- name: Ensure the system user exists
user:
name: "{{ service_owner }}"
group: "{{ service_group }}"
- name: Physical host block
block:
- name: Check for directory
@ -101,8 +136,7 @@
owner: "{{ service_owner }}"
group: "{{ service_group }}"
when:
- service_dir.stat.isdir is defined and
not service_dir.stat.isdir
- not (service_dir.stat.exists | bool)
- name: Ensure data link exists
file:
@ -112,8 +146,7 @@
group: "{{ service_group }}"
state: link
when:
- service_dir.stat.isdir is defined and
not service_dir.stat.isdir
- not (service_dir.stat.exists | bool)
when:
- physical_host == inventory_hostname
@ -124,20 +157,19 @@
path: "/openstack/{{ inventory_hostname }}/{{ service_name }}"
state: "directory"
delegate_to: "{{ physical_host }}"
- name: Pull lxc version
command: "lxc-ls --version"
delegate_to: "{{ physical_host }}"
changed_when: false
register: lxc_version
when:
- container_tech | default('lxc') == 'lxc'
tags:
- skip_ansible_lint
- name: Enable or Disable lxc three syntax
set_fact:
lxc_major_version: "{{ lxc_version.stdout.split('.')[0] }}"
when:
- container_tech | default('lxc') == 'lxc'
- name: elasticsearch datapath bind mount
lxc_container:
name: "{{ inventory_hostname }}"
@ -147,9 +179,8 @@
- "{{ elastic_lxc_template_config[(lxc_major_version | int)]['mount'] }}=/openstack/{{ inventory_hostname }}/{{ service_name }} var/lib/{{ service_name }} none bind 0 0"
- "{{ elastic_lxc_template_config[(lxc_major_version | int)]['aa_profile'] }}=unconfined"
delegate_to: "{{ physical_host }}"
when:
- container_tech | default('lxc') == 'lxc'
when:
- container_tech | default('lxc') == 'lxc'
- physical_host != inventory_hostname
- name: Ensure Java is installed
@ -165,41 +196,6 @@
tags:
- package_install
- name: Create the system group
group:
name: "{{ service_group }}"
gid: "{{ service_group_gid | default(omit) }}"
state: "present"
system: "yes"
- name: Create the system user
block:
- name: Create the system user
user:
name: "{{ service_owner }}"
uid: "{{ service_owner_uid | default(omit) }}"
group: "{{ service_group }}"
shell: "/bin/false"
system: "yes"
createhome: "yes"
home: "/var/lib/{{ service_name }}"
rescue:
- name: Check for system user
debug:
msg: >-
The general user creation task failed. This typically means that the
user already exists and something in the user configuration provided
is changing the system user in a way that is simply not possible at this
time. The playbooks will now simply ensure the user exists before
carrying on to the next task. While it's not required, it may be
beneficial to schedule a maintenance window where the elastic services are
stopped.
- name: Ensure the system user exists
user:
name: "{{ service_owner }}"
group: "{{ service_group }}"
- name: Ensure service directories exists
file:
path: "/etc/{{ service_name }}"

View File

@ -152,11 +152,11 @@ Create some basic passwords keys that are needed by fleet
.. code-block:: bash
echo "kolide_fleet_db_password: $(openssl rand -base64 16)" > /etc/openstack_deploy/user_secrets.yml
echo "kolide_fleet_db_password: $(openssl rand -base64 16)" >> /etc/openstack_deploy/user_secrets.yml
echo "kolide_fleet_jwt_key: $(openssl rand -base64 32)" >> /etc/openstack_deploy/user_secrets.yml
echo "kolide_fleet_admin_password: $(openssl rand -base64 16)" >> /etc/openstack_deploy/user_secrets.yml
# NOTICE: This may already be defined
echo "galera_root_password: $(openssl rand -base64 16)" >> /etc/openstack_deploy/user_secrets.yml
echo "kolide_galera_root_password: $(openssl rand -base64 16)" >> /etc/openstack_deploy/user_secrets.yml
Install master/data Fleet nodes on the elastic-logstash containers,

View File

@ -27,10 +27,10 @@
fail:
msg: >-
The root password for the galera cluster is not defined. To proceed with this
installation define the variable `galera_root_password` in line or within a
variable file.
installation define the variable `kolide_galera_root_password` in line or
within a variable file.
when:
- galera_root_password is undefined
- kolide_galera_root_password is undefined
roles:
- role: galera_server
@ -38,5 +38,6 @@
galera_wsrep_node_name: "{{ inventory_hostname }}"
galera_cluster_name: telemetry_galera_cluster
galera_monitoring_allowed_source: "0.0.0.0/0"
galera_root_password: "{{ kolide_galera_root_password }}"
environment: "{{ deployment_environment_variables | default({}) }}"

View File

@ -16,7 +16,7 @@
- name: Create DB for service
mysql_db:
login_user: "root"
login_password: "{{ galera_root_password }}"
login_password: "{{ kolide_galera_root_password }}"
login_host: "127.0.0.1"
name: "{{ kolide_fleet_db_name }}"
state: "present"
@ -27,7 +27,7 @@
- name: Grant access to the DB for the service
mysql_user:
login_user: "root"
login_password: "{{ galera_root_password }}"
login_password: "{{ kolide_galera_root_password }}"
login_host: "127.0.0.1"
name: "{{ kolide_fleet_db_user }}"
password: "{{ kolide_fleet_db_password }}"

View File

@ -15,9 +15,10 @@
physical_host: localhost
galera_root_password: secrete
# kolide_galera_root_password: secrete
kolide_fleet_db_password: secrete
kolide_fleet_admin_password: secrete
kolide_fleet_jwt_key: secrete
kolide_galera_root_password: secrete
osa_test_repo: "openstack/openstack-ansible-ops"

View File

@ -1,41 +1,19 @@
---
all_systems:
vars: {}
vars:
# General Ansible options for OSA
ansible_become: yes
ansible_become_user: "root"
ansible_user: "root"
physical_host: "{{ inventory_hostname }}"
children:
all_hosts:
children:
hosts: {}
systems:
vars:
# General Ansible options for OSA
ansible_become: yes
ansible_become_user: "root"
ansible_user: "root"
## Grafana options
grafana_admin_password: "{{ haproxy_stats_password }}"
## Kolide options
kolide_fleet_db_password: "{{ haproxy_stats_password }}"
kolide_fleet_jwt_key: "{{ haproxy_stats_password }}"
kolide_fleet_admin_password: "{{ haproxy_stats_password }}"
galera_root_password: "{{ galera_root_password | default(haproxy_stats_password) }}"
## Skydive options
skydive_password: "{{ haproxy_stats_password }}"
skydive_elasticsearch_servers: "{{ groups['elastic-logstash'] | map('extract', hostvars, ['ansible_host']) | list | join(',') }}"
skydive_bind_address: "{{ container_address | default(ansible_host) }}"
## Elastic-stack options
elastic_skydive_retention: 2 # Elastic retention set to 2 days max
elastic_skydive_size: 51200 # Elastic retention set to 50GiB max
## Beat options
beat_service_states:
true:
state: restarted
false:
state: stopped
elastic_retention_refresh: true
auditbeat_service_state: "{{ beat_service_states[(inventory_hostname in (groups['hosts'] | default([])) | string | lower)]['state'] }}"
filebeat_service_state: "{{ beat_service_states[(inventory_hostname in (groups['hosts'] | default([])) | string | lower)]['state'] }}"
heartbeat_service_state: "{{ beat_service_states[(inventory_hostname in (groups['utility_all'] | default([])) | string | lower)]['state'] }}"
journalbeat_service_state: "{{ beat_service_states[(inventory_hostname in (groups['hosts'] | default([])) | string | lower)]['state'] }}"
metricbeat_service_state: "{{ beat_service_states[(inventory_hostname in (groups['hosts'] | default([])) | string | lower)]['state'] }}"
packetbeat_service_state: "{{ beat_service_states[(inventory_hostname in (groups['network_hosts'] | default([])) | string | lower)]['state'] }}"
## HAProxy options
haproxy_extra_services:
- service:
@ -106,21 +84,33 @@ all_systems:
children:
traefik_all:
children:
traefik_build_nodes: {}
traefik_build_nodes:
hosts: {}
vars: {}
children: {}
skydive_all:
vars:
skydive_elasticsearch_servers: "{{ groups['elastic-logstash_all'] | map('extract', hostvars, ['ansible_host']) | list | join(',') }}"
skydive_bind_address: "{{ container_address | default(ansible_host) }}"
children:
skydive_build_nodes: {}
skydive_build_nodes:
hosts: {}
vars: {}
children: {}
skydive_agents:
children:
hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
all_hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
skydive_analyzers:
children:
utility_all: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
elk_all:
vars:
elastic_skydive_retention: 2 # Elastic retention set to 2 days max
elastic_skydive_size: 51200 # Elastic retention set to 50GiB max
children:
elastic-logstash_all:
children:
@ -134,6 +124,51 @@ all_systems:
children:
log_hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
beats_all:
vars:
beat_service_states:
true:
state: restarted
false:
state: stopped
elastic_retention_refresh: true
auditbeat_service_state: "{{ auditbeat_service_state | default(beat_service_states[(inventory_hostname in (groups['hosts'] | default([])) | string | lower)]['state']) }}"
filebeat_service_state: "{{ filebeat_service_state | default(beat_service_states[(inventory_hostname in (groups['hosts'] | default([])) | string | lower)]['state']) }}"
heartbeat_service_state: "{{ heartbeat_service_state | default(beat_service_states[(inventory_hostname in (groups['kibana_all'] | default([])) | string | lower)]['state']) }}"
journalbeat_service_state: "{{ journalbeat_service_state | default(beat_service_states[(inventory_hostname in (groups['hosts'] | default([])) | string | lower)]['state']) }}"
metricbeat_service_state: "{{ metricbeat_service_state | default(beat_service_states[(inventory_hostname in (groups['hosts'] | default([])) | string | lower)]['state']) }}"
packetbeat_service_state: "{{ packetbeat_service_state | default(beat_service_states[(inventory_hostname in (groups['network_hosts'] | default([])) | string | lower)]['state']) }}"
children:
auditbeat:
children:
all_hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
filebeat:
children:
all_hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
heatbeat:
children:
kibana_all: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
journalbeat:
children:
all_hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
metricbeat:
children:
all_hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
packetbeat:
children:
all_hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
apm-server_all:
children:
apm-server:
children:
log_hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
fleet_all:
children:
kolide-db_all:
@ -152,7 +187,7 @@ all_systems:
children:
osquery:
children:
hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
all_hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
grafana_all:
children:

View File

@ -35,7 +35,7 @@
- skydive_service_setup_host_python_interpreter == '/opt/skydive/bin/python'
block:
- name: Create skydive venv
command: "/usr/bin/virtualenv --no-site-packages --no-setuptools /opt/skydive"
command: "virtualenv --no-site-packages --no-setuptools /opt/skydive"
args:
creates: /opt/skydive/bin/pip
@ -51,7 +51,7 @@
pip:
name:
- openstacksdk
extra_args: "-U"
extra_args: "-U --isolated"
virtualenv: /opt/skydive
- name: Show ansible interpreter