From abd6661b4ebd6d5fd417dddd27ea1ae3d3606869 Mon Sep 17 00:00:00 2001
From: Kevin Carter
Date: Tue, 22 Jan 2019 23:24:54 -0600
Subject: [PATCH] Update conditionals and namespaced options

This change implements namespaced variables and conditionals in the
services that need them. This ensures systems running these playbooks
can be deployed in isolation, without making OSA-specific assumptions.

Change-Id: Ia20b8514144f0b0bf925d405f06ef2ddc28f1003
Signed-off-by: Kevin Carter
---
 .../elastic_curator/tasks/curator_systemd.yml |   4 +-
 .../roles/elastic_dependencies/tasks/main.yml |  86 +++++++-------
 osquery/README.rst                            |   4 +-
 osquery/installDB.yml                         |   7 +-
 osquery/roles/fleet/tasks/createFleetDB.yml   |   4 +-
 osquery/tests/test-vars.yml                   |   3 +-
 .../osa-integration-inventory.yml             | 107 ++++++++++++------
 .../skydive_common/tasks/skydive_keystone.yml |   4 +-
 8 files changed, 126 insertions(+), 93 deletions(-)

diff --git a/elk_metrics_6x/roles/elastic_curator/tasks/curator_systemd.yml b/elk_metrics_6x/roles/elastic_curator/tasks/curator_systemd.yml
index bf5752f5..3c2b17ec 100644
--- a/elk_metrics_6x/roles/elastic_curator/tasks/curator_systemd.yml
+++ b/elk_metrics_6x/roles/elastic_curator/tasks/curator_systemd.yml
@@ -31,7 +31,7 @@
     state: "started"
     options:
       OnBootSec: 30min
-      OnUnitActiveSec: 24h
+      OnUnitActiveSec: 12h
       Persistent: true
 - service_name: "curator-size"
   execstarts:
@@ -42,5 +42,5 @@
     state: "started"
     options:
       OnBootSec: 30min
-      OnUnitActiveSec: 5h
+      OnUnitActiveSec: 1h
       Persistent: true
diff --git a/elk_metrics_6x/roles/elastic_dependencies/tasks/main.yml b/elk_metrics_6x/roles/elastic_dependencies/tasks/main.yml
index 5a968535..e35394f5 100644
--- a/elk_metrics_6x/roles/elastic_dependencies/tasks/main.yml
+++ b/elk_metrics_6x/roles/elastic_dependencies/tasks/main.yml
@@ -76,6 +76,41 @@
   tags:
     - sysctl
 
+- name: Create the system group
+  group:
+    name: "{{ service_group }}"
+    gid: "{{ service_group_gid | default(omit) }}"
+    state: "present"
+    system: "yes"
+
+- name: Create the system user
+  block:
+    - name: Create the system user
+      user:
+        name: "{{ service_owner }}"
+        uid: "{{ service_owner_uid | default(omit) }}"
+        group: "{{ service_group }}"
+        shell: "/bin/false"
+        system: "yes"
+        createhome: "no"
+        home: "/var/lib/{{ service_name }}"
+  rescue:
+    - name: Check for system user
+      debug:
+        msg: >-
+          The general user creation task failed. This typically means that
+          the user already exists and something in the provided user
+          configuration is changing the system user in a way that is simply
+          not possible at this time. The playbooks will now ensure the user
+          exists before carrying on to the next task. While it is not
+          required, it may be beneficial to schedule a maintenance window
+          during which the elastic services are stopped.
+
+    - name: Ensure the system user exists
+      user:
+        name: "{{ service_owner }}"
+        group: "{{ service_group }}"
+
 - name: Physical host block
   block:
     - name: Check for directory
@@ -101,8 +136,7 @@
       owner: "{{ service_owner }}"
       group: "{{ service_group }}"
       when:
-        - service_dir.stat.isdir is defined and
-          not service_dir.stat.isdir
+        - not (service_dir.stat.exists | bool)
 
     - name: Ensure data link exists
       file:
@@ -112,8 +146,7 @@
       group: "{{ service_group }}"
       state: link
       when:
-        - service_dir.stat.isdir is defined and
-          not service_dir.stat.isdir
+        - not (service_dir.stat.exists | bool)
 
   when:
     - physical_host == inventory_hostname
@@ -124,20 +157,19 @@
       path: "/openstack/{{ inventory_hostname }}/{{ service_name }}"
       state: "directory"
      delegate_to: "{{ physical_host }}"
+
     - name: Pull lxc version
       command: "lxc-ls --version"
       delegate_to: "{{ physical_host }}"
       changed_when: false
       register: lxc_version
-      when:
-        - container_tech | default('lxc') == 'lxc'
       tags:
         - skip_ansible_lint
+
     - name: Enable or Disable lxc three syntax
       set_fact:
         lxc_major_version: "{{ lxc_version.stdout.split('.')[0] }}"
-      when:
-        - container_tech | default('lxc') == 'lxc'
+
     - name: elasticsearch datapath bind mount
       lxc_container:
         name: "{{ inventory_hostname }}"
@@ -147,9 +179,8 @@
         - "{{ elastic_lxc_template_config[(lxc_major_version | int)]['mount'] }}=/openstack/{{ inventory_hostname }}/{{ service_name }} var/lib/{{ service_name }} none bind 0 0"
         - "{{ elastic_lxc_template_config[(lxc_major_version | int)]['aa_profile'] }}=unconfined"
       delegate_to: "{{ physical_host }}"
-      when:
-        - container_tech | default('lxc') == 'lxc'
   when:
+    - container_tech | default('lxc') == 'lxc'
     - physical_host != inventory_hostname
 
 - name: Ensure Java is installed
@@ -165,41 +196,6 @@
   tags:
     - package_install
 
-- name: Create the system group
-  group:
-    name: "{{ service_group }}"
-    gid: "{{ service_group_gid | default(omit) }}"
-    state: "present"
-    system: "yes"
-
-- name: Create the system user
-  block:
-    - name: Create the system user
-      user:
-        name: "{{ service_owner }}"
-        uid: "{{ service_owner_uid | default(omit) }}"
-        group: "{{ service_group }}"
-        shell: "/bin/false"
-        system: "yes"
-        createhome: "yes"
-        home: "/var/lib/{{ service_name }}"
-  rescue:
-    - name: Check for system user
-      debug:
-        msg: >-
-          The general user creation task failed. This typically means that the
-          user already exists and something in the user configuration provided
-          is changing the system user in way that is simply not possible at this
-          time. The playbooks will now simply ensure the user exists and before
-          carrying on to the next task. While it's not required, it may be
-          benificial to schedule a maintenance where the elastic services are
-          stopped.
-
-    - name: Ensure the system user exists
-      user:
-        name: "{{ service_owner }}"
-        group: "{{ service_group }}"
-
 - name: Ensure service directories exists
   file:
     path: "/etc/{{ service_name }}"
diff --git a/osquery/README.rst b/osquery/README.rst
index 571400e8..d632e067 100644
--- a/osquery/README.rst
+++ b/osquery/README.rst
@@ -152,11 +152,11 @@ Create some basic passwords keys that are needed by fleet
 
 .. code-block:: bash
 
-    echo "kolide_fleet_db_password: $(openssl rand -base64 16)" > /etc/openstack_deploy/user_secrets.yml
+    echo "kolide_fleet_db_password: $(openssl rand -base64 16)" >> /etc/openstack_deploy/user_secrets.yml
     echo "kolide_fleet_jwt_key: $(openssl rand -base64 32)" >> /etc/openstack_deploy/user_secrets.yml
     echo "kolide_fleet_admin_password: $(openssl rand -base64 16)" >> /etc/openstack_deploy/user_secrets.yml
     # NOTICE: This may already be defined
-    echo "galera_root_password: $(openssl rand -base64 16)" >> /etc/openstack_deploy/user_secrets.yml
+    echo "kolide_galera_root_password: $(openssl rand -base64 16)" >> /etc/openstack_deploy/user_secrets.yml
 
 
 Install master/data Fleet nodes on the elastic-logstash containers,
diff --git a/osquery/installDB.yml b/osquery/installDB.yml
index 61ea0b4e..d960a3bb 100644
--- a/osquery/installDB.yml
+++ b/osquery/installDB.yml
@@ -27,10 +27,10 @@
     fail:
       msg: >-
         The root password for the galera cluster is not defined. To proceed with this
-        installation define the variable `galera_root_password` in line or within a
-        variable file.
+        installation, define the variable `kolide_galera_root_password` inline
+        or within a variable file.
     when:
-      - galera_root_password is undefined
+      - kolide_galera_root_password is undefined
 
   roles:
     - role: galera_server
@@ -38,5 +38,6 @@
       galera_wsrep_node_name: "{{ inventory_hostname }}"
       galera_cluster_name: telemetry_galera_cluster
       galera_monitoring_allowed_source: "0.0.0.0/0"
+      galera_root_password: "{{ kolide_galera_root_password }}"
 
   environment: "{{ deployment_environment_variables | default({}) }}"
diff --git a/osquery/roles/fleet/tasks/createFleetDB.yml b/osquery/roles/fleet/tasks/createFleetDB.yml
index 2ad524d7..8c90dad2 100644
--- a/osquery/roles/fleet/tasks/createFleetDB.yml
+++ b/osquery/roles/fleet/tasks/createFleetDB.yml
@@ -16,7 +16,7 @@
 - name: Create DB for service
   mysql_db:
     login_user: "root"
-    login_password: "{{ galera_root_password }}"
+    login_password: "{{ kolide_galera_root_password }}"
     login_host: "127.0.0.1"
     name: "{{ kolide_fleet_db_name }}"
     state: "present"
@@ -27,7 +27,7 @@
 - name: Grant access to the DB for the service
   mysql_user:
     login_user: "root"
-    login_password: "{{ galera_root_password }}"
+    login_password: "{{ kolide_galera_root_password }}"
     login_host: "127.0.0.1"
     name: "{{ kolide_fleet_db_user }}"
     password: "{{ kolide_fleet_db_password }}"
diff --git a/osquery/tests/test-vars.yml b/osquery/tests/test-vars.yml
index c6c1b4e9..a57e8d24 100644
--- a/osquery/tests/test-vars.yml
+++ b/osquery/tests/test-vars.yml
@@ -15,9 +15,10 @@
 
 physical_host: localhost
 
-galera_root_password: secrete
+#
 kolide_fleet_db_password: secrete
 kolide_fleet_admin_password: secrete
 kolide_fleet_jwt_key: secrete
+kolide_galera_root_password: secrete
 
 osa_test_repo: "openstack/openstack-ansible-ops"
diff --git a/overlay-inventories/osa-integration-inventory.yml b/overlay-inventories/osa-integration-inventory.yml
index ca656549..6149acd3 100644
--- a/overlay-inventories/osa-integration-inventory.yml
+++ b/overlay-inventories/osa-integration-inventory.yml
@@ -1,41 +1,19 @@
 ---
 all_systems:
-  vars: {}
+  vars:
+    # General Ansible options for OSA
+    ansible_become: yes
+    ansible_become_user: "root"
+    ansible_user: "root"
+    physical_host: "{{ inventory_hostname }}"
   children:
+    all_hosts:
+      children:
+        hosts: {}
+
     systems:
       vars:
-        # General Ansible options for OSA
-        ansible_become: yes
-        ansible_become_user: "root"
-        ansible_user: "root"
-        ## Grafana options
-        grafana_admin_password: "{{ haproxy_stats_password }}"
-        ## Kolide options
-        kolide_fleet_db_password: "{{ haproxy_stats_password }}"
-        kolide_fleet_jwt_key: "{{ haproxy_stats_password }}"
-        kolide_fleet_admin_password: "{{ haproxy_stats_password }}"
-        galera_root_password: "{{ galera_root_password | default(haproxy_stats_password) }}"
-        ## Skydive options
-        skydive_password: "{{ haproxy_stats_password }}"
-        skydive_elasticsearch_servers: "{{ groups['elastic-logstash'] | map('extract', hostvars, ['ansible_host']) | list | join(',') }}"
-        skydive_bind_address: "{{ container_address | default(ansible_host) }}"
-        ## Elastic-stack options
-        elastic_skydive_retention: 2 # Elastic retention set to 2 days max
-        elastic_skydive_size: 51200 # Elastic retention set to 50GiB max
-        ## Beat options
-        beat_service_states:
-          true:
-            state: restarted
-          false:
-            state: stopped
-        elastic_retention_refresh: true
-        auditbeat_service_state: "{{ beat_service_states[(inventory_hostname in (groups['hosts'] | default([])) | string | lower)]['state'] }}"
-        filebeat_service_state: "{{ beat_service_states[(inventory_hostname in (groups['hosts'] | default([])) | string | lower)]['state'] }}"
-        heartbeat_service_state: "{{ beat_service_states[(inventory_hostname in (groups['utility_all'] | default([])) | string | lower)]['state'] }}"
-        journalbeat_service_state: "{{ beat_service_states[(inventory_hostname in (groups['hosts'] | default([])) | string | lower)]['state'] }}"
-        metricbeat_service_state: "{{ beat_service_states[(inventory_hostname in (groups['hosts'] | default([])) | string | lower)]['state'] }}"
-        packetbeat_service_state: "{{ beat_service_states[(inventory_hostname in (groups['network_hosts'] | default([])) | string | lower)]['state'] }}"
 
         ## HAProxy options
         haproxy_extra_services:
           - service:
@@ -106,21 +84,33 @@
       children:
         traefik_all:
           children:
-            traefik_build_nodes: {}
+            traefik_build_nodes:
+              hosts: {}
+              vars: {}
+              children: {}
 
         skydive_all:
+          vars:
+            skydive_elasticsearch_servers: "{{ groups['elastic-logstash_all'] | map('extract', hostvars, ['ansible_host']) | list | join(',') }}"
+            skydive_bind_address: "{{ container_address | default(ansible_host) }}"
           children:
-            skydive_build_nodes: {}
+            skydive_build_nodes:
+              hosts: {}
+              vars: {}
+              children: {}
 
             skydive_agents:
              children:
-                hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
+                all_hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
 
             skydive_analyzers:
               children:
                 utility_all: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
 
         elk_all:
+          vars:
+            elastic_skydive_retention: 2 # Elastic retention set to 2 days max
+            elastic_skydive_size: 51200 # Elastic retention set to 50GiB max
           children:
             elastic-logstash_all:
               children:
@@ -134,6 +124,51 @@
               children:
                 log_hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
+
+        beats_all:
+          vars:
+            beat_service_states:
+              true:
+                state: restarted
+              false:
+                state: stopped
+            elastic_retention_refresh: true
+            auditbeat_service_state: "{{ auditbeat_service_state | default(beat_service_states[(inventory_hostname in (groups['hosts'] | default([])) | string | lower)]['state']) }}"
+            filebeat_service_state: "{{ filebeat_service_state | default(beat_service_states[(inventory_hostname in (groups['hosts'] | default([])) | string | lower)]['state']) }}"
+            heartbeat_service_state: "{{ heartbeat_service_state | default(beat_service_states[(inventory_hostname in (groups['kibana_all'] | default([])) | string | lower)]['state']) }}"
+            journalbeat_service_state: "{{ journalbeat_service_state | default(beat_service_states[(inventory_hostname in (groups['hosts'] | default([])) | string | lower)]['state']) }}"
+            metricbeat_service_state: "{{ metricbeat_service_state | default(beat_service_states[(inventory_hostname in (groups['hosts'] | default([])) | string | lower)]['state']) }}"
+            packetbeat_service_state: "{{ packetbeat_service_state | default(beat_service_states[(inventory_hostname in (groups['network_hosts'] | default([])) | string | lower)]['state']) }}"
+          children:
+            auditbeat:
+              children:
+                all_hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
+
+            filebeat:
+              children:
+                all_hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
+
+            heartbeat:
+              children:
+                kibana_all: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
+
+            journalbeat:
+              children:
+                all_hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
+
+            metricbeat:
+              children:
+                all_hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
+
+            packetbeat:
+              children:
+                all_hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
+
+        apm-server_all:
+          children:
+            apm-server:
+              children:
+                log_hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
+
         fleet_all:
           children:
             kolide-db_all:
@@ -152,7 +187,7 @@
           children:
             osquery:
               children:
-                hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
+                all_hosts: {} # This is an osa native group, as such nothing needs to be added. Values will be inherited.
 
         grafana_all:
           children:
diff --git a/skydive/roles/skydive_common/tasks/skydive_keystone.yml b/skydive/roles/skydive_common/tasks/skydive_keystone.yml
index 3c86a93f..37269236 100644
--- a/skydive/roles/skydive_common/tasks/skydive_keystone.yml
+++ b/skydive/roles/skydive_common/tasks/skydive_keystone.yml
@@ -35,7 +35,7 @@
     - skydive_service_setup_host_python_interpreter == '/opt/skydive/bin/python'
   block:
     - name: Create skydive venv
-      command: "/usr/bin/virtualenv --no-site-packages --no-setuptools /opt/skydive"
+      command: "virtualenv --no-site-packages --no-setuptools /opt/skydive"
       args:
         creates: /opt/skydive/bin/pip
 
@@ -51,7 +51,7 @@
       pip:
        name:
           - openstacksdk
-        extra_args: "-U"
+        extra_args: "-U --isolated"
         virtualenv: /opt/skydive
 
     - name: Show ansible interpreter
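
A note on the curator timer change above: the keys under ``options`` map
one-to-one onto ``systemd.timer`` directives, so the new values run the
metadata pruning twice a day and the size-based pruning hourly. A minimal
annotated sketch of the same options block, assuming the systemd service
role passes these keys through to the generated timer unit unchanged:

.. code-block:: yaml

    options:
      OnBootSec: 30min      # first activation 30 minutes after boot
      OnUnitActiveSec: 12h  # re-activate 12 hours after the previous run started
      Persistent: true      # run a missed activation as soon as the host is back up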
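
The reworked conditionals in ``elastic_dependencies`` replace the two-step
``isdir is defined and not isdir`` test with a plain existence check. The
``stat`` module always returns ``exists``, while ``isdir`` is only present
when the path exists, so gating on ``exists`` is simpler and runs the setup
tasks only when the path is genuinely absent. A standalone sketch of the
pattern, using a hypothetical ``/var/lib/example`` path:

.. code-block:: yaml

    - name: Check for directory
      stat:
        path: "/var/lib/example"  # hypothetical path for illustration
      register: service_dir

    - name: Create the directory only when the path is absent
      file:
        path: "/var/lib/example"
        state: "directory"
      when:
        - not (service_dir.stat.exists | bool)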
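
The ``galera_root_password`` to ``kolide_galera_root_password`` rename is the
namespacing the commit message refers to: it keeps Fleet's database secret
from colliding with an OSA deployment's own galera password, and
``installDB.yml`` maps the namespaced value back onto the
``galera_root_password`` variable the ``galera_server`` role expects. A
sketch of the resulting separation in a secrets file (both values are
placeholders):

.. code-block:: yaml

    # /etc/openstack_deploy/user_secrets.yml
    galera_root_password: "osa-cluster-secret"            # OSA's primary galera cluster
    kolide_galera_root_password: "fleet-cluster-secret"   # the dedicated Fleet/osquery cluster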
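
The ``beats_all`` vars pick each beat's service state by testing group
membership and using the result to index the ``beat_service_states`` mapping,
with ``| default(...)`` letting a deployer override any state outright. A
simplified, standalone sketch of the lookup; it drops the patch's
``| string | lower`` cast and indexes the mapping with the boolean directly,
which YAML's ``true``/``false`` keys support:

.. code-block:: yaml

    - hosts: all
      gather_facts: false
      vars:
        beat_service_states:
          true:              # YAML parses these keys as booleans
            state: restarted
          false:
            state: stopped
      tasks:
        - name: Resolve the desired filebeat state for this host
          debug:
            # The membership test yields a boolean that indexes the mapping:
            # members of the 'hosts' group restart filebeat, others stop it.
            msg: "{{ beat_service_states[inventory_hostname in (groups['hosts'] | default([]))]['state'] }}"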
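
Two small hardening changes close out the patch: the skydive venv build now
resolves ``virtualenv`` from the search path instead of hard-coding
``/usr/bin/virtualenv``, and pip's ``--isolated`` flag makes the
``openstacksdk`` upgrade ignore ``PIP_*`` environment variables and per-user
pip configuration on the deployment host. Consolidated, the tasks look like
this (a sketch assuming ``virtualenv`` is installed somewhere on ``$PATH``):

.. code-block:: yaml

    - name: Create skydive venv
      command: "virtualenv --no-site-packages --no-setuptools /opt/skydive"
      args:
        creates: /opt/skydive/bin/pip  # skip once the venv exists

    - name: Install openstacksdk into the venv
      pip:
        name:
          - openstacksdk
        extra_args: "-U --isolated"  # -U upgrades; --isolated ignores env vars and user config
        virtualenv: /opt/skydive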