Remove kafka, storm, zookeeper

Their cleanup has been added to monasca cleanup command.

Change-Id: I19a846e2683ae70b33ca64d2aba7ac71eb724588
This commit is contained in:
Michal Nasiadka 2022-12-07 18:52:01 +01:00
parent a1910abe35
commit f128d19957
72 changed files with 73 additions and 798 deletions

View File

@ -100,8 +100,6 @@ Kolla Ansible deploys containers for the following infrastructure components:
- `RabbitMQ <https://www.rabbitmq.com/>`__ as a messaging backend for
communication between services.
- `Redis <https://redis.io/>`__ an in-memory data structure store.
- `Zookeeper <https://zookeeper.apache.org/>`__ an open-source server which enables
highly reliable distributed coordination.
Directories
===========

View File

@ -369,8 +369,6 @@ ironic_http_port: "8089"
iscsi_port: "3260"
kafka_port: "9092"
keystone_public_port: "5000"
keystone_public_listen_port: "{{ keystone_public_port }}"
# NOTE(yoctozepto): Admin port settings are kept only for upgrade compatibility.
@ -549,10 +547,6 @@ venus_api_port: "10010"
watcher_api_port: "9322"
zookeeper_client_port: "2181"
zookeeper_peer_port: "2888"
zookeeper_quorum_port: "3888"
zun_api_port: "9517"
zun_wsproxy_port: "6784"
zun_wsproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}"
@ -697,7 +691,6 @@ enable_ironic_neutron_agent: "{{ enable_neutron | bool and enable_ironic | bool
# TODO(yoctozepto): Remove the deprecated enable_ironic_pxe_uefi in Zed.
enable_ironic_pxe_uefi: "no"
enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}"
enable_kafka: "no"
enable_kuryr: "no"
enable_magnum: "no"
enable_manila: "no"
@ -745,7 +738,6 @@ enable_sahara: "no"
enable_senlin: "no"
enable_skydive: "no"
enable_solum: "no"
enable_storm: "no"
enable_swift: "no"
enable_swift_s3api: "no"
enable_swift_recon: "no"
@ -756,7 +748,6 @@ enable_trove_singletenant: "no"
enable_venus: "no"
enable_vitrage: "no"
enable_watcher: "no"
enable_zookeeper: "{{ enable_kafka | bool or enable_storm | bool }}"
enable_zun: "no"
ovs_datapath: "{{ 'netdev' if enable_ovs_dpdk | bool else 'system' }}"
@ -1156,16 +1147,6 @@ influxdb_datadir_volume: "influxdb"
influxdb_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ influxdb_http_port }}"
#################
# Kafka options
#################
kafka_datadir_volume: "kafka"
# The number of brokers in a Kafka cluster. This is used for automatically
# setting quantities such as topic replicas and it is not recommended to
# change it unless you know what you are doing.
kafka_broker_count: "{{ groups['kafka'] | length }}"
#########################
# Internal Image options
#########################

View File

@ -18,3 +18,17 @@
roles:
- { role: storm,
tags: storm }
- name: Cleanup unused Zookeeper services
hosts:
- zookeeper
roles:
- { role: zookeeper,
tags: zookeeper }
- name: Cleanup unused Kafka services
hosts:
- kafka
roles:
- { role: kafka,
tags: kafka }

View File

@ -96,10 +96,6 @@
enabled: true
- name: "conf/input/05-libvirt.conf.j2"
enabled: "{{ enable_nova | bool and enable_nova_libvirt_container | bool }}"
- name: "conf/input/06-zookeeper.conf.j2"
enabled: true
- name: "conf/input/07-kafka.conf.j2"
enabled: true
- name: "conf/input/08-prometheus.conf.j2"
enabled: "{{ enable_prometheus_fluentd_integration | bool }}"
- name: "conf/input/10-openvswitch.conf.j2"
@ -171,7 +167,6 @@
- { name: "influxdb", enabled: "{{ enable_influxdb | bool }}" }
- { name: "ironic", enabled: "{{ enable_ironic | bool }}" }
- { name: "ironic-inspector", enabled: "{{ enable_ironic | bool }}" }
- { name: "kafka", enabled: "{{ enable_kafka | bool }}" }
- { name: "keystone", enabled: "{{ enable_keystone | bool }}" }
- { name: "kuryr", enabled: "{{ enable_kuryr | bool }}" }
- { name: "magnum", enabled: "{{ enable_magnum | bool }}" }
@ -196,14 +191,12 @@
- { name: "senlin", enabled: "{{ enable_senlin | bool }}" }
- { name: "skydive", enabled: "{{ enable_skydive | bool }}" }
- { name: "solum", enabled: "{{ enable_solum | bool }}" }
- { name: "storm", enabled: "{{ enable_storm | bool }}" }
- { name: "swift", enabled: "{{ enable_swift | bool }}" }
- { name: "tacker", enabled: "{{ enable_tacker | bool }}" }
- { name: "trove", enabled: "{{ enable_trove | bool }}" }
- { name: "venus", enabled: "{{ enable_venus | bool }}" }
- { name: "vitrage", enabled: "{{ enable_vitrage | bool }}" }
- { name: "watcher", enabled: "{{ enable_watcher | bool }}" }
- { name: "zookeeper", enabled: "{{ enable_zookeeper | bool }}" }
- { name: "zun", enabled: "{{ enable_zun | bool }}" }
template:
src: "cron-logrotate-global.conf.j2"

View File

@ -1,12 +0,0 @@
<source>
@type tail
path /var/log/kolla/zookeeper/zookeeper.log
pos_file /var/run/td-agent/zookeeper.pos
tag infra.*
<parse>
@type multiline
format_firstline /^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3} \S+ \S+ \S+ .*$/
format1 /^(?<Timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}) \[(?<server_id>\S+)\] \S+ (?<log_level>\S+) (?<Payload>.*)$/
time_key Timestamp
</parse>
</source>

View File

@ -1,12 +0,0 @@
<source>
@type tail
path /var/log/kolla/kafka/controller.log, /var/log/kolla/kafka/server.log, /var/log/kolla/kafka/state-change.log
pos_file /var/run/td-agent/kafka.pos
tag infra.*
<parse>
@type multiline
format_firstline /^\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}\] \S+ .*$/
format1 /^\[(?<Timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\] (?<log_level>\S+) (?<Payload>.*)$/
time_key Timestamp
</parse>
</source>

View File

@ -1,3 +0,0 @@
"/var/log/kolla/kafka/*.log"
{
}

View File

@ -1,3 +0,0 @@
"/var/log/kolla/storm/*.log"
{
}

View File

@ -1,3 +0,0 @@
"/var/log/kolla/zookeeper/*.log"
{
}

View File

@ -9,7 +9,6 @@
nova_instance_datadir_volume: "{{ nova_instance_datadir_volume }}"
gnocchi_metric_datadir_volume: "{{ gnocchi_metric_datadir_volume }}"
influxdb_datadir_volume: "{{ influxdb_datadir_volume }}"
kafka_datadir_volume: "{{ kafka_datadir_volume }}"
kolla_internal_vip_address: "{{ kolla_internal_vip_address }}"
kolla_external_vip_address: "{{ kolla_external_vip_address }}"
kolla_dev_repos_directory: "{{ kolla_dev_repos_directory }}"

View File

@ -3,7 +3,7 @@ kafka_services:
kafka:
container_name: kafka
group: kafka
enabled: true
enabled: false
image: "{{ kafka_image_full }}"
environment:
LOG_DIR: "{{ kafka_log_dir }}"
@ -13,14 +13,6 @@ kafka_services:
healthcheck: "{{ kafka_healthcheck }}"
####################
# Kafka
####################
kafka_cluster_name: "kolla_kafka"
kafka_log_dir: "/var/log/kolla/kafka"
kafka_heap_opts: "-Xmx1G -Xms1G"
kafka_zookeeper: "{% for host in groups['zookeeper'] %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ zookeeper_client_port }}{% if not loop.last %},{% endif %}{% endfor %}"
####################
# Docker
####################

View File

@ -1,17 +0,0 @@
---
- name: Restart kafka container
vars:
service_name: "kafka"
service: "{{ kafka_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
environment: "{{ service.environment }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- kolla_action != "config"

View File

@ -1,18 +0,0 @@
---
- name: Check kafka containers
become: true
kolla_docker:
action: "compare_container"
common_options: "{{ docker_common_options }}"
name: "{{ item.value.container_name }}"
image: "{{ item.value.image }}"
volumes: "{{ item.value.volumes }}"
environment: "{{ item.value.environment }}"
dimensions: "{{ item.value.dimensions }}"
healthcheck: "{{ item.value.healthcheck | default(omit) }}"
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ kafka_services }}"
notify:
- "Restart {{ item.key }} container"

View File

@ -1 +0,0 @@
---

View File

@ -0,0 +1,22 @@
---
- name: Stop and remove containers for disabled kafka services
become: true
kolla_docker:
action: "stop_and_remove_container"
name: "{{ item.value.container_name }}"
when:
- inventory_hostname in groups[item.value.group]
- not item.value.enabled | bool
with_dict: "{{ kafka_services }}"
- name: Removing config for any disabled services
file:
path: "{{ node_config_directory }}/{{ item.key }}"
state: "absent"
become: true
when:
- inventory_hostname in groups[item.value.group]
- not item.value.enabled | bool
with_dict: "{{ kafka_services }}"
# NOTE(dszumski): Docker volume removal is currently a manual procedure

View File

@ -1,42 +0,0 @@
---
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item.key }}"
state: "directory"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ kafka_services }}"
- name: Copying over config.json files for services
template:
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ kafka_services }}"
notify:
- Restart kafka container
- name: Copying over kafka config
merge_configs:
sources:
- "{{ role_path }}/templates/kafka.server.properties.j2"
- "{{ node_custom_config }}/kafka.server.properties"
- "{{ node_custom_config }}/{{ item.key }}/{{ inventory_hostname }}/kafka.server.properties"
dest: "{{ node_config_directory }}/{{ item.key }}/kafka.server.properties"
mode: "0660"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ kafka_services }}"
notify:
- Restart kafka container

View File

@ -1,2 +0,0 @@
---
- import_tasks: check-containers.yml

View File

@ -1,7 +0,0 @@
---
- import_tasks: config.yml
- import_tasks: check-containers.yml
- name: Flush handlers
meta: flush_handlers

View File

@ -1,2 +0,0 @@
---
- include_tasks: "{{ kolla_action }}.yml"

View File

@ -1,25 +0,0 @@
---
- import_role:
name: service-precheck
vars:
service_precheck_services: "{{ kafka_services }}"
service_name: "{{ project_name }}"
- name: Get container facts
become: true
kolla_container_facts:
container_engine: "{{ kolla_container_engine }}"
name:
- kafka
register: container_facts
- name: Checking free port for Kafka
wait_for:
host: "{{ api_interface_address }}"
port: "{{ kafka_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- container_facts['kafka'] is not defined
- inventory_hostname in groups['kafka']

View File

@ -1,3 +0,0 @@
---
- import_role:
role: service-images-pull

View File

@ -1,2 +0,0 @@
---
- import_tasks: deploy.yml

View File

@ -1,6 +0,0 @@
---
- import_role:
name: service-stop
vars:
project_services: "{{ kafka_services }}"
service_name: "{{ project_name }}"

View File

@ -1,7 +0,0 @@
---
- import_tasks: config.yml
- import_tasks: check-containers.yml
- name: Flush handlers
meta: flush_handlers

View File

@ -1,23 +0,0 @@
{
"command": "/opt/kafka/bin/kafka-server-start.sh /etc/kafka/kafka.server.properties",
"config_files": [
{
"source": "{{ container_config_directory }}/kafka.server.properties",
"dest": "/etc/kafka/kafka.server.properties",
"owner": "kafka",
"perm": "0600"
}
],
"permissions": [
{
"path": "/var/lib/kafka",
"owner": "kafka:kafka",
"recurse": true
},
{
"path": "/var/log/kolla/kafka",
"owner": "kafka:kafka",
"recurse": true
}
]
}

View File

@ -1,21 +0,0 @@
listeners=PLAINTEXT://{{ api_interface_address | put_address_in_context('url') }}:{{ kafka_port }}
controlled.shutdown.enable=true
auto.leader.rebalance.enable=true
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/var/lib/kafka/data
min.insync.replicas={{ kafka_broker_count if kafka_broker_count|int < 3 else 2 }}
default.replication.factor={{ kafka_broker_count if kafka_broker_count|int < 3 else 3 }}
num.partitions=30
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor={{ kafka_broker_count if kafka_broker_count|int < 3 else 3 }}
transaction.state.log.replication.factor={{ kafka_broker_count if kafka_broker_count|int < 3 else 3 }}
transaction.state.log.min.isr={{ kafka_broker_count if kafka_broker_count|int < 3 else 2 }}
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect={{ kafka_zookeeper }}
zookeeper.connection.timeout.ms=6000

View File

@ -1,2 +0,0 @@
---
project_name: "kafka"

View File

@ -3,7 +3,7 @@ storm_services:
storm-worker:
container_name: storm_worker
group: storm-worker
enabled: "{{ enable_storm | bool }}"
enabled: false
image: "{{ storm_image_full }}"
environment:
STORM_LOG_DIR: /var/log/kolla/storm
@ -14,7 +14,7 @@ storm_services:
storm-nimbus:
container_name: storm_nimbus
group: storm-nimbus
enabled: "{{ enable_storm | bool }}"
enabled: false
image: "{{ storm_image_full }}"
environment:
STORM_LOG_DIR: /var/log/kolla/storm
@ -24,12 +24,6 @@ storm_services:
healthcheck: "{{ storm_nimbus_healthcheck }}"
####################
# Storm
####################
storm_log_settings: 'INFO,ROLLINGFILE'
storm_nimbus_servers: "{% for host in groups['storm-nimbus'] %}'{{ 'api' | kolla_address(host) }}'{% if not loop.last %},{% endif %}{% endfor %}"
####################
# Docker
####################

View File

@ -1,34 +0,0 @@
---
- name: Restart storm-worker container
vars:
service_name: "storm-worker"
service: "{{ storm_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
environment: "{{ service.environment }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- kolla_action != "config"
- name: Restart storm-nimbus container
vars:
service_name: "storm-nimbus"
service: "{{ storm_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
environment: "{{ service.environment }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- kolla_action != "config"

View File

@ -1,18 +0,0 @@
---
- name: Check storm containers
become: true
kolla_docker:
action: "compare_container"
common_options: "{{ docker_common_options }}"
name: "{{ item.value.container_name }}"
image: "{{ item.value.image }}"
volumes: "{{ item.value.volumes }}"
environment: "{{ item.value.environment }}"
dimensions: "{{ item.value.dimensions }}"
healthcheck: "{{ item.value.healthcheck | default(omit) }}"
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ storm_services }}"
notify:
- "Restart {{ item.key }} container"

View File

@ -1 +0,0 @@
---

View File

@ -1,5 +1,5 @@
---
- name: Stop and remove containers for Storm services
- name: Stop and remove containers for disabled storm services
become: true
kolla_docker:
action: "stop_and_remove_container"

View File

@ -1,62 +0,0 @@
---
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item.key }}"
state: "directory"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ storm_services }}"
- name: Copying over config.json files for services
template:
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ storm_services }}"
notify:
- "Restart {{ item.key }} container"
- name: Copying over storm worker config
vars:
service: "{{ storm_services['storm-worker'] }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/storm-worker/storm.yml"
mode: "0660"
become: true
with_first_found:
- "{{ node_custom_config }}/storm/{{ inventory_hostname }}/storm.yml"
- "{{ node_custom_config }}/storm.yml"
- "storm.yml.j2"
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
notify:
- Restart storm-worker container
- name: Copying over storm nimbus config
vars:
service: "{{ storm_services['storm-nimbus'] }}"
template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/storm-nimbus/storm.yml"
mode: "0660"
become: true
with_first_found:
- "{{ node_custom_config }}/storm/{{ inventory_hostname }}/storm.yml"
- "{{ node_custom_config }}/storm.yml"
- "storm.yml.j2"
when:
- inventory_hostname in groups[service['group']]
- service.enabled | bool
notify:
- Restart storm-nimbus container

View File

@ -1,2 +0,0 @@
---
- import_tasks: check-containers.yml

View File

@ -1,7 +0,0 @@
---
- import_tasks: config.yml
- import_tasks: check-containers.yml
- name: Flush handlers
meta: flush_handlers

View File

@ -1,2 +0,0 @@
---
- include_tasks: "{{ kolla_action }}.yml"

View File

@ -1,49 +0,0 @@
---
- import_role:
name: service-precheck
vars:
service_precheck_services: "{{ storm_services }}"
service_name: "{{ project_name }}"
- name: Get container facts
become: true
kolla_container_facts:
container_engine: "{{ kolla_container_engine }}"
name:
- storm_worker
- storm_nimbus
register: container_facts
- name: Checking storm nimbus thrift port is available
wait_for:
host: "{{ api_interface_address }}"
port: "{{ storm_nimbus_thrift_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- container_facts['storm_nimbus'] is not defined
- inventory_hostname in groups['storm-nimbus']
- name: Checking storm supervisor thrift port is available
wait_for:
host: "{{ api_interface_address }}"
port: "{{ storm_supervisor_thrift_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- container_facts['storm_worker'] is not defined
- inventory_hostname in groups['storm-worker']
- name: Checking storm worker ports are available
wait_for:
host: "{{ api_interface_address }}"
port: "{{ item }}"
connect_timeout: 1
timeout: 1
state: stopped
with_sequence: "start={{ storm_worker_port_range.start | int }} end={{ storm_worker_port_range.end | int }}"
when:
- container_facts['storm_worker'] is not defined
- inventory_hostname in groups['storm-worker']

View File

@ -1,3 +0,0 @@
---
- import_role:
role: service-images-pull

View File

@ -1,2 +0,0 @@
---
- import_tasks: deploy.yml

View File

@ -1,6 +0,0 @@
---
- import_role:
name: service-stop
vars:
project_services: "{{ storm_services }}"
service_name: "{{ project_name }}"

View File

@ -1,2 +0,0 @@
---
- import_tasks: deploy.yml

View File

@ -1,23 +0,0 @@
{
"command": "/opt/storm/bin/storm nimbus",
"config_files": [
{
"source": "{{ container_config_directory }}/storm.yml",
"dest": "/opt/storm/conf/storm.yaml",
"owner": "storm",
"perm": "0600"
}
],
"permissions": [
{
"path": "/var/lib/storm",
"owner": "storm:storm",
"recurse": true
},
{
"path": "/var/log/kolla/storm",
"owner": "storm:storm",
"recurse": true
}
]
}

View File

@ -1,23 +0,0 @@
{
"command": "/opt/storm/bin/storm supervisor",
"config_files": [
{
"source": "{{ container_config_directory }}/storm.yml",
"dest": "/opt/storm/conf/storm.yaml",
"owner": "storm",
"perm": "0600"
}
],
"permissions": [
{
"path": "/var/lib/storm",
"owner": "storm:storm",
"recurse": true
},
{
"path": "/var/log/kolla/storm",
"owner": "storm:storm",
"recurse": true
}
]
}

View File

@ -1,14 +0,0 @@
storm.local.dir: "/var/lib/storm/data"
storm.log.dir: "/var/log/kolla/storm"
nimbus.seeds: [{{ storm_nimbus_servers }}]
storm.zookeeper.port: {{ zookeeper_client_port }}
storm.zookeeper.servers:
{% for host in groups['zookeeper'] %}
- "{{ 'api' | kolla_address(host) }}"
{% endfor %}
supervisor.slots.ports:
{% for port in range(storm_worker_port_range.start|int, storm_worker_port_range.end|int + 1) %}
- {{ port }}
{% endfor %}
supervisor.thrift.port: {{ storm_supervisor_thrift_port }}
nimbus.thrift.port: {{ storm_nimbus_thrift_port }}

View File

@ -1,2 +0,0 @@
---
project_name: "storm"

View File

@ -77,14 +77,6 @@
[[inputs.redis]]
servers = ["tcp://:{{ redis_master_password }}@{{ api_interface_address | put_address_in_context('url') }}:{{ redis_port }}"]
{% endif %}
{% if inventory_hostname in groups['zookeeper'] and enable_zookeeper | bool %}
[[inputs.zookeeper]]
servers = ["{{ api_interface_address | put_address_in_context('url') }}:{{ zookeeper_client_port }}"]
{% endif %}
{% if inventory_hostname in groups['kafka'] and enable_kafka | bool %}
[[inputs.kafka_consumer]]
brokers = ["{{ api_interface_address | put_address_in_context('url') }}:{{ kafka_port }}"]
{% endif %}
{% if inventory_hostname in groups['mariadb'] and (enable_mariadb or enable_external_mariadb_load_balancer) | bool %}
[[inputs.mysql]]
servers = ["{{ database_user }}:{{ database_password }}@{{ mariadb_proto }}({{ api_interface_address | put_address_in_context('url') }}:{{ database_port }})/"]

View File

@ -3,7 +3,7 @@ zookeeper_services:
zookeeper:
container_name: zookeeper
group: zookeeper
enabled: true
enabled: false
image: "{{ zookeeper_image_full }}"
environment:
ZOO_LOG_DIR: /var/log/kolla/zookeeper
@ -13,11 +13,6 @@ zookeeper_services:
healthcheck: "{{ zookeeper_healthcheck }}"
####################
# Zookeeper
####################
zookeeper_log_settings: 'INFO,ROLLINGFILE'
####################
# Docker
####################

View File

@ -1,17 +0,0 @@
---
- name: Restart zookeeper container
vars:
service_name: "zookeeper"
service: "{{ zookeeper_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
environment: "{{ service.environment }}"
volumes: "{{ service.volumes }}"
dimensions: "{{ service.dimensions }}"
healthcheck: "{{ service.healthcheck | default(omit) }}"
when:
- kolla_action != "config"

View File

@ -1,18 +0,0 @@
---
- name: Check zookeeper containers
become: true
kolla_docker:
action: "compare_container"
common_options: "{{ docker_common_options }}"
name: "{{ item.value.container_name }}"
image: "{{ item.value.image }}"
volumes: "{{ item.value.volumes }}"
environment: "{{ item.value.environment }}"
dimensions: "{{ item.value.dimensions }}"
healthcheck: "{{ item.value.healthcheck | default(omit) }}"
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ zookeeper_services }}"
notify:
- "Restart {{ item.key }} container"

View File

@ -1 +0,0 @@
---

View File

@ -0,0 +1,22 @@
---
- name: Stop and remove containers for disabled zookeeper services
become: true
kolla_docker:
action: "stop_and_remove_container"
name: "{{ item.value.container_name }}"
when:
- inventory_hostname in groups[item.value.group]
- not item.value.enabled | bool
with_dict: "{{ zookeeper_services }}"
- name: Removing config for any disabled services
file:
path: "{{ node_config_directory }}/{{ item.key }}"
state: "absent"
become: true
when:
- inventory_hostname in groups[item.value.group]
- not item.value.enabled | bool
with_dict: "{{ zookeeper_services }}"
# NOTE(dszumski): Docker volume removal is currently a manual procedure

View File

@ -1,56 +0,0 @@
---
- name: Ensuring config directories exist
file:
path: "{{ node_config_directory }}/{{ item.key }}"
state: "directory"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ zookeeper_services }}"
- name: Copying over config.json files for services
template:
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ zookeeper_services }}"
notify:
- Restart zookeeper container
- name: Copying over zookeeper configuration
merge_configs:
sources:
- "{{ role_path }}/templates/{{ item.key }}.cfg.j2"
- "{{ node_custom_config }}/{{ item.key }}.cfg"
- "{{ node_custom_config }}/{{ item.key }}/{{ inventory_hostname }}/{{ item.key }}.cfg"
whitespace: False
dest: "{{ node_config_directory }}/{{ item.key }}/{{ item.key }}.cfg"
mode: "0660"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ zookeeper_services }}"
notify:
- Restart zookeeper container
- name: Copying over zookeeper instance id
template:
src: "myid.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/myid"
mode: "0660"
become: true
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ zookeeper_services }}"
notify:
- Restart zookeeper container

View File

@ -1,2 +0,0 @@
---
- import_tasks: check-containers.yml

View File

@ -1,7 +0,0 @@
---
- import_tasks: config.yml
- import_tasks: check-containers.yml
- name: Flush handlers
meta: flush_handlers

View File

@ -1,2 +0,0 @@
---
- include_tasks: "{{ kolla_action }}.yml"

View File

@ -1,29 +0,0 @@
---
- import_role:
name: service-precheck
vars:
service_precheck_services: "{{ zookeeper_services }}"
service_name: "{{ project_name }}"
- name: Get container facts
become: true
kolla_container_facts:
container_engine: "{{ kolla_container_engine }}"
name:
- zookeeper
register: container_facts
- name: Checking zookeeper ports are available
wait_for:
host: "{{ api_interface_address }}"
port: "{{ item }}"
connect_timeout: 1
timeout: 1
state: stopped
with_items:
- "{{ zookeeper_client_port }}"
- "{{ zookeeper_peer_port }}"
- "{{ zookeeper_quorum_port }}"
when:
- container_facts['zookeeper'] is not defined
- inventory_hostname in groups['zookeeper']

View File

@ -1,3 +0,0 @@
---
- import_role:
role: service-images-pull

View File

@ -1,2 +0,0 @@
---
- import_tasks: deploy.yml

View File

@ -1,6 +0,0 @@
---
- import_role:
name: service-stop
vars:
project_services: "{{ zookeeper_services }}"
service_name: "{{ project_name }}"

View File

@ -1,7 +0,0 @@
---
- import_tasks: config.yml
- import_tasks: check-containers.yml
- name: Flush handlers
meta: flush_handlers

View File

@ -1,5 +0,0 @@
{% for host in groups['zookeeper'] -%}
{% if hostvars[host].ansible_facts.hostname == ansible_facts.hostname -%}
{{ loop.index }}
{%- endif %}
{%- endfor %}

View File

@ -1,8 +0,0 @@
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/var/lib/zookeeper/data
clientPort={{ zookeeper_client_port }}
{% for host in groups['zookeeper'] %}
server.{{ loop.index }}={{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ zookeeper_peer_port }}:{{ zookeeper_quorum_port }}
{% endfor %}

View File

@ -1,29 +0,0 @@
{
"command": "/opt/zookeeper/bin/zkServer.sh start-foreground /etc/zookeeper/conf/zoo.cfg",
"config_files": [
{
"source": "{{ container_config_directory }}/myid",
"dest": "/var/lib/zookeeper/data/myid",
"owner": "zookeeper",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/zookeeper.cfg",
"dest": "/etc/zookeeper/conf/zoo.cfg",
"owner": "zookeeper",
"perm": "0600"
}
],
"permissions": [
{
"path": "/var/lib/zookeeper",
"owner": "zookeeper:zookeeper",
"recurse": true
},
{
"path": "/var/log/kolla/zookeeper",
"owner": "zookeeper:zookeeper",
"recurse": true
}
]
}

View File

@ -1,2 +0,0 @@
---
project_name: "zookeeper"

View File

@ -39,7 +39,6 @@
- enable_influxdb_{{ enable_influxdb | bool }}
- enable_ironic_{{ enable_ironic | bool }}
- enable_iscsid_{{ enable_iscsid | bool }}
- enable_kafka_{{ enable_kafka | bool }}
- enable_keystone_{{ enable_keystone | bool }}
- enable_kuryr_{{ enable_kuryr | bool }}
- enable_loadbalancer_{{ enable_loadbalancer | bool }}
@ -67,7 +66,6 @@
- enable_senlin_{{ enable_senlin | bool }}
- enable_skydive_{{ enable_skydive | bool }}
- enable_solum_{{ enable_solum | bool }}
- enable_storm_{{ enable_storm | bool }}
- enable_swift_{{ enable_swift | bool }}
- enable_tacker_{{ enable_tacker | bool }}
- enable_telegraf_{{ enable_telegraf | bool }}
@ -75,7 +73,6 @@
- enable_venus_{{ enable_venus | bool }}
- enable_vitrage_{{ enable_vitrage | bool }}
- enable_watcher_{{ enable_watcher | bool }}
- enable_zookeeper_{{ enable_zookeeper | bool }}
- enable_zun_{{ enable_zun | bool }}
tags: always
@ -348,16 +345,6 @@
- { role: collectd,
tags: collectd }
- name: Apply role zookeeper
gather_facts: false
hosts:
- zookeeper
- '&enable_zookeeper_True'
serial: '{{ kolla_serial|default("0") }}'
roles:
- { role: zookeeper,
tags: zookeeper }
- name: Apply role influxdb
gather_facts: false
hosts:
@ -517,27 +504,6 @@
- { role: opensearch,
tags: opensearch }
- name: Apply role kafka
gather_facts: false
hosts:
- kafka
- '&enable_kafka_True'
serial: '{{ kolla_serial|default("0") }}'
roles:
- { role: kafka,
tags: kafka }
- name: Apply role storm
gather_facts: false
hosts:
- storm-worker
- storm-nimbus
- '&enable_storm_True'
serial: '{{ kolla_serial|default("0") }}'
roles:
- { role: storm,
tags: storm }
- name: Apply role swift
gather_facts: false
hosts:

View File

@ -11,7 +11,6 @@ logging and monitoring services available in kolla.
central-logging-guide
grafana-guide
influxdb-guide
kafka-guide
monasca-guide
osprofiler-guide
prometheus-guide

View File

@ -1,25 +0,0 @@
.. _kafka-guide:
============
Apache Kafka
============
Overview
~~~~~~~~
`Kafka <https://kafka.apache.org/intro>`_ is a distributed stream processing
system. It forms the central component of Monasca and in an OpenStack context
can also be used as an experimental messaging backend in `Oslo messaging
<https://docs.openstack.org/oslo.messaging/latest/admin/kafka.html>`_.
Kafka
~~~~~
A spinning disk array is normally sufficient for Kafka. The data directory
defaults to a docker volume, ``kafka``. Since it can use a lot of disk space,
you may wish to store the data on a dedicated device. This can be achieved by
setting ``kafka_datadir_volume`` in ``/etc/kolla/globals.yml``:
.. code-block:: yaml
kafka_datadir_volume: /mnt/spinning_array/kafka/

View File

@ -20,7 +20,8 @@ The cleanup command can be invoked from the Kolla Ansible CLI, for example:
kolla-ansible monasca_cleanup
This will remove Monasca service containers, and service configuration.
This will remove Monasca service containers (including Kafka, Storm and
ZooKeeper), and service configuration.
Following cleanup, you may also choose to remove unused container volumes.
It is recommended to run this manually on each Monasca service host. Note

View File

@ -363,7 +363,6 @@ workaround_ansible_issue_8743: yes
#enable_ironic: "no"
#enable_ironic_neutron_agent: "{{ enable_neutron | bool and enable_ironic | bool }}"
#enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}"
#enable_kafka: "no"
#enable_kuryr: "no"
#enable_magnum: "no"
#enable_manila: "no"
@ -409,7 +408,6 @@ workaround_ansible_issue_8743: yes
#enable_senlin: "no"
#enable_skydive: "no"
#enable_solum: "no"
#enable_storm: "no"
#enable_swift: "no"
#enable_swift_s3api: "no"
#enable_tacker: "no"
@ -419,7 +417,6 @@ workaround_ansible_issue_8743: yes
#enable_venus: "no"
#enable_vitrage: "no"
#enable_watcher: "no"
#enable_zookeeper: "{{ enable_kafka | bool or enable_storm | bool }}"
#enable_zun: "no"
##################

View File

@ -0,0 +1,8 @@
---
upgrade:
- |
Support for deploying ``kafka``, ``storm`` and ``zookeeper`` has been
dropped, since these services were used only by Monasca. Post-upgrade cleanup
of those services can be done using ``kolla-ansible monasca_cleanup`` - for
details please see
`Monasca guide <https://docs.openstack.org/kolla-ansible/zed/reference/logging-and-monitoring/monasca-guide.html>`__

View File

@ -104,9 +104,6 @@ primary
[prometheus:children]
monitoring
[kafka:children]
control
{% if is_upgrade %}
[kibana:children]
control
@ -152,9 +149,6 @@ control
[outward-rabbitmq:children]
control
[storm:children]
monitoring
[keystone:children]
control
@ -454,13 +448,6 @@ murano
[murano-engine:children]
murano
# Storm
[storm-worker:children]
storm
[storm-nimbus:children]
storm
# Ironic
[ironic-api:children]
ironic

View File

@ -68,11 +68,6 @@ if [[ "$influxdb_datadir_volume" != "influxdb" && -d "$influxdb_datadir_volume"
rm -rfv $influxdb_datadir_volume
fi
if [[ "$kafka_datadir_volume" != "kafka" && -d "$kafka_datadir_volume" ]]; then
echo "Removing kafka volume if it is customised"
rm -rfv $kafka_datadir_volume
fi
if [[ "$opensearch_datadir_volume" != "opensearch" && -d "$opensearch_datadir_volume" ]]; then
echo "Removing opensearch volume if it is customzied"
rm -rfv $opensearch_datadir_volume