diff --git a/tools/deployment/armada/010-armada-host-setup.sh b/tools/deployment/armada/010-armada-host-setup.sh
index 3e3b3a2c59..33ffff38dc 100755
--- a/tools/deployment/armada/010-armada-host-setup.sh
+++ b/tools/deployment/armada/010-armada-host-setup.sh
@@ -17,4 +17,4 @@ set -xe
 
 sudo apt-get install -y python3-pip
-sudo pip3 install --upgrade pip
+sudo pip3 install --upgrade pip requests
diff --git a/tools/deployment/armada/015-armada-build.sh b/tools/deployment/armada/015-armada-build.sh
index 86eaf2e703..aefb53a871 100755
--- a/tools/deployment/armada/015-armada-build.sh
+++ b/tools/deployment/armada/015-armada-build.sh
@@ -17,7 +17,8 @@ set -xe
 
 TMP_DIR=$(mktemp -d)
- git clone --depth 1 http://github.com/openstack/airship-armada.git ${TMP_DIR}/armada
+
+git clone --depth 1 http://github.com/openstack/airship-armada.git ${TMP_DIR}/armada
 sudo pip3 install ${TMP_DIR}/armada
 sudo make build -C ${TMP_DIR}/armada
 sudo rm -rf ${TMP_DIR}
diff --git a/tools/deployment/armada/020-armada-render-manifests.sh b/tools/deployment/armada/020-armada-render-manifests.sh
index 129ccb2125..fd597375ba 100755
--- a/tools/deployment/armada/020-armada-render-manifests.sh
+++ b/tools/deployment/armada/020-armada-render-manifests.sh
@@ -37,8 +37,12 @@ export TUNNEL_DEVICE=$(ip -4 route list 0/0 | awk '{ print $5; exit }')
 export OSH_INFRA_PATH
 export OSH_PATH
 
+# NOTE(srwilkers): We add this here due to envsubst expanding the ${tag} placeholder in
+# fluentd's configuration. This ensures the placeholder value gets rendered appropriately
+export tag='${tag}'
+
 manifests="armada-cluster-ingress armada-ceph armada-lma armada-osh"
 for manifest in $manifests; do
   echo "Rendering $manifest manifest"
-  envsubst < ./tools/deployment/armada/multinode/$manifest.yaml > /tmp/$manifest.yaml
+  envsubst < ./tools/deployment/armada/manifests/$manifest.yaml > /tmp/$manifest.yaml
 done
diff --git a/tools/deployment/armada/generate-passwords.sh b/tools/deployment/armada/generate-passwords.sh
index 0bbbec9e8e..b6bc32a2ef 100755
--- a/tools/deployment/armada/generate-passwords.sh
+++ b/tools/deployment/armada/generate-passwords.sh
@@ -50,9 +50,15 @@ passwords="BARBICAN_DB_PASSWORD \
            OPENSTACK_EXPORTER_USER_PASSWORD \
            PROMETHEUS_ADMIN_PASSWORD \
            RABBITMQ_ADMIN_PASSWORD \
+           RADOSGW_S3_ADMIN_ACCESS_KEY \
+           RADOSGW_S3_ADMIN_SECRET_KEY \
+           RADOSGW_S3_ELASTICSEARCH_ACCESS_KEY \
+           RADOSGW_S3_ELASTICSEARCH_SECRET_KEY \
            SWIFT_USER_PASSWORD"
 
 for password in $passwords
 do
-  export $password=$(tr -dc A-Za-z0-9 < /dev/urandom 2>/dev/null | head -c 20)
+  value=$(tr -dc A-Za-z0-9 < /dev/urandom 2>/dev/null | head -c 20)
+  export $password=$value
+  echo "export $password=$value" >> /tmp/passwords.env
 done
diff --git a/tools/deployment/armada/multinode/armada-ceph.yaml b/tools/deployment/armada/manifests/armada-ceph.yaml
similarity index 99%
rename from tools/deployment/armada/multinode/armada-ceph.yaml
rename to tools/deployment/armada/manifests/armada-ceph.yaml
index d3473d5b0f..0862c9518d 100644
--- a/tools/deployment/armada/multinode/armada-ceph.yaml
+++ b/tools/deployment/armada/manifests/armada-ceph.yaml
@@ -118,8 +118,6 @@ data:
             journal:
               type: directory
               location: /var/lib/openstack-helm/ceph/osd/journal-one
-    logging:
-      fluentd: true
   source:
     type: local
     location: ${OSH_INFRA_PATH}
@@ -190,8 +188,6 @@ data:
             journal:
               type: directory
               location: /var/lib/openstack-helm/ceph/osd/journal-one
-    logging:
-      fluentd: true
   source:
     type: local
     location: ${OSH_INFRA_PATH}
diff --git a/tools/deployment/armada/multinode/armada-cluster-ingress.yaml b/tools/deployment/armada/manifests/armada-cluster-ingress.yaml
similarity index 100%
rename from tools/deployment/armada/multinode/armada-cluster-ingress.yaml
rename to tools/deployment/armada/manifests/armada-cluster-ingress.yaml
diff --git a/tools/deployment/armada/manifests/armada-lma.yaml b/tools/deployment/armada/manifests/armada-lma.yaml
new file mode 100644
index 0000000000..e93321c7f0
--- /dev/null
+++ b/tools/deployment/armada/manifests/armada-lma.yaml
@@ -0,0 +1,1236 @@
+---
+schema: armada/Chart/v1
+metadata:
+  schema: metadata/Document/v1
+  name: helm-toolkit
+data:
+  chart_name: helm-toolkit
+  release: helm-toolkit
+  namespace: helm-toolkit
+  values: {}
+  source:
+    type: local
+    location: ${OSH_INFRA_PATH}
+    subpath: helm-toolkit
+    reference: master
+  dependencies: []
+---
+schema: armada/Chart/v1
+metadata:
+  schema: metadata/Document/v1
+  name: osh-infra-ingress-controller
+data:
+  chart_name: osh-infra-ingress-controller
+  release: osh-infra-ingress-controller
+  namespace: osh-infra
+  wait:
+    timeout: 1800
+    labels:
+      release_group: osh-infra-osh-infra-ingress-controller
+  install:
+    no_hooks: False
+  upgrade:
+    no_hooks: False
+    pre:
+      delete:
+        - type: job
+          labels:
+            release_group: osh-infra-osh-infra-ingress-controller
+  values:
+    labels:
+      node_selector_key: openstack-control-plane
+      node_selector_value: enabled
+    pod:
+      replicas:
+        error_page: 2
+        ingress: 2
+  source:
+    type: local
+    location: ${OSH_INFRA_PATH}
+    subpath: ingress
+    reference: master
+  dependencies:
+    - helm-toolkit
+---
+schema: armada/Chart/v1
+metadata:
+  schema: metadata/Document/v1
+  name: osh-infra-ceph-config
+data:
+  chart_name: osh-infra-ceph-config
+  release: osh-infra-ceph-config
+  namespace: osh-infra
+  wait:
+    timeout: 1800
+    labels:
+      release_group: osh-infra-osh-infra-ceph-config
+  install:
+    no_hooks: False
+  upgrade:
+    no_hooks: False
+    pre:
+      delete:
+        - type: job
+          labels:
+            release_group: osh-infra-osh-infra-ceph-config
+  values:
+    endpoints:
+      identity:
+        namespace: openstack
+      object_store:
+        namespace: ceph
+      ceph_mon:
+        namespace: ceph
+    labels:
+      jobs:
+        node_selector_key: openstack-control-plane
+        node_selector_value: enabled
+    network:
+      public: ${CEPH_NETWORK}
+      cluster: ${CEPH_NETWORK}
+    deployment:
+      storage_secrets: False
+      ceph: False
+      rbd_provisioner: False
+      cephfs_provisioner: False
+      client_secrets: True
+      rgw_keystone_user_and_endpoints: False
+    bootstrap:
+      enabled: False
+    conf:
+      rgw_ks:
+        enabled: True
+  source:
+    type: local
+    location: ${OSH_INFRA_PATH}
+    subpath: ceph-provisioners
+    reference: master
+  dependencies:
+    - helm-toolkit
+---
+schema: armada/Chart/v1
+metadata:
+  schema: metadata/Document/v1
+  name: osh-infra-radosgw
+data:
+  chart_name: osh-infra-radosgw
+  release: osh-infra-radosgw
+  namespace: osh-infra
+  wait:
+    timeout: 1800
+    labels:
+      release_group: osh-infra-osh-infra-radosgw
+  install:
+    no_hooks: False
+  upgrade:
+    no_hooks: False
+    pre:
+      delete:
+        - type: job
+          labels:
+            release_group: osh-infra-radosgw-osh-infra
+  values:
+    endpoints:
+      object_store:
+        namespace: osh-infra
+      ceph_object_store:
+        namespace: osh-infra
+        auth:
+          admin:
+            access_key: ${RADOSGW_S3_ADMIN_ACCESS_KEY}
+            secret_key: ${RADOSGW_S3_ADMIN_SECRET_KEY}
+      ceph_mon:
+        namespace: ceph
+    labels:
+      job:
+        node_selector_key: openstack-control-plane
+        node_selector_value: enabled
+    bootstrap:
+      enabled: False
+    conf:
+      rgw_ks:
+        enabled: False
+      rgw_s3:
+        enabled: True
+    network:
+      public: ${CEPH_NETWORK}
+      cluster: ${CEPH_NETWORK}
+    deployment:
+      storage_secrets: False
+      ceph: True
+      rbd_provisioner: False
+      cephfs_provisioner: False
+      client_secrets: False
+      rgw_keystone_user_and_endpoints: False
+  source:
+    type: local
+    location: ${OSH_INFRA_PATH}
+    subpath: ceph-rgw
+    reference: master
+  dependencies:
+    - helm-toolkit
+---
+schema: armada/Chart/v1
+metadata:
+  schema: metadata/Document/v1
+  name: osh-infra-ldap
+data:
+  chart_name: osh-infra-ldap
+  release: osh-infra-ldap
+  namespace: osh-infra
+  wait:
+    timeout: 1800
+    labels:
+      release_group: osh-infra-osh-infra-ldap
+  install:
+    no_hooks: false
+  upgrade:
+    no_hooks: False
+    pre:
+      delete:
+        - type: job
+          labels:
+            release_group: osh-infra-osh-infra-ldap
+  values:
+    labels:
+      server:
+        node_selector_key: openstack-control-plane
+        node_selector_value: enabled
+      job:
+        node_selector_key: openstack-control-plane
+        node_selector_value: enabled
+    bootstrap:
+      enabled: true
+  source:
+    type: local
+    location: ${OSH_INFRA_PATH}
+    subpath: ldap
+    reference: master
+  dependencies:
+    - helm-toolkit
+---
+schema: armada/Chart/v1
+metadata:
+  schema: metadata/Document/v1
+  name: osh-infra-mariadb
+data:
+  chart_name: osh-infra-mariadb
+  release: osh-infra-mariadb
+  namespace: osh-infra
+  wait:
+    timeout: 1800
+    labels:
+      release_group: osh-infra-osh-infra-mariadb
+  install:
+    no_hooks: False
+  upgrade:
+    no_hooks: False
+    pre:
+      delete:
+        - type: job
+          labels:
+            release_group: osh-infra-osh-infra-mariadb
+  values:
+    pod:
+      replicas:
+        server: 1
+    endpoints:
+      oslo_db:
+        auth:
+          admin:
+            password: ${DB_ADMIN_PASSWORD}
+    volume:
+      enabled: false
+    labels:
+      server:
+        node_selector_key: openstack-control-plane
+        node_selector_value: enabled
+      prometheus_mysql_exporter:
+        node_selector_key: openstack-control-plane
+        node_selector_value: enabled
+  source:
+    type: local
+    location: ${OSH_INFRA_PATH}
+    subpath: mariadb
+    reference: master
+  dependencies:
+    - helm-toolkit
+---
+schema: armada/Chart/v1
+metadata:
+  schema: metadata/Document/v1
+  name: elasticsearch
+data:
+  chart_name: elasticsearch
+  release: elasticsearch
+  namespace: osh-infra
+  wait:
+    timeout: 3600
+    labels:
+      release_group: osh-infra-elasticsearch
+  install:
+    no_hooks: False
+  upgrade:
+    no_hooks: False
+    pre:
+      delete:
+        - type: job
+          labels:
+            release_group: osh-infra-elasticsearch
+        - type: pod
+          labels:
+            release_group: osh-infra-elasticsearch
+            component: test
+  values:
+    endpoints:
+      elasticsearch:
+        auth:
+          admin:
+            password: ${ELASTICSEARCH_ADMIN_PASSWORD}
+      object_store:
+        namespace: osh-infra
+      ceph_object_store:
+        namespace: osh-infra
+        auth:
+          admin:
+            access_key: ${RADOSGW_S3_ADMIN_ACCESS_KEY}
+            secret_key: ${RADOSGW_S3_ADMIN_SECRET_KEY}
+          elasticsearch:
+            access_key: ${RADOSGW_S3_ELASTICSEARCH_ACCESS_KEY}
+            secret_key: ${RADOSGW_S3_ELASTICSEARCH_SECRET_KEY}
+    pod:
+      replicas:
+        data: 1
+        master: 2
+    labels:
+      elasticsearch:
+        node_selector_key: openstack-control-plane
+        node_selector_value: enabled
+      job:
+        node_selector_key: openstack-control-plane
+        node_selector_value: enabled
+    conf:
+      elasticsearch:
+        env:
+          java_opts: "-Xms512m -Xmx512m"
+        snapshots:
+          enabled: true
+  source:
+    type: local
+    location: ${OSH_INFRA_PATH}
+    subpath: elasticsearch
+    reference: master
+  dependencies:
+    - helm-toolkit
+---
+schema: armada/Chart/v1
+metadata:
+  schema: metadata/Document/v1
+  name: fluent-logging
+data:
+  chart_name: fluent-logging
+  release: fluent-logging
+  namespace: osh-infra
+  wait:
+    timeout: 3600
+    labels:
+      release_group: osh-infra-fluent-logging
+  test:
+    enabled: false
+  install:
+    no_hooks: False
+  upgrade:
+    no_hooks: False
+    pre:
+      delete:
+        - type: job
+          labels:
+            release_group: osh-infra-fluent-logging
+        - type: pod
+          labels:
+            release_group: osh-infra-fluent-logging
+            component: test
+  values:
+    conf:
+      fluentbit:
+        - service:
+            header: service
+            Flush: 30
+            Daemon: Off
+            Log_Level: info
+            Parsers_File: parsers.conf
+        - ceph_cluster_logs:
+            header: input
+            Name: tail
+            Tag: ceph.cluster.*
+            Path: /var/log/ceph/ceph.log
+            DB: /var/log/ceph.db
+            Parsers: syslog
+            Mem_Buf_Limit: 5MB
+            DB.Sync: Normal
+            Buffer_Chunk_Size: 1M
+            Buffer_Max_Size: 1M
+        - ceph_audit_logs:
+            header: input
+            Name: tail
+            Tag: ceph.audit.*
+            Path: /var/log/ceph/ceph.audit.log
+            DB: /var/log/ceph.db
+            Parsers: syslog
+            Mem_Buf_Limit: 5MB
+            DB.Sync: Normal
+            Buffer_Chunk_Size: 1M
+            Buffer_Max_Size: 1M
+        - ceph_mon_logs:
+            header: input
+            Name: tail
+            Tag: ceph.mon.*
+            Path: /var/log/ceph/ceph-mon**.log
+            DB: /var/log/ceph.db
+            Parsers: syslog
+            Mem_Buf_Limit: 5MB
+            DB.Sync: Normal
+            Buffer_Chunk_Size: 1M
+            Buffer_Max_Size: 1M
+        - ceph_osd_logs:
+            header: input
+            Name: tail
+            Tag: ceph.osd.*
+            Path: /var/log/ceph/ceph-osd**.log
+            DB: /var/log/ceph.db
+            Parsers: syslog
+            Mem_Buf_Limit: 5MB
+            DB.Sync: Normal
+            Buffer_Chunk_Size: 1M
+            Buffer_Max_Size: 1M
+        - kernel_messages:
+            header: input
+            Name: tail
+            Tag: kernel
+            Path: /var/log/kern.log
+            DB: /var/log/kern.db
+            Mem_Buf_Limit: 5MB
+            DB.Sync: Normal
+            Buffer_Chunk_Size: 1M
+            Buffer_Max_Size: 1M
+        - kubelet:
+            header: input
+            Name: systemd
+            Tag: journal.*
+            Path: ${JOURNAL_PATH}
+            Systemd_Filter: _SYSTEMD_UNIT=kubelet.service
+            DB: /var/log/kubelet.db
+            Mem_Buf_Limit: 5MB
+            DB.Sync: Normal
+            Buffer_Chunk_Size: 1M
+            Buffer_Max_Size: 1M
+        - docker_daemon:
+            header: input
+            Name: systemd
+            Tag: journal.*
+            Path: ${JOURNAL_PATH}
+            Systemd_Filter: _SYSTEMD_UNIT=docker.service
+            DB: /var/log/docker.db
+            Mem_Buf_Limit: 5MB
+            DB.Sync: Normal
+            Buffer_Chunk_Size: 1M
+            Buffer_Max_Size: 1M
+        - kernel_record_modifier:
+            header: filter
+            Name: record_modifier
+            Match: kernel
+            Record: hostname ${HOSTNAME}
+        - systemd_modify_fields:
+            header: filter
+            Name: modify
+            Match: journal.**
+            Rename:
+              _BOOT_ID: BOOT_ID
+              _CAP_EFFECTIVE: CAP_EFFECTIVE
+              _CMDLINE: CMDLINE
+              _COMM: COMM
+              _EXE: EXE
+              _GID: GID
+              _HOSTNAME: HOSTNAME
+              _MACHINE_ID: MACHINE_ID
+              _PID: PID
+              _SYSTEMD_CGROUP: SYSTEMD_CGROUP
+              _SYSTEMD_SLICE: SYSTEMD_SLICE
+              _SYSTEMD_UNIT: SYSTEMD_UNIT
+              _UID: UID
+              _TRANSPORT: TRANSPORT
+        - containers_tail:
+            header: input
+            Name: tail
+            Tag: kube.*
+            Path: /var/log/containers/*.log
+            Parser: docker
+            DB: /var/log/flb_kube.db
+            Mem_Buf_Limit: 5MB
+            DB.Sync: Normal
+            Buffer_Chunk_Size: 1M
+            Buffer_Max_Size: 1M
+        - drop_fluentd_logs:
+            header: output
+            Name: "null"
+            Match: "**.fluentd**"
+        - kube_filter:
+            header: filter
+            Name: kubernetes
+            Match: kube.*
+            Merge_JSON_Log: On
+        - fluentd_output:
+            header: output
+            Name: forward
+            Match: "*"
+            Host: ${FLUENTD_HOST}
+            Port: ${FLUENTD_PORT}
+      parsers:
+        - docker:
+            header: parser
+            Name: docker
+            Format: json
+            Time_Key: time
+            Time_Format: "%Y-%m-%dT%H:%M:%S.%L"
+            Time_Keep: On
+        - syslog:
+            header: parser
+            Name: syslog
+            Format: regex
+            Regex: '^(?