diff --git a/scripts/log-collect.sh b/scripts/log-collect.sh
index becb070c32..2394f9faef 100755
--- a/scripts/log-collect.sh
+++ b/scripts/log-collect.sh
@@ -36,28 +36,28 @@ export RSYNC_CMD="rsync --archive --safe-links --ignore-errors --quiet --no-perms
 # things to this list please alphabetize the entries so it's
 # easy for folks to find and adjust items as needed.
 COMMON_ETC_LOG_NAMES="apt \
-  apache2 \
-  haproxy \
-  httpd \
-  memcached \
-  mongodb \
-  my.cnf \
-  mysql \
-  netplan \
-  network \
-  nginx \
-  pip.conf \
-  qpid-dispatch \
-  rabbitmq \
-  repo \
-  resolv.conf \
-  rsyslog \
-  sasl2 \
-  sysconfig/network-scripts \
-  sysconfig/network \
-  systemd/network \
-  yum \
-  yum.repos.d \
-  zypp"
+                      apache2 \
+                      haproxy \
+                      httpd \
+                      memcached \
+                      mongodb \
+                      my.cnf \
+                      mysql \
+                      netplan \
+                      network \
+                      nginx \
+                      pip.conf \
+                      qpid-dispatch \
+                      rabbitmq \
+                      repo \
+                      resolv.conf \
+                      rsyslog \
+                      sasl2 \
+                      sysconfig/network-scripts \
+                      sysconfig/network \
+                      systemd/network \
+                      yum \
+                      yum.repos.d \
+                      zypp"
 
 COMMON_ETC_LOG_NAMES+=" $(awk -F'os_' '/name.*os_.*/ {print $2}' $(dirname $(readlink -f ${BASH_SOURCE[0]}))/../ansible-role-requirements.yml | tr '\n' ' ')"
@@ -95,65 +95,65 @@ function repo_information {
 }
 
 function store_artifacts {
-  # Store known artifacts only if they exist. If the target directory does
-  # exist, it will be created.
-  # USAGE: store_artifacts /src/to/artifacts /path/to/store
-  if sudo test -e "${1}"; then
-    if [[ ! -d "${2}" ]]; then
-      mkdir -vp "${2}"
-    fi
-    echo "Running artifact sync for \"${1}\" to \"${2}\""
-    sudo ${RSYNC_CMD} ${1} ${2} || true
-  fi
+    # Store known artifacts only if they exist. If the target directory does
+    # not exist, it will be created.
+    # USAGE: store_artifacts /src/to/artifacts /path/to/store
+    if sudo test -e "${1}"; then
+        if [[ ! -d "${2}" ]]; then
+            mkdir -vp "${2}"
+        fi
+        echo "Running artifact sync for \"${1}\" to \"${2}\""
+        sudo ${RSYNC_CMD} ${1} ${2} || true
+    fi
 }
 
 function store_journal_artifacts {
-  # Store lines from a known unit's journal as a plain-text log file.
-  # USAGE: store_journal_artifacts UNIT_TO_MATCH /path/to/store
-  if [ $? == 0 ]; then
-    if [[ ! -d "${2}" ]]; then
-      mkdir -vp "${2}"
-    fi
-    if [[ ${3:-false} != false ]]; then
-      if [[ -f "${3}/system.journal" ]]; then
-        SYSTEMD_UNITS=$(sudo journalctl --file="${3}/system.journal" -F _SYSTEMD_UNIT | grep "${service}")
-        for service_unit in $(echo -e "${SYSTEMD_UNITS}"); do
-          echo "Pulling journal for ${service_unit}"
-          sudo journalctl --file="${3}/system.journal" \
-            --unit="${service_unit}" | sudo tee "${2}/${service_unit}.journal-${TS}.log" &>/dev/null
-        done
-      fi
-    else
-      SYSTEMD_UNITS=$(sudo journalctl --output=json-pretty -F _SYSTEMD_UNIT | grep "${service}")
-      for service_unit in $(echo -e "${SYSTEMD_UNITS}"); do
-        echo "Pulling journal for ${service_unit}"
-        sudo journalctl --unit="${service_unit}" | sudo tee "${2}/${service_unit}.journal-${TS}.log" &>/dev/null
-      done
-    fi
-  fi
+    # Store lines from a known unit's journal as a plain-text log file.
+    # USAGE: store_journal_artifacts UNIT_TO_MATCH /path/to/store
+    if [ $? == 0 ]; then
+        if [[ ! -d "${2}" ]]; then
+            mkdir -vp "${2}"
+        fi
+        if [[ ${3:-false} != false ]]; then
+            if [[ -f "${3}/system.journal" ]]; then
+                SYSTEMD_UNITS=$(sudo journalctl --file="${3}/system.journal" -F _SYSTEMD_UNIT | grep "${service}")
+                for service_unit in $(echo -e "${SYSTEMD_UNITS}"); do
+                    echo "Pulling journal for ${service_unit}"
+                    sudo journalctl --file="${3}/system.journal" \
+                        --unit="${service_unit}" | sudo tee "${2}/${service_unit}.journal-${TS}.log" &>/dev/null
+                done
+            fi
+        else
+            SYSTEMD_UNITS=$(sudo journalctl --output=json-pretty -F _SYSTEMD_UNIT | grep "${service}")
+            for service_unit in $(echo -e "${SYSTEMD_UNITS}"); do
+                echo "Pulling journal for ${service_unit}"
+                sudo journalctl --unit="${service_unit}" | sudo tee "${2}/${service_unit}.journal-${TS}.log" &>/dev/null
+            done
+        fi
+    fi
 }
 
 function find_files {
-  find "${WORKING_DIR}/logs/" -type f \
-    ! -name "*.gz" \
-    ! -name '*.html' \
-    ! -name '*.subunit' \
-    ! -name "*.journal" \
-    ! -name 'ansible.sqlite' | grep -v 'stackviz'
+    find "${WORKING_DIR}/logs/" -type f \
+        ! -name "*.gz" \
+        ! -name '*.html' \
+        ! -name '*.subunit' \
+        ! -name "*.journal" \
+        ! -name 'ansible.sqlite' | grep -v 'stackviz'
 }
 
 function rename_files {
-  find_files |\
-    while read filename; do \
-      mv ${filename} ${filename}.txt || echo "WARNING: Could not rename ${filename}"; \
-    done
+    find_files |\
+        while read filename; do \
+            mv ${filename} ${filename}.txt || echo "WARNING: Could not rename ${filename}"; \
+        done
 }
 
 function compress_files {
-  # We use 'command' to ensure that we're not executing with an alias.
-  GZIP_CMD="command gzip --force --best"
-  find_files |\
-    while read filename; do \
-      ${GZIP_CMD} ${filename} || echo "WARNING: Could not gzip ${filename}"; \
-    done
+    # We use 'command' to ensure that we're not executing with an alias.
+    GZIP_CMD="command gzip --force --best"
+    find_files |\
+        while read filename; do \
+            ${GZIP_CMD} ${filename} || echo "WARNING: Could not gzip ${filename}"; \
+        done
 }
@@ -206,29 +206,29 @@ done
 
 # Gather container etc artifacts
 if which lxc-ls &> /dev/null; then
-  for CONTAINER_NAME in $(sudo lxc-ls -1); do
-    CONTAINER_PID=$(sudo lxc-info -p -n ${CONTAINER_NAME} | awk '{print $2}')
-    ETC_DIR="/proc/${CONTAINER_PID}/root/etc"
-    MACHINE_ID="$(sudo cat ${ETC_DIR}/machine-id)"
-    LOG_DIR="/proc/${CONTAINER_PID}/root/var/log"
-    JOURNAL_DIR="/proc/${CONTAINER_PID}/root/run/log/journal/${MACHINE_ID}"
-    repo_information ${CONTAINER_NAME}
-    PIDS=()
-    for service in ${COMMON_ETC_LOG_NAMES}; do
-      echo "Running in container collection for service ${service}"
-      store_artifacts ${ETC_DIR}/${service} "${WORKING_DIR}/logs/etc/openstack/${CONTAINER_NAME}/" &
-      pid=$!
-      PIDS[${pid}]=${pid}
-      store_artifacts ${LOG_DIR}/${service} "${WORKING_DIR}/logs/openstack/${CONTAINER_NAME}/" &
-      pid=$!
-      PIDS[${pid}]=${pid}
-      store_journal_artifacts ${service} "${WORKING_DIR}/logs/openstack/${CONTAINER_NAME}" "${JOURNAL_DIR}" &
-      pid=$!
-      PIDS[${pid}]=${pid}
-    done
-    echo "Waiting for container collection jobs for ${CONTAINER_NAME} to finish"
-    for job_pid in ${!PIDS[@]}; do
-      wait ${PIDS[$job_pid]} || exit 99
-    done
-  done
+    for CONTAINER_NAME in $(sudo lxc-ls -1); do
+        CONTAINER_PID=$(sudo lxc-info -p -n ${CONTAINER_NAME} | awk '{print $2}')
+        ETC_DIR="/proc/${CONTAINER_PID}/root/etc"
+        MACHINE_ID="$(sudo cat ${ETC_DIR}/machine-id)"
+        LOG_DIR="/proc/${CONTAINER_PID}/root/var/log"
+        JOURNAL_DIR="/proc/${CONTAINER_PID}/root/run/log/journal/${MACHINE_ID}"
+        repo_information ${CONTAINER_NAME}
+        PIDS=()
+        for service in ${COMMON_ETC_LOG_NAMES}; do
+            echo "Running in container collection for service ${service}"
+            store_artifacts ${ETC_DIR}/${service} "${WORKING_DIR}/logs/etc/openstack/${CONTAINER_NAME}/" &
+            pid=$!
+            PIDS[${pid}]=${pid}
+            store_artifacts ${LOG_DIR}/${service} "${WORKING_DIR}/logs/openstack/${CONTAINER_NAME}/" &
+            pid=$!
+            PIDS[${pid}]=${pid}
+            store_journal_artifacts ${service} "${WORKING_DIR}/logs/openstack/${CONTAINER_NAME}" "${JOURNAL_DIR}" &
+            pid=$!
+            PIDS[${pid}]=${pid}
+        done
+        echo "Waiting for container collection jobs for ${CONTAINER_NAME} to finish"
+        for job_pid in ${!PIDS[@]}; do
+            wait ${PIDS[$job_pid]} || exit 99
+        done
+    done
 fi
@@ -258,10 +258,10 @@ repo_information host
 
 # Record the active interface configs
 if which ethtool &> /dev/null; then
-  for interface in $(ip -o link | awk -F':' '{print $2}' | sed 's/@.*//g'); do
-    echo "ethtool -k ${interface}"
-    ethtool -k ${interface} > "${WORKING_DIR}/logs/ethtool-${interface}-${TS}-cfg.txt" || true
-  done
+    for interface in $(ip -o link | awk -F':' '{print $2}' | sed 's/@.*//g'); do
+        echo "ethtool -k ${interface}"
+        ethtool -k ${interface} > "${WORKING_DIR}/logs/ethtool-${interface}-${TS}-cfg.txt" || true
+    done
 else
     echo "No ethtool available" | tee -a "${WORKING_DIR}/logs/ethtool-${TS}-${interface}-cfg.txt"
 fi