From f845081bb59d2ad2b19ada992770b7d5ee1274e3 Mon Sep 17 00:00:00 2001
From: "Sigunov, Vladimir (vs422h)"
Date: Mon, 21 Mar 2022 17:12:07 -0400
Subject: [PATCH] [DATABASE] Code improvement

This is to cover a relatively rare situation in which backups of
different databases share the same storage.

Change-Id: I0770e1baf3d33e2d56c34558a9a97a99a01e5e04
---
 helm-toolkit/Chart.yaml                    |  2 +-
 .../db-backup-restore/_backup_main.sh.tpl  | 97 ++++++++++++++++---
 releasenotes/notes/helm-toolkit.yaml       |  1 +
 3 files changed, 85 insertions(+), 15 deletions(-)

diff --git a/helm-toolkit/Chart.yaml b/helm-toolkit/Chart.yaml
index 3de988420..66ba9ac95 100644
--- a/helm-toolkit/Chart.yaml
+++ b/helm-toolkit/Chart.yaml
@@ -15,7 +15,7 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Helm-Toolkit
 name: helm-toolkit
-version: 0.2.34
+version: 0.2.35
 home: https://docs.openstack.org/openstack-helm
 icon: https://www.openstack.org/themes/openstack/images/project-mascots/OpenStack-Helm/OpenStack_Project_OpenStackHelm_vertical.png
 sources:
diff --git a/helm-toolkit/templates/scripts/db-backup-restore/_backup_main.sh.tpl b/helm-toolkit/templates/scripts/db-backup-restore/_backup_main.sh.tpl
index cdc9ff561..db1291566 100755
--- a/helm-toolkit/templates/scripts/db-backup-restore/_backup_main.sh.tpl
+++ b/helm-toolkit/templates/scripts/db-backup-restore/_backup_main.sh.tpl
@@ -271,25 +271,63 @@ store_backup_remotely() {
 # possible case, when we have several backups of the same date. E.g.
 # one manual, and one automatic.

-declare -A FILETABLE
+function get_archive_date(){
+# get_archive_date returns the correct archive date
+# for both formats of archive names:
+# the old one: <db_name>.<namespace>.<table_name | all>.<date_time>.tar.gz
+# the new one: <db_name>.<namespace>.<table_name | all>.<backup_mode>.<date_time>.tar.gz
+local A_FILE="$1"
+local A_DATE=""
+if [[ -z ${BACK_UP_MODE} ]]; then
+  A_DATE=$( awk -F/ '{print $NF}' <<< ${A_FILE} | cut -d'.' -f 4 | tr -d "Z")
+else
+  A_DATE=$( awk -F/ '{print $NF}' <<< ${A_FILE} | cut -d'.' -f 5 | tr -d "Z")
+fi
+echo ${A_DATE}
+}
+
+declare -A fileTable
 create_hash_table() {
-unset FILETABLE
+unset fileTable
 fileList=$@
 for ARCHIVE_FILE in ${fileList}; do
-  ARCHIVE_DATE=$( echo $ARCHIVE_FILE | awk -F/ '{print $NF}' | cut -d'.' -f 4)
 # Creating index, we will round given ARCHIVE_DATE to the midnight (00:00:00)
 # to take in account a possibility, that we can have more than one scheduled
 # backup per day.
-  INDEX=$(seconds_difference $(date --date $ARCHIVE_DATE +"%D"))
-  if [[ -z FILETABLE[${INDEX}] ]]; then
-    FILETABLE[${INDEX}]=${ARCHIVE_FILE}
+  ARCHIVE_DATE=$(get_archive_date ${ARCHIVE_FILE})
+  ARCHIVE_DATE=$(date --date=${ARCHIVE_DATE} +%D)
+  log INFO "${DB_NAME}_backup" "Archive date to build index: ${ARCHIVE_DATE}"
+  INDEX=$(seconds_difference ${ARCHIVE_DATE})
+  if [[ -z fileTable[${INDEX}] ]]; then
+    fileTable[${INDEX}]=${ARCHIVE_FILE}
   else
-    FILETABLE[${INDEX}]="${FILETABLE[${INDEX}]} ${ARCHIVE_FILE}"
+    fileTable[${INDEX}]="${fileTable[${INDEX}]} ${ARCHIVE_FILE}"
   fi
-  echo "INDEX: ${INDEX} VALUE: ${FILETABLE[${INDEX}]}"
+  echo "INDEX: ${INDEX} VALUE: ${fileTable[${INDEX}]}"
 done
 }

+function get_backup_prefix() {
+# Create a list of all possible prefixes in the format <db_name>.<namespace>
+# to cover the situation when backups of different databases and/or
+# namespaces share the same local or remote storage.
+  ALL_FILES=($@)
+  PREFIXES=()
+  for fname in ${ALL_FILES[@]}; do
+    prefix=$(basename ${fname} | cut -d'.' -f1,2)
+    for ((i=0; i<${#PREFIXES[@]}; i++)); do
+      if [[ ${PREFIXES[${i}]} == ${prefix} ]]; then
+        prefix=""
+        break
+      fi
+    done
+    if [[ ! -z ${prefix} ]]; then
+      PREFIXES+=(${prefix})
+    fi
+  done
+}
+
 remove_old_local_archives() {
   if [[ -d $ARCHIVE_DIR ]]; then
     count=0
@@ -317,6 +355,33 @@ remove_old_local_archives() {
   fi
 }

+remove_old_local_archives() {
+  SECONDS_TO_KEEP=$((${LOCAL_DAYS_TO_KEEP}*86400))
+  log INFO "${DB_NAME}_backup" "Deleting backups older than ${LOCAL_DAYS_TO_KEEP} days (${SECONDS_TO_KEEP} seconds)"
+  if [[ -d $ARCHIVE_DIR ]]; then
+    count=0
+    # We iterate over the hash table, checking the delta in seconds (the hash keys)
+    # against the minimum number of backups we must keep. The list of keys has to be sorted.
+    for INDEX in $(tr " " "\n" <<< ${!fileTable[@]} | sort -n -); do
+      ARCHIVE_FILE=${fileTable[${INDEX}]}
+      if [[ ${INDEX} -lt ${SECONDS_TO_KEEP} || ${count} -lt ${LOCAL_DAYS_TO_KEEP} ]]; then
+        ((count++))
+        log INFO "${DB_NAME}_backup" "Keeping file(s) ${ARCHIVE_FILE}."
+      else
+        log INFO "${DB_NAME}_backup" "Deleting file(s) ${ARCHIVE_FILE}."
+        rm -f ${ARCHIVE_FILE}
+        if [[ $? -ne 0 ]]; then
+          # Log the error but don't exit, so we can finish the script,
+          # because at this point we haven't sent the backup to the RGW yet.
+          log ERROR "${DB_NAME}_backup" "Failed to cleanup local backup. Cannot remove some of ${ARCHIVE_FILE}"
+        fi
+      fi
+    done
+  else
+    log WARN "${DB_NAME}_backup" "The local backup directory ${ARCHIVE_DIR} does not exist."
+  fi
+}
+
 prepare_list_of_remote_backups() {
   BACKUP_FILES=$(mktemp -p /tmp)
   DB_BACKUP_FILES=$(mktemp -p /tmp)
@@ -332,18 +397,18 @@ prepare_list_of_remote_backups() {
 # The logic implemented with this function is absolutely similar
 # to the function remove_old_local_archives (see above)
 remove_old_remote_archives() {
-  log INFO "${DB_NAME}_backup" "Deleting backups older than ${REMOTE_DAYS_TO_KEEP} days"
   count=0
   SECONDS_TO_KEEP=$((${REMOTE_DAYS_TO_KEEP}*86400))
+  log INFO "${DB_NAME}_backup" "Deleting backups older than ${REMOTE_DAYS_TO_KEEP} days (${SECONDS_TO_KEEP} seconds)"
   for INDEX in $(tr " " "\n" <<< ${!FILETABLE[@]} | sort -n -); do
     ARCHIVE_FILE=${FILETABLE[${INDEX}]}
-    if [[ ${INDEX} -le ${SECONDS_TO_KEEP} || ${count} -lt ${REMOTE_DAYS_TO_KEEP} ]]; then
+    if [[ ${INDEX} -lt ${SECONDS_TO_KEEP} || ${count} -lt ${REMOTE_DAYS_TO_KEEP} ]]; then
       ((count++))
       log INFO "${DB_NAME}_backup" "Keeping remote backup(s) ${ARCHIVE_FILE}."
     else
       log INFO "${DB_NAME}_backup" "Deleting remote backup(s) ${ARCHIVE_FILE} from the RGW"
-      openstack object delete ${CONTAINER_NAME} ${ARCHIVE_FILE} || log_backup_error_exit \
-        "Failed to cleanup remote backup. Cannot delete container object ${ARCHIVE_FILE}!"
+      openstack object delete ${CONTAINER_NAME} ${ARCHIVE_FILE} || log WARN "${DB_NAME}_backup" \
+        "Failed to cleanup remote backup. Cannot delete container object ${ARCHIVE_FILE}"
     fi
   done

@@ -416,8 +481,12 @@ backup_databases() {
   #Only delete the old archive after a successful archive
   export LOCAL_DAYS_TO_KEEP=$(echo $LOCAL_DAYS_TO_KEEP | sed 's/"//g')
   if [[ "$LOCAL_DAYS_TO_KEEP" -gt 0 ]]; then
-    create_hash_table $(ls -1 $ARCHIVE_DIR/*.gz)
-    remove_old_local_archives
+    get_backup_prefix $(ls -1 ${ARCHIVE_DIR}/*.gz)
+    for ((i=0; i<${#PREFIXES[@]}; i++)); do
+      echo "Working with prefix: ${PREFIXES[i]}"
+      create_hash_table $(ls -1 ${ARCHIVE_DIR}/${PREFIXES[i]}*.gz)
+      remove_old_local_archives
+    done
   fi

   REMOTE_BACKUP=$(echo $REMOTE_BACKUP_ENABLED | sed 's/"//g')
diff --git a/releasenotes/notes/helm-toolkit.yaml b/releasenotes/notes/helm-toolkit.yaml
index af1a276cc..f5af754a0 100644
--- a/releasenotes/notes/helm-toolkit.yaml
+++ b/releasenotes/notes/helm-toolkit.yaml
@@ -41,4 +41,5 @@ helm-toolkit:
   - 0.2.32 Consolidate mon_endpoints discovery
   - 0.2.33 Remove set -x
   - 0.2.34 Modify database backup logic to maintain minimum number of backups
+  - 0.2.35 Database B/R improvements
 ...
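
A minimal standalone sketch (not part of the patch) of how the prefix grouping introduced above is intended to behave when two databases share one backup directory. The archive names and the temporary directory are hypothetical; the loop mirrors the logic of get_backup_prefix rather than reusing the template code.

    #!/bin/bash
    # Hypothetical archives from two databases sharing one local storage;
    # real names follow <db_name>.<namespace>.<...>.tar.gz
    ARCHIVE_DIR=$(mktemp -d)
    touch "${ARCHIVE_DIR}/mariadb.openstack.all.2022-03-20T00:00:05Z.tar.gz" \
          "${ARCHIVE_DIR}/mariadb.openstack.all.2022-03-21T00:00:04Z.tar.gz" \
          "${ARCHIVE_DIR}/postgresql.osh-infra.all.2022-03-21T00:00:09Z.tar.gz"

    # Same idea as get_backup_prefix: keep one entry per unique <db_name>.<namespace>
    PREFIXES=()
    for fname in "${ARCHIVE_DIR}"/*.gz; do
      prefix=$(basename "${fname}" | cut -d'.' -f1,2)
      [[ " ${PREFIXES[*]} " == *" ${prefix} "* ]] || PREFIXES+=("${prefix}")
    done

    # Each prefix is then processed independently, so the retention counters of
    # one database never see (or delete) the archives of another database.
    for prefix in "${PREFIXES[@]}"; do
      echo "Working with prefix: ${prefix}"
      ls -1 "${ARCHIVE_DIR}/${prefix}"*.gz
    done
    rm -rf "${ARCHIVE_DIR}"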