Skip to content

Commit

Permalink
Merge pull request #266 from yuki462-b/update-discovery-scripts-for-4.8.6
Browse files Browse the repository at this point in the history

Updating backup restore scripts for WD 4.8.6.
  • Loading branch information
michelle-miller authored Sep 9, 2024
2 parents 39b7526 + 6ad8e99 commit e536b21
Show file tree
Hide file tree
Showing 7 changed files with 187 additions and 64 deletions.
37 changes: 19 additions & 18 deletions discovery-data/latest/elastic-backup-restore.sh
Original file line number Diff line number Diff line change
Expand Up @@ -82,9 +82,10 @@ if [ $(compare_version ${WD_VERSION} "4.7.0") -ge 0 ] ; then
create_elastic_shared_pvc
# Mount shared volume
oc ${OC_ARGS} patch wd "${TENANT_NAME}" --type merge --patch "{\"spec\": {\"elasticsearch\": {\"sharedStoragePvc\": \"${ELASTIC_SHARED_PVC}\"}}}"
ELASTIC_DATA_STS=$(oc ${OC_ARGS} get sts -l "icpdsupport/addOnId=discovery,icpdsupport/app=elastic,tenant=${TENANT_NAME},ibm-es-data=True" -o jsonpath='{.items[*].metadata.name}')
while :
do
test -n "$(oc ${OC_ARGS} get sts ${TENANT_NAME}-ibm-elasticsearch-es-server-data -o jsonpath="{..volumes[?(@.persistentVolumeClaim.claimName==\"${ELASTIC_SHARED_PVC}\")]}")" && break
test -n "$(oc ${OC_ARGS} get sts ${ELASTIC_DATA_STS} -o jsonpath="{..volumes[?(@.persistentVolumeClaim.claimName==\"${ELASTIC_SHARED_PVC}\")]}")" && break
brlog "INFO" "Wait for ElasticSearch to mount shared PVC"
sleep 30
done
Expand All @@ -97,7 +98,7 @@ if [ $(compare_version ${WD_VERSION} "4.7.0") -ge 0 ] ; then
sleep 30
# Update configmap
brlog "INFO" "Update ConfigMap for ElastisSearch configuration"
oc ${OC_ARGS} rollout status sts "${TENANT_NAME}-ibm-elasticsearch-es-server-data"
oc ${OC_ARGS} rollout status sts "${ELASTIC_DATA_STS}"
for cm in $(oc ${OC_ARGS} get cm -l "icpdsupport/addOnId=discovery,icpdsupport/app=elastic,tenant=${TENANT_NAME}" -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep -v "cpdbr")
do
update_elastic_configmap "${cm}"
Expand Down Expand Up @@ -242,14 +243,14 @@ function clean_up(){
done ' ${OC_ARGS} -c elasticsearch
echo

if [ $(compare_version ${WD_VERSION} "5.0.0") -lt 0 ]; then
if [ $(compare_version ${WD_VERSION} "4.8.6") -lt 0 ]; then
run_cmd_in_pod ${ELASTIC_POD} 'export ELASTIC_ENDPOINT=https://localhost:9200 && \
curl -XPUT -s -k -u ${ELASTIC_USER}:${ELASTIC_PASSWORD} "${ELASTIC_ENDPOINT}/_cluster/settings" -H "Content-Type: application/json" -d"{\"transient\": {\"discovery.zen.commit_timeout\": null, \"discovery.zen.publish_timeout\": null}}"' ${OC_ARGS} -c elasticsearch
fi

if [ $(compare_version ${WD_VERSION} "4.7.0") -lt 0 ] ; then
start_minio_port_forward
${MC} "${MC_OPTS[@]}" rm --recursive --force --dangerous wdminio/${ELASTIC_BACKUP_BUCKET}/ > /dev/null
"${MC}" "${MC_OPTS[@]}" rm --recursive --force --dangerous wdminio/${ELASTIC_BACKUP_BUCKET}/ > /dev/null
stop_minio_port_forward
echo
else
Expand All @@ -272,11 +273,11 @@ function clean_up(){
brlog "INFO" "Waiting for ElasticSearch pod start up"
while :
do
oc ${OC_ARGS} get sts ${TENANT_NAME}-ibm-elasticsearch-es-server-data &> /dev/null && break
oc ${OC_ARGS} get sts "${ELASTIC_DATA_STS}" &> /dev/null && break
brlog "INFO" "Wait for ElasticSearch statefulset"
sleep 30
done
oc ${OC_ARGS} rollout status sts "${TENANT_NAME}-ibm-elasticsearch-es-server-data"
oc ${OC_ARGS} rollout status sts "${ELASTIC_DATA_STS}"
fi
fi
}
Expand All @@ -294,9 +295,9 @@ if [ ${COMMAND} = 'backup' ] ; then
if [ $(compare_version ${WD_VERSION} "4.7.0") -lt 0 ] ; then
# Clean up MinIO
start_minio_port_forward
${MC} "${MC_OPTS[@]}" config host add wdminio ${S3_ENDPOINT_URL} ${S3_ACCESS_KEY} ${S3_SECRET_KEY} > /dev/null
if [ -n "$(${MC} "${MC_OPTS[@]}" ls wdminio/${ELASTIC_BACKUP_BUCKET}/)" ] ; then
${MC} "${MC_OPTS[@]}" rm --recursive --force --dangerous wdminio/${ELASTIC_BACKUP_BUCKET}/ > /dev/null
"${MC}" "${MC_OPTS[@]}" config host add wdminio ${S3_ENDPOINT_URL} ${S3_ACCESS_KEY} ${S3_SECRET_KEY} > /dev/null
if [ -n "$("${MC}" "${MC_OPTS[@]}" ls wdminio/${ELASTIC_BACKUP_BUCKET}/)" ] ; then
"${MC}" "${MC_OPTS[@]}" rm --recursive --force --dangerous wdminio/${ELASTIC_BACKUP_BUCKET}/ > /dev/null
fi
stop_minio_port_forward
else
Expand All @@ -319,12 +320,12 @@ if [ ${COMMAND} = 'backup' ] ; then
brlog "INFO" "Transfering snapshot from MinIO"
cat << EOF >> "${BACKUP_RESTORE_LOG_DIR}/${CURRENT_COMPONENT}.log"
===================================================
${MC} ${MC_OPTS[@]} mirror wdminio/${ELASTIC_BACKUP_BUCKET} ${TMP_WORK_DIR}/${ELASTIC_BACKUP_DIR}/${ELASTIC_BACKUP_BUCKET}"
"${MC}" ${MC_OPTS[@]} mirror wdminio/${ELASTIC_BACKUP_BUCKET} ${TMP_WORK_DIR}/${ELASTIC_BACKUP_DIR}/${ELASTIC_BACKUP_BUCKET}"
===================================================
EOF
set +e
start_minio_port_forward
${MC} "${MC_OPTS[@]}" mirror wdminio/${ELASTIC_BACKUP_BUCKET} ${TMP_WORK_DIR}/${ELASTIC_BACKUP_DIR}/${ELASTIC_BACKUP_BUCKET} &>> "${BACKUP_RESTORE_LOG_DIR}/${CURRENT_COMPONENT}.log"
"${MC}" "${MC_OPTS[@]}" mirror wdminio/${ELASTIC_BACKUP_BUCKET} ${TMP_WORK_DIR}/${ELASTIC_BACKUP_DIR}/${ELASTIC_BACKUP_BUCKET} &>> "${BACKUP_RESTORE_LOG_DIR}/${CURRENT_COMPONENT}.log"
RC=$?
stop_minio_port_forward
echo "RC=${RC}" >> "${BACKUP_RESTORE_LOG_DIR}/${CURRENT_COMPONENT}.log"
Expand Down Expand Up @@ -382,27 +383,27 @@ if [ "${COMMAND}" = 'restore' ] ; then
tar "${ELASTIC_TAR_OPTIONS[@]}" -xf ${BACKUP_FILE} -C ${TMP_WORK_DIR}/${ELASTIC_BACKUP_DIR}/${ELASTIC_BACKUP_BUCKET}/${ELASTIC_SNAPSHOT_PATH}
brlog "INFO" "Transferring data to MinIO..."
start_minio_port_forward
${MC} "${MC_OPTS[@]}" config host add wdminio ${S3_ENDPOINT_URL} ${S3_ACCESS_KEY} ${S3_SECRET_KEY} > /dev/null
if [ -n "$(${MC} "${MC_OPTS[@]}" ls wdminio/${ELASTIC_BACKUP_BUCKET}/)" ] ; then
${MC} "${MC_OPTS[@]}" rm --recursive --force --dangerous wdminio/${ELASTIC_BACKUP_BUCKET}/ > /dev/null
"${MC}" "${MC_OPTS[@]}" config host add wdminio ${S3_ENDPOINT_URL} ${S3_ACCESS_KEY} ${S3_SECRET_KEY} > /dev/null
if [ -n "$("${MC}" "${MC_OPTS[@]}" ls wdminio/${ELASTIC_BACKUP_BUCKET}/)" ] ; then
"${MC}" "${MC_OPTS[@]}" rm --recursive --force --dangerous wdminio/${ELASTIC_BACKUP_BUCKET}/ > /dev/null
fi
stop_minio_port_forward
set +e
cat << EOF >> "${BACKUP_RESTORE_LOG_DIR}/${CURRENT_COMPONENT}.log"
===================================================
${MC} ${MC_OPTS[@]} mirror --debug ${TMP_WORK_DIR}/${ELASTIC_BACKUP_DIR}/${ELASTIC_BACKUP_BUCKET} wdminio/${ELASTIC_BACKUP_BUCKET}
"${MC}" ${MC_OPTS[@]} mirror --debug ${TMP_WORK_DIR}/${ELASTIC_BACKUP_DIR}/${ELASTIC_BACKUP_BUCKET} wdminio/${ELASTIC_BACKUP_BUCKET}
===================================================
EOF
start_minio_port_forward
${MC} "${MC_OPTS[@]}" mirror ${TMP_WORK_DIR}/${ELASTIC_BACKUP_DIR}/${ELASTIC_BACKUP_BUCKET} wdminio/${ELASTIC_BACKUP_BUCKET} &>> "${BACKUP_RESTORE_LOG_DIR}/${CURRENT_COMPONENT}.log"
"${MC}" "${MC_OPTS[@]}" mirror ${TMP_WORK_DIR}/${ELASTIC_BACKUP_DIR}/${ELASTIC_BACKUP_BUCKET} wdminio/${ELASTIC_BACKUP_BUCKET} &>> "${BACKUP_RESTORE_LOG_DIR}/${CURRENT_COMPONENT}.log"
RC=$?
stop_minio_port_forward
echo "RC=${RC}" >> "${BACKUP_RESTORE_LOG_DIR}/${CURRENT_COMPONENT}.log"
if [ $RC -ne 0 ] ; then
brlog "ERROR" "Some files could not be transfered. Consider to use '--use-job' and '--pvc' option. Please see help (--help) for details."
brlog "INFO" "Clean up"
start_minio_port_forward
${MC} "${MC_OPTS[@]}" rm --recursive --force --dangerous wdminio/${ELASTIC_BACKUP_BUCKET}/ > /dev/null
"${MC}" "${MC_OPTS[@]}" rm --recursive --force --dangerous wdminio/${ELASTIC_BACKUP_BUCKET}/ > /dev/null
stop_minio_port_forward
exit 1
fi
Expand All @@ -421,7 +422,7 @@ EOF
curl -XPUT --fail -s -k -u ${ELASTIC_USER}:${ELASTIC_PASSWORD} "${ELASTIC_ENDPOINT}/_snapshot/'${ELASTIC_REPO}'?master_timeout='${ELASTIC_REQUEST_TIMEOUT}'" -H "Content-Type: application/json" '"${REPO_CONFIGURATION}"' && \
curl -XPOST --fail -s -k -u ${ELASTIC_USER}:${ELASTIC_PASSWORD} "${ELASTIC_ENDPOINT}/_snapshot/'${ELASTIC_REPO}'/'${ELASTIC_SNAPSHOT}'/_restore?master_timeout='${ELASTIC_REQUEST_TIMEOUT}'" -H "Content-Type: application/json" -d"{\"indices\": \"*,-application_logs-*\", \"expand_wildcards\": \"all\", \"allow_no_indices\": \"true\"}" | grep accepted && echo ' ${OC_ARGS} -c elasticsearch

if [ $(compare_version ${WD_VERSION} "5.0.0") -lt 0 ]; then
if [ $(compare_version ${WD_VERSION} "4.8.6") -lt 0 ]; then
run_cmd_in_pod ${ELASTIC_POD} 'export ELASTIC_ENDPOINT=https://localhost:9200 && \
curl -XPUT --fail -s -k -u ${ELASTIC_USER}:${ELASTIC_PASSWORD} "${ELASTIC_ENDPOINT}/_cluster/settings" -H "Content-Type: application/json" -d"{\"transient\": {\"discovery.zen.commit_timeout\": \"'${ELASTIC_REQUEST_TIMEOUT}'\", \"discovery.zen.publish_timeout\": \"'${ELASTIC_REQUEST_TIMEOUT}'\"}}" ' ${OC_ARGS} -c elasticsearch
fi
Expand Down
92 changes: 79 additions & 13 deletions discovery-data/latest/lib/function.bash
Original file line number Diff line number Diff line change
Expand Up @@ -42,9 +42,17 @@ trap_add(){
trap "${cmd}" 0 1 2 3 15
}

trap_remove(){
# Remove $1 command from 'trap_commands'.
trap_commands=( "${trap_commands[@]/$1}" )
trap_remove() {
local element_to_remove="$1"

# Remove element_to_remove from trap_commands array.
new_trap_commands=()
for element in "${trap_commands[@]}"; do
if [[ "$element" != "$element_to_remove" ]]; then
new_trap_commands+=("$element")
fi
done
trap_commands=("${new_trap_commands[@]}")

# NOTE: if 'trap_commands' is empty, "${trap_commands}" cause unbound variable error when set -u.
if [[ -z "${trap_commands[@]}" ]]; then
Expand Down Expand Up @@ -142,6 +150,8 @@ get_version(){
fi
}

# Usage: compare_version VER_1 VER_2.
# Output: return -1 if VER_1 < VER_2, 0 if VER_1 == VER_2, and +1 if VER_1 > VER_2.
compare_version(){
VER_1=(${1//./ })
VER_2=(${2//./ })
Expand Down Expand Up @@ -397,6 +407,62 @@ get_mc(){
fi
}

# Compare two timestamps (i.e. 2024-06-12T14:34:03Z).
# Return 1 if timestamp1 > timestamp2 else 0.
# Compare two ISO-8601 timestamps (e.g. 2024-06-12T14:34:03Z).
# Outputs "1" if timestamp1 is strictly later than timestamp2, otherwise "0".
# Outputs "0" and returns non-zero if either timestamp cannot be parsed.
# NOTE(review): relies on GNU 'date -d'; BSD/macOS date does not support it — confirm Linux-only usage.
compare_timestamps() {
  local timestamp1="$1"
  local timestamp2="$2"
  local epoch1 epoch2

  # Declaration is separated from assignment so a failing 'date' is not
  # masked by 'local' always succeeding, and bad input cannot reach the
  # numeric test below as an empty operand.
  if ! epoch1=$(date -d "$timestamp1" +%s 2>/dev/null) || \
     ! epoch2=$(date -d "$timestamp2" +%s 2>/dev/null); then
    echo "0"
    return 1
  fi

  # Compare the two epoch values.
  if [ "$epoch1" -gt "$epoch2" ]; then
    echo "1"
  else
    echo "0"
  fi
}

# Extract timestamp from `mc --version` output.
# Extract the release timestamp from `mc --version` output
# (e.g. "mc version RELEASE.2023-10-24T05-18-28Z ..." -> "2023-10-24T05:18:28Z").
# Outputs the timestamp on stdout; returns non-zero if it cannot be extracted.
get_mc_version() {
  local version_output
  version_output=$("${MC}" --version 2>/dev/null)

  # Use bash's built-in regex matching instead of GNU-only 'grep -oP'
  # so this also works where only BSD/busybox grep is available.
  local release_re='mc version RELEASE\.([0-9]{4}-[0-9]{2}-[0-9]{2})T([0-9]{2})-([0-9]{2})-([0-9]{2})Z'
  if [[ ! "$version_output" =~ $release_re ]]; then
    # Diagnostics go to stderr so $(get_mc_version) captures by callers
    # are not polluted with the error text.
    echo "Error extracting timestamp" >&2
    return 1
  fi

  # Re-assemble with colons in the time part (the RELEASE tag uses hyphens).
  echo "${BASH_REMATCH[1]}T${BASH_REMATCH[2]}:${BASH_REMATCH[3]}:${BASH_REMATCH[4]}Z"
}

# Judge if the --retry option is available in mc mirror command.
# If the option is available return 1 else 0.
# Judge if the --retry option is available in the `mc mirror` command
# (i.e. the installed mc is newer than RELEASE.2023-10-24T05-18-28Z).
# Outputs "1" if available, otherwise "0" — including on version-detection
# failure, so callers doing a numeric test like
#   [ "$(has_mc_mirror_retry)" -eq 1 ]
# never receive a non-numeric value (the original echoed an error string
# on stdout here, which broke that comparison).
# NOTE(review): the check is strictly-greater; confirm whether the release
# equal to the threshold itself already supports --retry.
has_mc_mirror_retry() {
  local timestamp_current
  local timestamp_comparison="2023-10-24T05:18:28Z"

  if ! timestamp_current=$(get_mc_version); then
    # Safe numeric default on stdout; diagnostics on stderr only.
    echo "Error extracting timestamp" >&2
    echo "0"
    return 1
  fi

  if [ "$(compare_timestamps "$timestamp_current" "$timestamp_comparison")" -eq 1 ]; then
    echo "1"
  else
    echo "0"
  fi
}

start_minio_port_forward(){
touch ${TMP_WORK_DIR}/keep_minio_port_forward
trap "rm -f ${TMP_WORK_DIR}/keep_minio_port_forward" 0 1 2 3 15
Expand All @@ -408,7 +474,7 @@ keep_minio_port_forward(){
while [ -e ${TMP_WORK_DIR}/keep_minio_port_forward ]
do
if [ -n "${S3_NAMESPACE+UNDEF}" ] ; then
oc ${OC_ARGS} -n "${S3_NAMESPACE}" port-forward svc/${S3_PORT_FORWARD_SVC} ${S3_FORWARD_PORT}:${S3_PORT} &>> "${BACKUP_RESTORE_LOG_DIR}/port-forward.log" &
oc ${OC_ARGS} -n "${S3_NAMESPACE}" port-forward "svc/${S3_PORT_FORWARD_SVC}" "${S3_FORWARD_PORT}:${S3_PORT}" &>> "${BACKUP_RESTORE_LOG_DIR}/port-forward.log" &
else
oc ${OC_ARGS} port-forward svc/${S3_PORT_FORWARD_SVC} ${S3_FORWARD_PORT}:${S3_PORT} &>> "${BACKUP_RESTORE_LOG_DIR}/port-forward.log" &
fi
Expand Down Expand Up @@ -488,7 +554,7 @@ Backup/Restore failed.
You can restart ${COMMAND} with adding "--continue-from" option:
ex) ./all-backup-restore.sh ${COMMAND} -f ${BACKUP_FILE} --continue-from ${CURRENT_COMPONENT} ${RETRY_ADDITIONAL_OPTION:-}
You can unquiesce WatsonDiscovery by this command:
oc patch wd wd --type merge --patch '{"spec": {"shared": {"quiesce": {"enabled": false}}}}'
oc patch wd ${TENANT_NAME} --type merge --patch '{"spec": {"shared": {"quiesce": {"enabled": false}}}}'
EOS
)
brlog "ERROR" "${message}"
Expand Down Expand Up @@ -577,10 +643,10 @@ get_migrator_image(){
if [ $(compare_version "${wd_version}" "4.6.0") -le 0 ] ; then
echo "$(get_migrator_repo):${MIGRATOR_TAG:-$(get_migrator_tag)}"
else
utils_repo="$(oc get watsondiscoveryapi wd -o jsonpath='{.spec.shared.dockerRegistryPrefix}')"
utils_image="$(oc get watsondiscoveryapi wd -o jsonpath='{.spec.shared.initContainer.utils.image.name}')"
utils_tag="$(oc get watsondiscoveryapi wd -o jsonpath='{.spec.shared.initContainer.utils.image.tag}')"
utils_digest="$(oc get watsondiscoveryapi wd -o jsonpath='{.spec.shared.initContainer.utils.image.digest}')"
utils_repo="$(oc get watsondiscoveryapi ${TENANT_NAME} -o jsonpath='{.spec.shared.dockerRegistryPrefix}')"
utils_image="$(oc get watsondiscoveryapi ${TENANT_NAME} -o jsonpath='{.spec.shared.initContainer.utils.image.name}')"
utils_tag="$(oc get watsondiscoveryapi ${TENANT_NAME} -o jsonpath='{.spec.shared.initContainer.utils.image.tag}')"
utils_digest="$(oc get watsondiscoveryapi ${TENANT_NAME} -o jsonpath='{.spec.shared.initContainer.utils.image.digest}')"
echo "${utils_repo}/${utils_image}:${utils_tag}@${utils_digest}"
fi
}
Expand Down Expand Up @@ -1351,7 +1417,7 @@ create_restore_instance_mappings(){
fi
done
else
brlog "INFO" "No Discovery instance exist. Create new one."
brlog "INFO" "No Discovery instance exist. Creating a new one."
local src_instances=( $(fetch_cmd_result ${ZEN_ACCESS_POD} "jq -r '.instance_mappings[].source_instance_id' /tmp/mapping.json") )
local display_names=( $(fetch_cmd_result ${ZEN_ACCESS_POD} "jq -r '.instance_mappings[].display_name' /tmp/mapping.json") )
for i in "${!src_instances[@]}"
Expand All @@ -1375,7 +1441,7 @@ create_restore_instance_mappings(){
}

check_instance_mappings(){
brlog "INFO" "Check instance mapping"
brlog "INFO" "Checking instance mapping"
if [ -z "${MAPPING_FILE:+UNDEF}" ] ; then
brlog "INFO" "Mapping file is not specified"
export MAPPING_FILE="${BACKUP_DIR}/instance_mapping.json"
Expand Down Expand Up @@ -1466,11 +1532,11 @@ create_service_instance(){
fi
fi
if [ -z "${ZEN_UID+UNDEF}" ] ; then
brlog "INFO" "Get CP4D user ID for ${ZEN_USER_NAME}" >&2
brlog "INFO" "Getting CP4D user ID for ${ZEN_USER_NAME}" >&2
token="$(fetch_cmd_result ${ZEN_ACCESS_POD} "curl -ks '${ZEN_CORE_API_ENDPOINT}/internal/v1/service_token?expiration_time=1000' -H 'secret: ${ZEN_CORE_TOKEN}' -H 'cache-control: no-cache' | jq -r .token")"
ZEN_UID="$(fetch_cmd_result ${ZEN_ACCESS_POD} "curl -ks '${ZEN_CORE_API_ENDPOINT}/openapi/v1/users/${ZEN_USER_NAME}' -H 'Authorization: Bearer ${token}' | jq -r '.UserInfo.uid'")"
fi
brlog "INFO" "Create Discovery instance as ${ZEN_USER_NAME}:${ZEN_UID}" >&2
brlog "INFO" "Creating Discovery instance as ${ZEN_USER_NAME}:${ZEN_UID}" >&2
local token=$(fetch_cmd_result ${ZEN_ACCESS_POD} "curl -ks '${ZEN_CORE_API_ENDPOINT}/internal/v1/service_token?uid=${ZEN_UID}&username=${ZEN_USER_NAME}&display_name=${ZEN_USER_NAME}' -H 'secret: ${ZEN_CORE_TOKEN}' -H 'cache-control: no-cache' | jq -r .token")
local instance_id=$(fetch_cmd_result ${ZEN_ACCESS_POD} "curl -ks -X POST '${WATSON_GATEWAY_ENDPOINT}/api/ibmcloud/resource-controller/resource_instances' -H 'Authorization: Bearer ${token}' -H 'Content-Type: application/json' -d@/tmp/request.json | jq -r 'if .zen_id == null or .zen_id == \"\" then \"null\" else .zen_id end'")
if [ "${instance_id}" != "null" ] ; then
Expand Down
27 changes: 19 additions & 8 deletions discovery-data/latest/minio-backup-restore.sh
Original file line number Diff line number Diff line change
Expand Up @@ -113,20 +113,30 @@ fi
export MINIO_CONFIG_DIR="${PWD}/${TMP_WORK_DIR}/.mc"
MC_OPTS=(--config-dir ${MINIO_CONFIG_DIR} --insecure)

# mc mirror command options.
MIRROR_OPTS=(--quiet)
# NOTE: --retry flag is currently a boolean flag and cannot specify the how many times to retry.
if [ "$(has_mc_mirror_retry)" -eq 1 ]; then
MIRROR_OPTS+=("--retry")
fi
if [[ -z "${LOG_LEVEL_NUM:+UNDEF}" ]] || [ "$LOG_LEVEL_NUM" -ge 3 ]; then
MIRROR_OPTS+=("--debug")
fi

BUCKET_SUFFIX="$(get_bucket_suffix)"

# backup
if [ "${COMMAND}" = "backup" ] ; then
brlog "INFO" "Start backup minio"
brlog "INFO" "Backup data..."
start_minio_port_forward
${MC} "${MC_OPTS[@]}" --quiet config host add wdminio ${S3_ENDPOINT_URL} ${S3_ACCESS_KEY} ${S3_SECRET_KEY} > /dev/null
"${MC}" "${MC_OPTS[@]}" --quiet config host add wdminio ${S3_ENDPOINT_URL} ${S3_ACCESS_KEY} ${S3_SECRET_KEY} > /dev/null
EXCLUDE_OBJECTS=$(cat "${SCRIPT_DIR}/src/minio_exclude_paths")
if [ $(compare_version "$(get_version)" "4.7.0") -ge 0 ] ; then
EXCLUDE_OBJECTS+=$'\n'
EXCLUDE_OBJECTS+="$(cat "${SCRIPT_DIR}/src/mcg_exclude_paths")"
fi
for bucket in $(${MC} "${MC_OPTS[@]}" ls wdminio | sed ${SED_REG_OPT} "s|.*[0-9]+B\ (.*)/.*|\1|g" | grep -v ${ELASTIC_BACKUP_BUCKET})
for bucket in $("${MC}" "${MC_OPTS[@]}" ls wdminio | sed ${SED_REG_OPT} "s|.*[0-9]+B\ (.*)/.*|\1|g" | grep -v ${ELASTIC_BACKUP_BUCKET})
do
EXTRA_MC_MIRROR_COMMAND=()
ORG_IFS=${IFS}
Expand All @@ -145,7 +155,7 @@ if [ "${COMMAND}" = "backup" ] ; then
IFS=${ORG_IFS}
cd ${TMP_WORK_DIR}
set +e
${MC} "${MC_OPTS[@]}" --quiet mirror "${EXTRA_MC_MIRROR_COMMAND[@]}" wdminio/${bucket} ${MINIO_BACKUP_DIR}/${bucket} &>> "${SCRIPT_DIR}/${BACKUP_RESTORE_LOG_DIR}/${CURRENT_COMPONENT}.log"
"${MC}" "${MC_OPTS[@]}" mirror "${MIRROR_OPTS[@]}" "${EXTRA_MC_MIRROR_COMMAND[@]}" wdminio/${bucket} ${MINIO_BACKUP_DIR}/${bucket} &>> "${SCRIPT_DIR}/${BACKUP_RESTORE_LOG_DIR}/${CURRENT_COMPONENT}.log"
RC=$?
echo "RC=${RC}" >> "${SCRIPT_DIR}/${BACKUP_RESTORE_LOG_DIR}/${CURRENT_COMPONENT}.log"
if [ $RC -ne 0 ] ; then
Expand Down Expand Up @@ -181,23 +191,24 @@ if [ "${COMMAND}" = "restore" ] ; then
tar "${MINIO_TAR_OPTIONS[@]}" -xf ${BACKUP_FILE} -C ${TMP_WORK_DIR}/${MINIO_BACKUP_DIR}
brlog "INFO" "Restoring data..."
start_minio_port_forward
${MC} "${MC_OPTS[@]}" --quiet config host add wdminio ${S3_ENDPOINT_URL} ${S3_ACCESS_KEY} ${S3_SECRET_KEY} > /dev/null
"${MC}" "${MC_OPTS[@]}" --quiet config host add wdminio ${S3_ENDPOINT_URL} ${S3_ACCESS_KEY} ${S3_SECRET_KEY} > /dev/null
for bucket_path in "${TMP_WORK_DIR}/${MINIO_BACKUP_DIR}"/*
do
bucket="$(basename "${bucket_path}")"
brlog "DEBUG" "Restoring bucket '$bucket' ..."
if [ -n "${BUCKET_SUFFIX}" ] && [[ "${bucket}" != *"${BUCKET_SUFFIX}" ]] ; then
mv "${TMP_WORK_DIR}/${MINIO_BACKUP_DIR}/${bucket}" "${TMP_WORK_DIR}/${MINIO_BACKUP_DIR}/${bucket}${BUCKET_SUFFIX}"
bucket="${bucket}${BUCKET_SUFFIX}"
fi
if ${MC} "${MC_OPTS[@]}" ls wdminio | grep ${bucket} > /dev/null ; then
if [ -n "$(${MC} "${MC_OPTS[@]}" ls wdminio/${bucket}/)" ] ; then
${MC} "${MC_OPTS[@]}" --quiet rm --recursive --force --dangerous "wdminio/${bucket}/" > /dev/null
if "${MC}" "${MC_OPTS[@]}" ls wdminio | grep ${bucket} > /dev/null ; then
if [ -n "$("${MC}" "${MC_OPTS[@]}" ls wdminio/${bucket}/)" ] ; then
"${MC}" "${MC_OPTS[@]}" --quiet rm --recursive --force --dangerous "wdminio/${bucket}/" > /dev/null
fi
if [ "${bucket}" = "discovery-dfs" ] ; then
continue
fi
set +e
${MC} "${MC_OPTS[@]}" --quiet mirror ${TMP_WORK_DIR}/${MINIO_BACKUP_DIR}/${bucket} wdminio/${bucket} &>> "${BACKUP_RESTORE_LOG_DIR}/${CURRENT_COMPONENT}.log"
"${MC}" "${MC_OPTS[@]}" mirror "${MIRROR_OPTS[@]}" ${TMP_WORK_DIR}/${MINIO_BACKUP_DIR}/${bucket} wdminio/${bucket} &>> "${BACKUP_RESTORE_LOG_DIR}/${CURRENT_COMPONENT}.log"
RC=$?
echo "RC=${RC}" >> "${BACKUP_RESTORE_LOG_DIR}/${CURRENT_COMPONENT}.log"
if [ $RC -ne 0 ] ; then
Expand Down
Loading

0 comments on commit e536b21

Please sign in to comment.