Skip to content

Commit

Permalink
Merge pull request #255 from KosukeOkamoto/update-discovery-scripts-for-4.8.2
Browse files Browse the repository at this point in the history

Update script for Watson Discovery 4.8.2 on CP4D
  • Loading branch information
kvineetnair authored Feb 1, 2024
2 parents a55030a + 653e4c5 commit 4fd220f
Show file tree
Hide file tree
Showing 3 changed files with 31 additions and 31 deletions.
16 changes: 8 additions & 8 deletions discovery-data/latest/all-backup-restore.sh
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ set -e

BACKUP_DIR="tmp"
TMP_WORK_DIR="tmp/all_backup"
SPLITE_DIR=./tmp_split_bakcup
SPLIT_DIR=./tmp_split_backup
EXTRA_OC_ARGS="${EXTRA_OC_ARGS:-}"

SCRIPT_DIR=$(dirname $0)
Expand All @@ -23,12 +23,12 @@ Usage:
Options:
--help, -h Show help
--file, -f Speccify backup file
--file, -f Specify backup file
--mapping, -m <mapping_file> Specify mapping file for restore to multi tenant clusters
--instance-name, -i <instance_name> Instance name for a new Discovery instance. This name will be used if there is no Discovery instance when restore backup of Discovery 4.0.5 or older
--cp4d-user-id <user_id> User ID to create Discovery instance. Default: admin user ID.
--cp4d-user-name <user_name> User name to create Discovery instance. Default: admin.
--log-output-dir <directory_path> Specify outout direcotry of detailed component logs
--log-output-dir <directory_path> Specify output directory of detailed component logs
--continue-from <component_name> Resume backup or restore from specified component. Values: wddata, etcd, postgresql, elastic, minio, archive, migration, post-restore
--quiesce-on-error=[true|false] If true, not unquiesce on error during backup or restore. Default false on backup, true on restore.
--clean Remove existing tmp directory before start backup or restore.
Expand All @@ -38,8 +38,8 @@ Basically, you don't need these advanced options.
--archive-on-local Archive the backup files of etcd and postgresql on local machine. Use this flag to reduce the disk usage on their pod or compress the files with specified option, but it might take much time.
--backup-archive-option="<tar_option>" Tar options for compression used on archiving the backup file. Default none.
--datastore-archive-option="<tar_option>" Tar options for comporession used on archiving the backup files of ElasticSearch, MinIO and internal configuration. Default "-z".
--postgresql-archive-option="<tar_option>" Tar options for comporession used on archiving the backup files of postgres. Note that the backup files of postgresql are archived on its pod by default. Default "-z".
--datastore-archive-option="<tar_option>" Tar options for compression used on archiving the backup files of ElasticSearch, MinIO and internal configuration. Default "-z".
--postgresql-archive-option="<tar_option>" Tar options for compression used on archiving the backup files of postgres. Note that the backup files of postgresql are archived on its pod by default. Default "-z".
--etcd-archive-option="<tar_option>" Tar options used on archiving the backup files of etcd. Note that the backup files of etcd are archived on its pod by default. Default "-z".
--skip-verify-archive Skip the all verifying process of the archive.
--skip-verify-backup Skip verifying the backup file.
Expand Down Expand Up @@ -335,11 +335,11 @@ if [ -z "${CONTINUE_FROM_COMPONENT+UNDEF}" ] && [ -d "${BACKUP_DIR}" ] ; then
fi
fi

if [ -d "${SPLITE_DIR}" ] ; then
if [ -d "${SPLIT_DIR}" ] ; then
if "${CLEAN}" ; then
rm -rf "${SPLITE_DIR}"
rm -rf "${SPLIT_DIR}"
else
brlog "ERROR" "Please remove ${SPLITE_DIR}"
brlog "ERROR" "Please remove ${SPLIT_DIR}"
exit 1
fi
fi
Expand Down
44 changes: 22 additions & 22 deletions discovery-data/latest/lib/function.bash
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ validate_version(){
VERSIONS=(${SCRIPT_VERSION//./ })
VERSION="${VERSIONS[0]}.${VERSIONS[1]}.${VERSIONS[2]}"
if [ $(compare_version "${VERSION}" "${WD_VERSION}") -lt 0 ] ; then
brlog "ERROR" "Invalid script version. The version of scripts '${SCRIPT_VERSION}' is not valid for the version of Watson Doscovery '${WD_VERSION}' "
brlog "ERROR" "Invalid script version. The version of scripts '${SCRIPT_VERSION}' is not valid for the version of Watson Discovery '${WD_VERSION}' "
exit 1
fi
}
Expand Down Expand Up @@ -223,8 +223,8 @@ kube_cp_from_local(){
shift
POD_BACKUP=$1
shift
SPLITE_DIR=./tmp_split_bakcup
SPLITE_SIZE=${BACKUP_RESTORE_SPLIT_SIZE:-500000000}
SPLIT_DIR=./tmp_split_backup
SPLIT_SIZE=${BACKUP_RESTORE_SPLIT_SIZE:-500000000}
LOCAL_BASE_NAME=$(basename "${LOCAL_BACKUP}")
POD_DIST_DIR=$(dirname "${POD_BACKUP}")

Expand Down Expand Up @@ -252,15 +252,15 @@ kube_cp_from_local(){

STAT_CMD="$(get_stat_command) ${LOCAL_BACKUP}"
LOCAL_SIZE=$(eval "${STAT_CMD}")
if [ ${SPLITE_SIZE} -ne 0 -a ${LOCAL_SIZE} -gt ${SPLITE_SIZE} ] ; then
rm -rf ${SPLITE_DIR}
mkdir -p ${SPLITE_DIR}
split -a 5 -b ${SPLITE_SIZE} ${LOCAL_BACKUP} ${SPLITE_DIR}/${LOCAL_BASE_NAME}.split.
for splitfile in ${SPLITE_DIR}/*; do
if [ ${SPLIT_SIZE} -ne 0 -a ${LOCAL_SIZE} -gt ${SPLIT_SIZE} ] ; then
rm -rf ${SPLIT_DIR}
mkdir -p ${SPLIT_DIR}
split -a 5 -b ${SPLIT_SIZE} ${LOCAL_BACKUP} ${SPLIT_DIR}/${LOCAL_BASE_NAME}.split.
for splitfile in ${SPLIT_DIR}/*; do
FILE_BASE_NAME=$(basename "${splitfile}")
_oc_cp "${splitfile}" "${POD}:${POD_DIST_DIR}/${FILE_BASE_NAME}" $@
done
rm -rf ${SPLITE_DIR}
rm -rf ${SPLIT_DIR}
run_cmd_in_pod ${POD} "cat ${POD_DIST_DIR}/${LOCAL_BASE_NAME}.split.* > ${POD_BACKUP} && rm -rf ${POD_DIST_DIR}/${LOCAL_BASE_NAME}.split.*" $@
else
_oc_cp "${LOCAL_BACKUP}" "${POD}:${POD_BACKUP}" $@
Expand All @@ -279,8 +279,8 @@ kube_cp_to_local(){
shift
POD_BACKUP=$1
shift
SPLITE_DIR=./tmp_split_bakcup
SPLITE_SIZE=${BACKUP_RESTORE_SPLIT_SIZE:-500000000}
SPLIT_DIR=./tmp_split_backup
SPLIT_SIZE=${BACKUP_RESTORE_SPLIT_SIZE:-500000000}
POD_DIST_DIR=$(dirname "${POD_BACKUP}")

if "${IS_RECURSIVE}" ; then
Expand Down Expand Up @@ -308,17 +308,17 @@ kube_cp_to_local(){
fi

POD_SIZE=$(oc $@ exec ${POD} -- sh -c "stat -c "%s" ${POD_BACKUP}")
if [ ${SPLITE_SIZE} -ne 0 -a ${POD_SIZE} -gt ${SPLITE_SIZE} ] ; then
rm -rf ${SPLITE_DIR}
mkdir -p ${SPLITE_DIR}
run_cmd_in_pod ${POD} "split -d -a 5 -b ${SPLITE_SIZE} ${POD_BACKUP} ${POD_BACKUP}.split." $@
if [ ${SPLIT_SIZE} -ne 0 -a ${POD_SIZE} -gt ${SPLIT_SIZE} ] ; then
rm -rf ${SPLIT_DIR}
mkdir -p ${SPLIT_DIR}
run_cmd_in_pod ${POD} "split -d -a 5 -b ${SPLIT_SIZE} ${POD_BACKUP} ${POD_BACKUP}.split." $@
FILE_LIST=$(oc exec $@ ${POD} -- sh -c "ls ${POD_BACKUP}.split.*")
for splitfile in ${FILE_LIST} ; do
FILE_BASE_NAME=$(basename "${splitfile}")
_oc_cp "${POD}:${splitfile}" "${SPLITE_DIR}/${FILE_BASE_NAME}" $@
_oc_cp "${POD}:${splitfile}" "${SPLIT_DIR}/${FILE_BASE_NAME}" $@
done
cat ${SPLITE_DIR}/* > ${LOCAL_BACKUP}
rm -rf ${SPLITE_DIR}
cat ${SPLIT_DIR}/* > ${LOCAL_BACKUP}
rm -rf ${SPLIT_DIR}
oc exec $@ ${POD} -- bash -c "rm -rf ${POD_BACKUP}.split.*"
else
_oc_cp "${POD}:${POD_BACKUP}" "${LOCAL_BACKUP}" $@
Expand Down Expand Up @@ -396,9 +396,9 @@ keep_minio_port_forward(){
while [ -e ${TMP_WORK_DIR}/keep_minio_port_forward ]
do
if [ -n "${S3_NAMESPACE+UNDEF}" ] ; then
oc ${OC_ARGS} -n "${S3_NAMESPACE}" port-forward svc/${S3_PORT_FORWARD_SVC} ${S3_FORWARD_PORT}:${S3_PORT} &>> "${BACKUP_RESTORE_LOG_DIR}/port-foward.log" &
oc ${OC_ARGS} -n "${S3_NAMESPACE}" port-forward svc/${S3_PORT_FORWARD_SVC} ${S3_FORWARD_PORT}:${S3_PORT} &>> "${BACKUP_RESTORE_LOG_DIR}/port-forward.log" &
else
oc ${OC_ARGS} port-forward svc/${S3_PORT_FORWARD_SVC} ${S3_FORWARD_PORT}:${S3_PORT} &>> "${BACKUP_RESTORE_LOG_DIR}/port-foward.log" &
oc ${OC_ARGS} port-forward svc/${S3_PORT_FORWARD_SVC} ${S3_FORWARD_PORT}:${S3_PORT} &>> "${BACKUP_RESTORE_LOG_DIR}/port-forward.log" &
fi
PORT_FORWARD_PID=$!
while [ -e ${TMP_WORK_DIR}/keep_minio_port_forward ] && kill -0 ${PORT_FORWARD_PID} &> /dev/null
Expand Down Expand Up @@ -763,7 +763,7 @@ EOF
files=$(fetch_cmd_result ${pod} "ls /tmp" $@)
if echo "${files}" | grep "${WD_CMD_FAILED_TOKEN}" > /dev/null ; then
oc exec $@ ${pod} -- bash -c "rm -f /tmp/${WD_CMD_FAILED_TOKEN}"
brlog "ERROR" "Something error happned while running command in ${pod}. See ${BACKUP_RESTORE_LOG_DIR}/${CURRENT_COMPONENT}.log for details."
brlog "ERROR" "Something error happened while running command in ${pod}. See ${BACKUP_RESTORE_LOG_DIR}/${CURRENT_COMPONENT}.log for details."
exit 1
fi
}
Expand Down Expand Up @@ -1273,7 +1273,7 @@ create_restore_instance_mappings(){
brlog "ERROR" "Failed to create Discovery service instance for ${src_instances[$i]}"
return 1
else
brlog "INFO" "Created Disocvery service instance: ${instance_id}"
brlog "INFO" "Created Discovery service instance: ${instance_id}"
mapping=$(fetch_cmd_result ${ELASTIC_POD} "echo '${mapping}' | jq -r '.instance_mappings |= . + [{\"source_instance_id\": \"${src_instances[$i]}\", \"dest_instance_id\": \"${instance_id}\"}]'" -c elasticsearch)
fi
done
Expand Down
2 changes: 1 addition & 1 deletion discovery-data/latest/version.txt
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
The Backup and Restore Scripts for the Watson Discovery on CP4D.
Scripts Version: 4.8.0
Scripts Version: 4.8.2

0 comments on commit 4fd220f

Please sign in to comment.