Skip to content

Commit

Permalink
Remove kubectl_bin from collect_k8s_logs and delete_crd
Browse files Browse the repository at this point in the history
  • Loading branch information
tplavcic committed Oct 19, 2023
1 parent 48800bb commit dc207e0
Showing 1 changed file with 25 additions and 25 deletions.
50 changes: 25 additions & 25 deletions e2e-tests/functions
Original file line number Diff line number Diff line change
Expand Up @@ -603,7 +603,7 @@ wait_for_delete() {
let retry+=1
if [ $retry -ge $wait_time ]; then
collect_k8s_logs
kubectl_bin logs ${OPERATOR_NS:+-n $OPERATOR_NS} $(get_operator_pod) \
kubectl logs ${OPERATOR_NS:+-n $OPERATOR_NS} $(get_operator_pod) \
| grep -v 'level=info' \
| grep -v 'level=debug' \
| grep -v 'Getting tasks for pod' \
Expand Down Expand Up @@ -888,21 +888,21 @@ deploy_cert_manager() {
# Delete the operator's CRDs and RBAC objects, tolerating partial/absent state.
# Globals:   src_dir (read) - repo root containing deploy/ manifests
#            OPERATOR_NS (read) - if set, cluster-wide RBAC manifest is used
# Arguments: none
# Outputs:   kubectl/yq progress to stdout/stderr
# Returns:   0 (all deletion failures are intentionally ignored with '|| :')
delete_crd() {
	desc 'get and delete old CRDs and RBAC'

	# Start CRD deletion without waiting; instances may block it on finalizers.
	kubectl delete -f "${src_dir}/deploy/crd.yaml" --ignore-not-found --wait=false || :
	for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-'); do
		# Strip finalizers from every remaining custom resource so the CRD
		# deletion above can complete (xargs receives "NAMESPACE NAME" pairs).
		kubectl get ${crd_name} --all-namespaces -o wide \
			| grep -v 'NAMESPACE' \
			| xargs -L 1 sh -xc 'kubectl patch '${crd_name}' -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
			|| :
		kubectl wait --for=delete crd ${crd_name} || :
	done

	# Cluster-wide deployments (OPERATOR_NS set) use the cw-rbac manifest.
	local rbac_yaml='rbac.yaml'
	if [ -n "${OPERATOR_NS}" ]; then
		rbac_yaml='cw-rbac.yaml'
	fi

	kubectl delete -f "${src_dir}/deploy/$rbac_yaml" --ignore-not-found || true
}

destroy() {
Expand All @@ -911,7 +911,7 @@ destroy() {

desc 'destroy cluster/operator and all other resources'
if [ ${ignore_logs} == "false" ] && [ "${DEBUG_TESTS}" == 1 ]; then
kubectl_bin logs ${OPERATOR_NS:+-n $OPERATOR_NS} $(get_operator_pod) \
kubectl logs ${OPERATOR_NS:+-n $OPERATOR_NS} $(get_operator_pod) \
| grep -v 'level=info' \
| grep -v 'level=debug' \
| grep -v 'Getting tasks for pod' \
Expand All @@ -927,16 +927,16 @@ destroy() {

delete_crd

kubectl_bin delete -f "https://github.com/cert-manager/cert-manager/releases/download/v${CERT_MANAGER_VER}/cert-manager.yaml" 2>/dev/null || :
kubectl delete -f "https://github.com/cert-manager/cert-manager/releases/download/v${CERT_MANAGER_VER}/cert-manager.yaml" 2>/dev/null || :
if [ -n "$OPENSHIFT" ]; then
oc delete --grace-period=0 --force=true project "$namespace" &
if [ -n "$OPERATOR_NS" ]; then
oc delete --grace-period=0 --force=true project "$OPERATOR_NS" &
fi
else
kubectl_bin delete --grace-period=0 --force=true namespace "$namespace" &
kubectl delete --grace-period=0 --force=true namespace "$namespace" &
if [ -n "$OPERATOR_NS" ]; then
kubectl_bin delete --grace-period=0 --force=true namespace "$OPERATOR_NS" &
kubectl delete --grace-period=0 --force=true namespace "$OPERATOR_NS" &
fi
fi
rm -rf ${tmp_dir}
Expand Down Expand Up @@ -1234,7 +1234,7 @@ check_crd_for_deletion() {
local git_tag="$1"

for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g'); do
if [[ $(kubectl_bin get crd/${crd_name} -o jsonpath='{.status.conditions[-1].type}') == "Terminating" ]]; then
if [[ $(kubectl get crd/${crd_name} -o jsonpath='{.status.conditions[-1].type}') == "Terminating" ]]; then
kubectl get ${crd_name} --all-namespaces -o wide \
| grep -v 'NAMESPACE' \
| xargs -L 1 sh -xc 'kubectl patch '${crd_name}' -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
Expand Down Expand Up @@ -1312,39 +1312,39 @@ collect_k8s_logs() {
mkdir -p ${logs_path}

for ns in ${check_namespaces}; do
local pods=$(kubectl_bin get pods -n "${ns}" -o name | awk -F "/" '{print $2}')
local pods=$(kubectl get pods -n "${ns}" -o name | awk -F "/" '{print $2}')
for p in ${pods}; do
kubectl_bin -n "${ns}" describe pod ${p} >${logs_path}/pod_${ns}_${p}.dsc || :
local containers=$(kubectl_bin -n "${ns}" get pod ${p} -o jsonpath='{.spec.containers[*].name}')
kubectl -n "${ns}" describe pod ${p} >${logs_path}/pod_${ns}_${p}.dsc || :
local containers=$(kubectl -n "${ns}" get pod ${p} -o jsonpath='{.spec.containers[*].name}')
for c in ${containers}; do
kubectl_bin -n "${ns}" logs ${p} -c ${c} >${logs_path}/container_${p}_${c}.log || :
kubectl -n "${ns}" logs ${p} -c ${c} >${logs_path}/container_${p}_${c}.log || :
echo "logs saved in: ${logs_path}/${ns}_${p}_${c}.log"
done
done
done
for object in psmdb psmdb-backup psmdb-restore pods deployments replicasets services sts configmaps persistentvolumeclaims persistentvolumes secrets roles issuer certificate; do
echo "##### START: ${object} NS: ${namespace} #####" >>${logs_path}/_overview_${namespace}.txt
kubectl_bin get ${object} -n "${namespace}" >>${logs_path}/_overview_${namespace}.txt || :
kubectl get ${object} -n "${namespace}" >>${logs_path}/_overview_${namespace}.txt || :
echo -e "##### END: ${object} NS: ${namespace} #####\n" >>${logs_path}/_overview_${namespace}.txt
kubectl_bin get ${object} -n "${namespace}" -oyaml >${logs_path}/${object}_${namespace}.yaml || :
kubectl_bin describe ${object} -n "${namespace}" >${logs_path}/${object}_${namespace}.dsc || :
kubectl get ${object} -n "${namespace}" -oyaml >${logs_path}/${object}_${namespace}.yaml || :
kubectl describe ${object} -n "${namespace}" >${logs_path}/${object}_${namespace}.dsc || :
done
kubectl_bin get events --all-namespaces >${logs_path}/_events.log || :
kubectl_bin get nodes >${logs_path}/_nodes.log || :
kubectl_bin get clusterroles >${logs_path}/_clusterroles.log || :
kubectl get events --all-namespaces >${logs_path}/_events.log || :
kubectl get nodes >${logs_path}/_nodes.log || :
kubectl get clusterroles >${logs_path}/_clusterroles.log || :

local secret psmdb_secret psmdb_user psmdb_pass
for psmdb_name in "$(kubectl_bin get psmdb -n ${namespace} -o custom-columns=NAME:.metadata.name --no-headers=true)"; do
psmdb_secret="$(kubectl_bin get psmdb ${psmdb_name} -n ${namespace} -ojsonpath='{.spec.secrets.users}')"
for psmdb_name in "$(kubectl get psmdb -n ${namespace} -o custom-columns=NAME:.metadata.name --no-headers=true)"; do
psmdb_secret="$(kubectl get psmdb ${psmdb_name} -n ${namespace} -ojsonpath='{.spec.secrets.users}')"
if [[ ${psmdb_secret} ]]; then secret="${psmdb_secret}"; else secret="${psmdb_name}-secrets"; fi
psmdb_user="$(kubectl_bin get secrets ${psmdb_secret} -ojsonpath='{.data.MONGODB_BACKUP_USER}' | base64 --decode)"
psmdb_pass="$(kubectl_bin get secrets ${psmdb_secret} -ojsonpath='{.data.MONGODB_BACKUP_PASSWORD}' | base64 --decode)"
if [[ "$(kubectl_bin get psmdb ${psmdb_name} -n ${namespace} -ojsonpath='{.spec.sharding.enabled}')" == "true" ]]; then
psmdb_user="$(kubectl get secrets ${psmdb_secret} -ojsonpath='{.data.MONGODB_BACKUP_USER}' | base64 --decode)"
psmdb_pass="$(kubectl get secrets ${psmdb_secret} -ojsonpath='{.data.MONGODB_BACKUP_PASSWORD}' | base64 --decode)"
if [[ "$(kubectl get psmdb ${psmdb_name} -n ${namespace} -ojsonpath='{.spec.sharding.enabled}')" == "true" ]]; then
local cfg_replica="cfg"
echo "##### sh.status() #####" >${logs_path}/mongos_${psmdb_name}.mongo
run_mongos 'sh.status()' "${psmdb_user}:${psmdb_pass}@${psmdb_name}-mongos.${namespace}" >>${logs_path}/mongos_${psmdb_name}.mongo
fi
for psmdb_replset in $(kubectl_bin get psmdb ${psmdb_name} -n ${namespace} -ojsonpath='{.spec.replsets[*].name}' | awk '{print $0" '${cfg_replica}'"}'); do
for psmdb_replset in $(kubectl get psmdb ${psmdb_name} -n ${namespace} -ojsonpath='{.spec.replsets[*].name}' | awk '{print $0" '${cfg_replica}'"}'); do
local command=("rs.status()" "rs.config()" "db.printSlaveReplicationInfo()" "db.serverCmdLineOpts()" "db.getRoles()" "db.getUsers()")
for com in "${command[@]}"; do
echo "##### START: ${com} #####" >>${logs_path}/mongodb_${psmdb_name}_${psmdb_replset}.mongo
Expand Down

0 comments on commit dc207e0

Please sign in to comment.