Commit 96b662b

Updated a test for PR check

Signed-off-by: Chandrakala Subramanyam <[email protected]>

chandrams committed Mar 20, 2023
1 parent bcc55e5 commit 96b662b
Showing 8 changed files with 352 additions and 20 deletions.
13 changes: 7 additions & 6 deletions .github/workflows/test-on-pr.yaml
@@ -97,17 +97,18 @@ jobs:
echo Deploy kruize in crc mode
echo "***************************************************************"
cp ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml.old
sed -e "s/image: kruize\/autotune_operator:.*/image: autotune_operator:test/g" ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml.old > ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
sed -i "s/imagePullPolicy: Always/imagePullPolicy: IfNotPresent/g" ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
sed -e "s/imagePullPolicy: Always/imagePullPolicy: IfNotPresent/g" ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml.old > ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
cat ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
kubectl apply -f ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
sleep 120
./scripts/ffdc.sh -m crc -d ${GITHUB_WORKSPACE}
cd tests
./test_autotune.sh -c minikube -i autotune_operator:test --testsuite=remote_monitoring_tests --testcase=test_e2e --resultsdir=${GITHUB_WORKSPACE}
cd ..
./scripts/ffdc.sh -m crc -d ${GITHUB_WORKSPACE}/kruize_test_results
- name: Archive results
if: always()
run: |
cd ${GITHUB_WORKSPACE}
tar cvf crc_results.tar kruize_*log.txt
tar cvf crc_results.tar kruize_test_results
- name: Upload results
if: always()
12 changes: 6 additions & 6 deletions .github/workflows/test-on-push.yaml
@@ -107,17 +107,17 @@ jobs:
echo Deploy Kruize in crc mode
echo "***************************************************************"
cp ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml.old
sed -e "s/image: kruize\/autotune_operator:.*/image: kruize\/autotune_operator:test/g" ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml.old > ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
sed -i "s/imagePullPolicy: Always/imagePullPolicy: IfNotPresent/g" ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
sed -e "s/imagePullPolicy: Always/imagePullPolicy: IfNotPresent/g" ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml.old > ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
cat ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
kubectl apply -f ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
sleep 120
./scripts/ffdc.sh -m crc -d ${GITHUB_WORKSPACE}
cd tests
./test_autotune.sh -c minikube -i kruize/autotune_operator:test --testsuite=remote_monitoring_tests --testcase=test_e2e --resultsdir=${GITHUB_WORKSPACE}
cd ..
./scripts/ffdc.sh -m crc -d ${GITHUB_WORKSPACE}/kruize_test_results
- name: Archive results
if: always()
run: |
cd ${GITHUB_WORKSPACE}
tar cvf crc_results.tar kruize_*log.txt
tar cvf crc_results.tar kruize_test_results
- name: Upload results
if: always()
1 change: 1 addition & 0 deletions tests/scripts/remote_monitoring_tests/helpers/kruize.py
@@ -103,6 +103,7 @@ def list_recommendations(experiment_name):
response = requests.get(url)
else:
PARAMS = {'experiment_name': experiment_name}
print("PARAMS = ", PARAMS)
response = requests.get(url = url, params = PARAMS)

print("Response status code = ", response.status_code)
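
For context, list_recommendations issues a GET with the experiment name as a query parameter, and the added print logs the parameters before the request goes out. A minimal standalone sketch of the same request pattern (the host and port are placeholders for illustration, not taken from this diff):

import requests

# Placeholder base URL; the test helpers build the real one from the cluster setup.
url = "http://kruize-host:8080/listRecommendations"
PARAMS = {'experiment_name': 'quarkus-resteasy-kruize-min-http-response-time-db'}
print("PARAMS = ", PARAMS)
response = requests.get(url = url, params = PARAMS)
print("Response status code = ", response.status_code)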
135 changes: 131 additions & 4 deletions tests/scripts/remote_monitoring_tests/helpers/utils.py
@@ -18,7 +18,7 @@
import json
import os
import re
from datetime import datetime
from datetime import datetime, timedelta

SUCCESS_STATUS_CODE = 201
SUCCESS_200_STATUS_CODE = 200
@@ -130,8 +130,18 @@ def generate_test_data(csvfile, test_data):
test_data = read_test_data_from_csv(csvfile)
return test_data

def write_json_data_to_file(filename, data):
"""
Helper to write json data to a file
"""
try:
with open(filename, "w") as f:
json.dump(data, f, indent=4)
return data
except:
return None

def read_data_from_json(filename):
def read_json_data_from_file(filename):
"""
Helper to read Json file
"""
@@ -153,22 +163,139 @@ def read_test_data_from_csv(csv_file):

return test_data

def generate_json(find_arr, json_file, filename, i):

def generate_json(find_arr, json_file, filename, i, update_timestamps = False):
with open(json_file, 'r') as file:
data = file.read()

for find in find_arr:
replace = find + "_" + str(i)
data = data.replace(find, replace)

if update_timestamps == True:
find = "2022-01-23T18:25:43.511Z"
replace = increment_timestamp(find, i)
data = data.replace(find, replace)

find = "2022-01-23T18:55:43.511Z"
replace = increment_timestamp(find, i)
data = data.replace(find, replace)

with open(filename, 'w') as file:
file.write(data)

def increment_timestamp(input_timestamp, step):
input_date = datetime.strptime(input_timestamp, "%Y-%m-%dT%H:%M:%S.%fZ")
minutes = 50 * step + 3600
output_date = input_date + timedelta(minutes=minutes)
timestamp = output_date.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]+'Z'

return timestamp


def get_datetime():
my_datetime = datetime.today()
time_str = my_datetime.isoformat(timespec = 'milliseconds')
time_str = time_str + "Z"
return time_str

def increment_date_time(input_date_str, term):
duration = {"short_term": 1, "medium_term": 7, "long_term": 15}
input_date = datetime.strptime(input_date_str, "%Y-%m-%dT%H:%M:%S.%fZ")

output_date = input_date - timedelta(days=duration[term])
output_date_str = output_date.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]+'Z'

return output_date_str

def validate_reco_json(create_exp_json, update_results_json, list_reco_json):

# Validate experiment
assert create_exp_json["version"] == list_reco_json["version"]
assert create_exp_json["experiment_name"] == list_reco_json["experiment_name"]
assert create_exp_json["cluster_name"] == list_reco_json["cluster_name"]

# Validate kubernetes objects
length = len(list_reco_json["kubernetes_objects"])
for i in range(length):
validate_kubernetes_obj(create_exp_json["kubernetes_objects"][i], update_results_json, list_reco_json["kubernetes_objects"][i])


def validate_kubernetes_obj(create_exp_kubernetes_obj, update_results_json, list_reco_kubernetes_obj):

# Validate type, name, namespace
assert list_reco_kubernetes_obj["type"] == create_exp_kubernetes_obj["type"]
assert list_reco_kubernetes_obj["name"] == create_exp_kubernetes_obj["name"]
assert list_reco_kubernetes_obj["namespace"] == create_exp_kubernetes_obj["namespace"]

# Validate the count of containers
assert len(list_reco_kubernetes_obj["containers"]) == len(create_exp_kubernetes_obj["containers"])

container_names = []
length = len(create_exp_kubernetes_obj["containers"])
for i in range(length):
container_names.append(create_exp_kubernetes_obj["containers"][i]["container_name"])

container_names.sort()
print(container_names)

# Validate if all the containers are present
for i in range(length):
list_reco_container = None
update_results_container = None
for j in range(length):
if list_reco_kubernetes_obj["containers"][j]["container_name"] == create_exp_kubernetes_obj["containers"][i]["container_name"]:
create_exp_container = create_exp_kubernetes_obj["containers"][i]
list_reco_container = list_reco_kubernetes_obj["containers"][j]
validate_container(create_exp_container, update_results_json, list_reco_container)

def validate_container(create_exp_container, update_results_json, list_reco_container):
# Validate container image name and container name
if create_exp_container != None and list_reco_container != None:
assert list_reco_container["container_image_name"] == create_exp_container["container_image_name"]
assert list_reco_container["container_name"] == create_exp_container["container_name"]

# Validate timestamps
end_timestamp = update_results_json["end_timestamp"]
start_timestamp = update_results_json["start_timestamp"]
duration_based_obj = list_reco_container["recommendations"][end_timestamp]["duration_based"]

duration_terms = ["short_term", "medium_term", "long_term"]
for term in duration_terms:
if check_if_recommendations_are_present(duration_based_obj[term]):

# Validate timestamps
assert duration_based_obj[term]["monitoring_end_time"] == end_timestamp

monitoring_start_time = increment_date_time(end_timestamp, term)
print(f"actual = {duration_based_obj[term]['monitoring_start_time']} expected = {monitoring_start_time}")
assert duration_based_obj[term]["monitoring_start_time"] == monitoring_start_time

# Validate duration in hrs
expected_duration_in_hours = time_diff_in_hours(start_timestamp, end_timestamp)
assert duration_based_obj[term]["duration_in_hours"] == expected_duration_in_hours

# Validate recommendation config
validate_config(duration_based_obj[term]["config"])

def validate_config(reco_config):
usage_list = ["requests", "limits"]
for usage in usage_list:
assert reco_config[usage]["cpu"]["amount"] > 0
assert reco_config[usage]["cpu"]["format"] == "cores"
assert reco_config[usage]["memory"]["amount"] > 0
assert reco_config[usage]["memory"]["format"] == "MiB"

def check_if_recommendations_are_present(duration_based_obj):
no_reco_msg = "There is not enough data available to generate a recommendation."

for notification in duration_based_obj["notifications"]:
if notification["message"] == no_reco_msg:
return False
return True

def time_diff_in_hours(start_timestamp, end_timestamp):
start_date = datetime.strptime(start_timestamp, "%Y-%m-%dT%H:%M:%S.%fZ")
end_date = datetime.strptime(end_timestamp, "%Y-%m-%dT%H:%M:%S.%fZ")
diff = end_date - start_date
return diff.total_seconds() / 3600
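
To see how the new helpers fit together: generate_json rewrites the canned timestamps in the template JSON when update_timestamps is set, and the validators recompute the expected windows from the same formulas. A small usage sketch, with values worked out by hand from the definitions above (the import path is an assumption; the suite keeps these helpers in helpers/utils.py):

from helpers.utils import increment_timestamp, increment_date_time, time_diff_in_hours

# increment_timestamp advances by 50*step + 3600 minutes, so step=2
# moves the stamp forward by 3700 minutes (2 days, 13 h, 40 min).
print(increment_timestamp("2022-01-23T18:25:43.511Z", 2))
# -> 2022-01-26T08:05:43.511Z

# increment_date_time subtracts the term's window: 1, 7, or 15 days
# for short_term, medium_term, and long_term respectively.
print(increment_date_time("2022-01-23T18:55:43.511Z", "short_term"))
# -> 2022-01-22T18:55:43.511Z

# The sample payload spans 18:25 to 18:55, i.e. half an hour.
print(time_diff_in_hours("2022-01-23T18:25:43.511Z", "2022-01-23T18:55:43.511Z"))
# -> 0.5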

109 changes: 108 additions & 1 deletion tests/scripts/remote_monitoring_tests/json_files/update_results.json
@@ -3,13 +3,120 @@
"version": "1.0",
"experiment_name": "quarkus-resteasy-kruize-min-http-response-time-db",
"start_timestamp": "2022-01-23T18:25:43.511Z",
"end_timestamp": "2022-01-23T18:25:43.511Z",
"end_timestamp": "2022-01-23T18:55:43.511Z",
"kubernetes_objects": [
{
"type": "deployment",
"name": "tfb-qrh-deployment",
"namespace": "default",
"containers": [
{
"container_image_name": "kruize/tfb-db:1.15",
"container_name": "tfb-server-0",
"metrics": [
{
"name": "cpuRequest",
"results": {
"value": 1.1,
"format": "cores",
"aggregation_info": {
"sum": 3.4,
"avg": 2.1,
"format": "cores"
}
}
},
{
"name": "cpuLimit",
"results": {
"value": 0.8,
"format": "cores",
"aggregation_info": {
"sum": 3.0,
"avg": 1.5,
"format": "cores"
}
}
},
{
"name": "cpuUsage",
"results": {
"value": 0.15,
"format": "cores",
"aggregation_info": {
"min": 0.54,
"max": 0.94,
"sum": 0.52,
"avg": 0.12,
"format": "cores"
}
}
},
{
"name": "cpuThrottle",
"results": {
"value": 0.05,
"format": "cores",
"aggregation_info": {
"sum": 0.9,
"max": 0.09,
"avg": 0.04,
"format": "cores"
}
}
},
{
"name": "memoryRequest",
"results": {
"value": 50.12,
"format": "MiB",
"aggregation_info": {
"sum": 260.85,
"avg": 50.21,
"format": "MiB"
}
}
},
{
"name": "memoryLimit",
"results": {
"value": 100,
"format": "MiB",
"aggregation_info": {
"sum": 700,
"avg": 100,
"format": "MiB"
}
}
},
{
"name": "memoryUsage",
"results": {
"value": 40.1,
"format": "MiB",
"aggregation_info": {
"min": 50.6,
"max": 198.50,
"sum": 298.50,
"avg": 40.1,
"format": "MiB"
}
}
},
{
"name": "memoryRSS",
"results": {
"aggregation_info": {
"min": 50.6,
"max": 523.6,
"sum": 123.6,
"avg": 31.91,
"format": "MiB"
}
}
}
]
},
{
"container_image_name": "kruize/tfb-qrh:1.13.2.F_et17",
"container_name": "tfb-server-1",
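
The payload above now spans a thirty-minute window and carries aggregation_info blocks per metric. As a rough illustration of how such a results file is posted to Kruize (host and endpoint are assumptions for this sketch; the suite goes through its kruize.py helpers instead):

import json
import requests

# Placeholder URL; the helpers derive the real one from the deployment.
url = "http://kruize-host:8080/updateResults"

with open("update_results.json") as f:
    payload = json.load(f)

response = requests.post(url, json=payload)
# The tests treat 201 (SUCCESS_STATUS_CODE in utils.py) as success.
print("Response status code = ", response.status_code)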
1 change: 1 addition & 0 deletions tests/scripts/remote_monitoring_tests/pytest.ini
@@ -2,5 +2,6 @@
[pytest]
markers =
sanity: mark a test as a sanity test
test_e2e: mark a test as an end-to-end test
negative: mark a test as a negative test
extended: mark a test as an extended test
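
With test_e2e registered, end-to-end tests can be tagged and selected by marker instead of by file path. A hypothetical sketch of how a tagged test might look (the test name and the cluster_type fixture are illustrative; the suite passes --cluster_type on the command line):

import pytest

@pytest.mark.test_e2e
def test_recommendations_end_to_end(cluster_type):
    # Selected via: pytest -m test_e2e
    assert cluster_type in ("minikube", "openshift")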
@@ -33,6 +33,7 @@ function remote_monitoring_tests() {
TESTS_PASSED=0
TESTS=0
failed=0
marker_options=""
((TOTAL_TEST_SUITES++))

python3 --version >/dev/null 2>/dev/null
@@ -47,7 +48,7 @@ function remote_monitoring_tests() {
target="crc"
perf_profile_json="${REMOTE_MONITORING_TEST_DIR}/json_files/resource_optimization_openshift.json"

remote_monitoring_tests=("test_create_experiment" "test_update_results")
remote_monitoring_tests=("sanity" "negative" "extended" "test_e2e")

# check if the test case is supported
if [ ! -z "${testcase}" ]; then
@@ -107,9 +108,9 @@ function remote_monitoring_tests() {
echo "Test description: ${remote_monitoring_test_description[$test]}" | tee -a ${LOG}
echo " " | tee -a ${LOG}

echo "pytest ${REMOTE_MONITORING_TEST_DIR}/rest_apis/${test}.py --cluster_type ${cluster_type}" | tee -a ${LOG}
pushd ${REMOTE_MONITORING_TEST_DIR}/rest_apis > /dev/null
pytest --html=${TEST_DIR}/report.html ${test}.py --cluster_type ${cluster_type} | tee -a ${LOG}
echo "pytest -m ${test} --html=${TEST_DIR}/report.html --cluster_type ${cluster_type}"
pytest -m ${test} --html=${TEST_DIR}/report.html --cluster_type ${cluster_type} | tee -a ${LOG}
popd > /dev/null
if grep -q "AssertionError" "${LOG}" ; then
failed=1
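
The loop above now treats each entry of remote_monitoring_tests as a pytest marker and hands it to the -m selector. A rough Python equivalent of a single iteration (paths simplified; report location assumed):

import pytest

# Same as: pytest -m sanity --html=report.html --cluster_type minikube,
# run from tests/scripts/remote_monitoring_tests/rest_apis.
pytest.main(["-m", "sanity", "--html=report.html", "--cluster_type", "minikube"])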