diff --git a/.github/workflows/test-on-pr.yaml b/.github/workflows/test-on-pr.yaml
index 17ea4e408..5f1b70bf0 100644
--- a/.github/workflows/test-on-pr.yaml
+++ b/.github/workflows/test-on-pr.yaml
@@ -97,17 +97,18 @@ jobs:
           echo Deploy kruize in crc mode
           echo "***************************************************************"
           cp ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml.old
-          sed -e "s/image: kruize\/autotune_operator:.*/image: autotune_operator:test/g" ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml.old > ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
-          sed -i "s/imagePullPolicy: Always/imagePullPolicy: IfNotPresent/g" ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
+          sed -e "s/imagePullPolicy: Always/imagePullPolicy: IfNotPresent/g" ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml.old > ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
           cat ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
-          kubectl apply -f ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
-          sleep 120
-          ./scripts/ffdc.sh -m crc -d ${GITHUB_WORKSPACE}
+
+          cd tests
+          ./test_autotune.sh -c minikube -i autotune_operator:test --testsuite=remote_monitoring_tests --testcase=test_e2e --resultsdir=${GITHUB_WORKSPACE}
+          cd ..
+          ./scripts/ffdc.sh -m crc -d ${GITHUB_WORKSPACE}/kruize_test_results
       - name: Archive results
         if: always()
         run: |
           cd ${GITHUB_WORKSPACE}
-          tar cvf crc_results.tar kruize_*log.txt
+          tar cvf crc_results.tar kruize_test_results
 
       - name: Upload results
         if: always()
diff --git a/.github/workflows/test-on-push.yaml b/.github/workflows/test-on-push.yaml
index c968f9893..648954fac 100644
--- a/.github/workflows/test-on-push.yaml
+++ b/.github/workflows/test-on-push.yaml
@@ -107,17 +107,17 @@ jobs:
           echo Deploy Kruize in crc mode
           echo "***************************************************************"
           cp ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml.old
-          sed -e "s/image: kruize\/autotune_operator:.*/image: kruize\/autotune_operator:test/g" ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml.old > ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
-          sed -i "s/imagePullPolicy: Always/imagePullPolicy: IfNotPresent/g" ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
+          sed -e "s/imagePullPolicy: Always/imagePullPolicy: IfNotPresent/g" ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml.old > ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
           cat ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
-          kubectl apply -f ./manifests/crc/default-db-included-installation/minikube/kruize-crc-minikube.yaml
-          sleep 120
-          ./scripts/ffdc.sh -m crc -d ${GITHUB_WORKSPACE}
+          cd tests
+          ./test_autotune.sh -c minikube -i kruize/autotune_operator:test --testsuite=remote_monitoring_tests --testcase=test_e2e --resultsdir=${GITHUB_WORKSPACE}
+          cd ..
+          ./scripts/ffdc.sh -m crc -d ${GITHUB_WORKSPACE}/kruize_test_results
       - name: Archive results
         if: always()
         run: |
           cd ${GITHUB_WORKSPACE}
-          tar cvf crc_results.tar kruize_*log.txt
+          tar cvf crc_results.tar kruize_test_results
 
       - name: Upload results
         if: always()
diff --git a/tests/scripts/remote_monitoring_tests/helpers/kruize.py b/tests/scripts/remote_monitoring_tests/helpers/kruize.py
index c51e3b596..c7f2b98a7 100644
--- a/tests/scripts/remote_monitoring_tests/helpers/kruize.py
+++ b/tests/scripts/remote_monitoring_tests/helpers/kruize.py
@@ -103,6 +103,7 @@ def list_recommendations(experiment_name):
         response = requests.get(url)
     else:
         PARAMS = {'experiment_name': experiment_name}
+        print("PARAMS = ", PARAMS)
         response = requests.get(url = url, params = PARAMS)
 
     print("Response status code = ", response.status_code)
diff --git a/tests/scripts/remote_monitoring_tests/helpers/utils.py b/tests/scripts/remote_monitoring_tests/helpers/utils.py
index 209794cb8..30c2b12e3 100644
--- a/tests/scripts/remote_monitoring_tests/helpers/utils.py
+++ b/tests/scripts/remote_monitoring_tests/helpers/utils.py
@@ -18,7 +18,7 @@
 import json
 import os
 import re
-from datetime import datetime
+from datetime import datetime, timedelta
 
 SUCCESS_STATUS_CODE = 201
 SUCCESS_200_STATUS_CODE = 200
@@ -130,8 +130,18 @@ def generate_test_data(csvfile, test_data):
         test_data = read_test_data_from_csv(csvfile)
     return test_data
 
+def write_json_data_to_file(filename, data):
+    """
+    Helper to write json data to a file
+    """
+    try:
+        with open(filename, "w") as f:
+            json.dump(data, f, indent=4)
+        return data
+    except:
+        return None
 
-def read_data_from_json(filename):
+def read_json_data_from_file(filename):
     """
     Helper to read Json file
     """
@@ -153,8 +163,7 @@ def read_test_data_from_csv(csv_file):
 
     return test_data
 
-def generate_json(find_arr, json_file, filename, i):
-
+def generate_json(find_arr, json_file, filename, i, update_timestamps = False):
     with open(json_file, 'r') as file:
         data = file.read()
@@ -162,13 +171,131 @@
         replace = find + "_" + str(i)
         data = data.replace(find, replace)
 
+    if update_timestamps == True:
+        find = "2022-01-23T18:25:43.511Z"
+        replace = increment_timestamp(find, i)
+        data = data.replace(find, replace)
+
+        find = "2022-01-23T18:55:43.511Z"
+        replace = increment_timestamp(find, i)
+        data = data.replace(find, replace)
+
     with open(filename, 'w') as file:
         file.write(data)
 
+def increment_timestamp(input_timestamp, step):
+    input_date = datetime.strptime(input_timestamp, "%Y-%m-%dT%H:%M:%S.%fZ")
+    minutes = 50 * step + 3600
+    output_date = input_date + timedelta(minutes=minutes)
+    timestamp = output_date.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + 'Z'
+
+    return timestamp
+
 
 def get_datetime():
     my_datetime = datetime.today()
     time_str = my_datetime.isoformat(timespec = 'milliseconds')
     time_str = time_str + "Z"
     return time_str
 
+def increment_date_time(input_date_str, term):
+    duration = {"short_term": 1, "medium_term": 7, "long_term": 15}
+    input_date = datetime.strptime(input_date_str, "%Y-%m-%dT%H:%M:%S.%fZ")
+
+    output_date = input_date - timedelta(days=duration[term])
+    output_date_str = output_date.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + 'Z'
+
+    return output_date_str
+
+def validate_reco_json(create_exp_json, update_results_json, list_reco_json):
+
+    # Validate experiment
+    assert create_exp_json["version"] == list_reco_json["version"]
+    assert create_exp_json["experiment_name"] == list_reco_json["experiment_name"]
+    assert create_exp_json["cluster_name"] == list_reco_json["cluster_name"]
list_reco_json["cluster_name"] + + # Validate kubernetes objects + length = len(list_reco_json["kubernetes_objects"]) + for i in range(length): + validate_kubernetes_obj(create_exp_json["kubernetes_objects"][i], update_results_json, list_reco_json["kubernetes_objects"][i]) + + +def validate_kubernetes_obj(create_exp_kubernetes_obj, update_results_json, list_reco_kubernetes_obj): + + # Validate type, name, namespace + assert list_reco_kubernetes_obj["type"] == create_exp_kubernetes_obj["type"] + assert list_reco_kubernetes_obj["name"] == create_exp_kubernetes_obj["name"] + assert list_reco_kubernetes_obj["namespace"] == create_exp_kubernetes_obj["namespace"] + + # Validate the count of containers + assert len(list_reco_kubernetes_obj["containers"]) == len(create_exp_kubernetes_obj["containers"]) + + container_names = [] + length = len(create_exp_kubernetes_obj["containers"]) + for i in range(length): + container_names.append(create_exp_kubernetes_obj["containers"][i]["container_name"]) + + container_names.sort() + print(container_names) + + # Validate if all the containers are present + for i in range(length): + list_reco_container = None + update_results_container = None + for j in range(length): + if list_reco_kubernetes_obj["containers"][j]["container_name"] == create_exp_kubernetes_obj["containers"][i]["container_name"]: + create_exp_container = create_exp_kubernetes_obj["containers"][i] + list_reco_container = list_reco_kubernetes_obj["containers"][j] + validate_container(create_exp_container, update_results_json, list_reco_container) + +def validate_container(create_exp_container, update_results_json, list_reco_container): + # Validate container image name and container name + if create_exp_container != None and list_reco_container != None: + assert list_reco_container["container_image_name"] == create_exp_container["container_image_name"] + assert list_reco_container["container_name"] == create_exp_container["container_name"] + + # Validate timestamps + end_timestamp = update_results_json["end_timestamp"] + start_timestamp = update_results_json["start_timestamp"] + duration_based_obj = list_reco_container["recommendations"][end_timestamp]["duration_based"] + + duration_terms = ["short_term", "medium_term", "long_term"] + for term in duration_terms: + if check_if_recommendations_are_present(duration_based_obj[term]): + + # Validate timestamps + assert duration_based_obj[term]["monitoring_end_time"] == end_timestamp + + monitoring_start_time = increment_date_time(end_timestamp, term) + print(f"actual = {duration_based_obj[term]['monitoring_start_time']} expected = {monitoring_start_time}") + assert duration_based_obj[term]["monitoring_start_time"] == monitoring_start_time + + # Validate duration in hrs + expected_duration_in_hours = time_diff_in_hours(start_timestamp, end_timestamp) + assert duration_based_obj[term]["duration_in_hours"] == expected_duration_in_hours + + # Validate recommendation config + validate_config(duration_based_obj[term]["config"]) + +def validate_config(reco_config): + usage_list = ["requests", "limits"] + for usage in usage_list: + assert reco_config[usage]["cpu"]["amount"] > 0 + assert reco_config[usage]["cpu"]["format"] == "cores" + assert reco_config[usage]["memory"]["amount"] > 0 + assert reco_config[usage]["memory"]["format"] == "MiB" + +def check_if_recommendations_are_present(duration_based_obj): + no_reco_msg = "There is not enough data available to generate a recommendation." 
+
+    for notification in duration_based_obj["notifications"]:
+        if notification["message"] == no_reco_msg:
+            return False
+    return True
+
+def time_diff_in_hours(start_timestamp, end_timestamp):
+    start_date = datetime.strptime(start_timestamp, "%Y-%m-%dT%H:%M:%S.%fZ")
+    end_date = datetime.strptime(end_timestamp, "%Y-%m-%dT%H:%M:%S.%fZ")
+    diff = end_date - start_date
+    return diff.total_seconds() / 3600
diff --git a/tests/scripts/remote_monitoring_tests/json_files/update_results.json b/tests/scripts/remote_monitoring_tests/json_files/update_results.json
index ef27c283b..8567b9a6c 100644
--- a/tests/scripts/remote_monitoring_tests/json_files/update_results.json
+++ b/tests/scripts/remote_monitoring_tests/json_files/update_results.json
@@ -3,13 +3,120 @@
     "version": "1.0",
     "experiment_name": "quarkus-resteasy-kruize-min-http-response-time-db",
     "start_timestamp": "2022-01-23T18:25:43.511Z",
-    "end_timestamp": "2022-01-23T18:25:43.511Z",
+    "end_timestamp": "2022-01-23T18:55:43.511Z",
     "kubernetes_objects": [
       {
         "type": "deployment",
         "name": "tfb-qrh-deployment",
         "namespace": "default",
         "containers": [
+          {
+            "container_image_name": "kruize/tfb-db:1.15",
+            "container_name": "tfb-server-0",
+            "metrics": [
+              {
+                "name": "cpuRequest",
+                "results": {
+                  "value": 1.1,
+                  "format": "cores",
+                  "aggregation_info": {
+                    "sum": 3.4,
+                    "avg": 2.1,
+                    "format": "cores"
+                  }
+                }
+              },
+              {
+                "name": "cpuLimit",
+                "results": {
+                  "value": 0.8,
+                  "format": "cores",
+                  "aggregation_info": {
+                    "sum": 3.0,
+                    "avg": 1.5,
+                    "format": "cores"
+                  }
+                }
+              },
+              {
+                "name": "cpuUsage",
+                "results": {
+                  "value": 0.15,
+                  "format": "cores",
+                  "aggregation_info": {
+                    "min": 0.54,
+                    "max": 0.94,
+                    "sum": 0.52,
+                    "avg": 0.12,
+                    "format": "cores"
+                  }
+                }
+              },
+              {
+                "name": "cpuThrottle",
+                "results": {
+                  "value": 0.05,
+                  "format": "cores",
+                  "aggregation_info": {
+                    "sum": 0.9,
+                    "max": 0.09,
+                    "avg": 0.04,
+                    "format": "cores"
+                  }
+                }
+              },
+              {
+                "name": "memoryRequest",
+                "results": {
+                  "value": 50.12,
+                  "format": "MiB",
+                  "aggregation_info": {
+                    "sum": 260.85,
+                    "avg": 50.21,
+                    "format": "MiB"
+                  }
+                }
+              },
+              {
+                "name": "memoryLimit",
+                "results": {
+                  "value": 100,
+                  "format": "MiB",
+                  "aggregation_info": {
+                    "sum": 700,
+                    "avg": 100,
+                    "format": "MiB"
+                  }
+                }
+              },
+              {
+                "name": "memoryUsage",
+                "results": {
+                  "value": 40.1,
+                  "format": "MiB",
+                  "aggregation_info": {
+                    "min": 50.6,
+                    "max": 198.50,
+                    "sum": 298.50,
+                    "avg": 40.1,
+                    "format": "MiB"
+                  }
+                }
+              },
+              {
+                "name": "memoryRSS",
+                "results": {
+                  "aggregation_info": {
+                    "min": 50.6,
+                    "max": 523.6,
+                    "sum": 123.6,
+                    "avg": 31.91,
+                    "format": "MiB"
+                  }
+                }
+              }
+            ]
+          },
           {
             "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17",
             "container_name": "tfb-server-1",
diff --git a/tests/scripts/remote_monitoring_tests/pytest.ini b/tests/scripts/remote_monitoring_tests/pytest.ini
index 3cf9961ed..48bdd36e6 100644
--- a/tests/scripts/remote_monitoring_tests/pytest.ini
+++ b/tests/scripts/remote_monitoring_tests/pytest.ini
@@ -2,5 +2,6 @@
 [pytest]
 markers =
   sanity: mark a test as a sanity test
+  test_e2e: mark a test as an end-to-end test
   negative: mark test as a negative test
   extended: mark test as a extended test
diff --git a/tests/scripts/remote_monitoring_tests/remote_monitoring_tests.sh b/tests/scripts/remote_monitoring_tests/remote_monitoring_tests.sh
index 138e6adbc..d8c869694 100755
--- a/tests/scripts/remote_monitoring_tests/remote_monitoring_tests.sh
+++ b/tests/scripts/remote_monitoring_tests/remote_monitoring_tests.sh
@@ -33,6 +33,7 @@ function remote_monitoring_tests() {
 	TESTS_PASSED=0
 	TESTS=0
 	failed=0
+	marker_options=""
 	((TOTAL_TEST_SUITES++))
 
 	python3 --version >/dev/null 2>/dev/null
@@ -47,7 +48,7 @@
 	target="crc"
 	perf_profile_json="${REMOTE_MONITORING_TEST_DIR}/json_files/resource_optimization_openshift.json"
 
-	remote_monitoring_tests=("test_create_experiment" "test_update_results")
+	remote_monitoring_tests=("sanity" "negative" "extended" "test_e2e")
 
 	# check if the test case is supported
 	if [ ! -z "${testcase}" ]; then
@@ -107,9 +108,9 @@
 			echo "Test description: ${remote_monitoring_test_description[$test]}" | tee -a ${LOG}
 			echo " " | tee -a ${LOG}
 
-			echo "pytest ${REMOTE_MONITORING_TEST_DIR}/rest_apis/${test}.py --cluster_type ${cluster_type}" | tee -a ${LOG}
 			pushd ${REMOTE_MONITORING_TEST_DIR}/rest_apis > /dev/null
-			pytest --html=${TEST_DIR}/report.html ${test}.py --cluster_type ${cluster_type} | tee -a ${LOG}
+			echo "pytest -m ${test} --html=${TEST_DIR}/report.html --cluster_type ${cluster_type}"
+			pytest -m ${test} --html=${TEST_DIR}/report.html --cluster_type ${cluster_type} | tee -a ${LOG}
 			popd > /dev/null
 			if grep -q "AssertionError" "${LOG}" ; then
 				failed=1
diff --git a/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py b/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py
new file mode 100644
index 000000000..7ca7f3c8e
--- /dev/null
+++ b/tests/scripts/remote_monitoring_tests/rest_apis/test_e2e_workflow.py
@@ -0,0 +1,94 @@
+import requests
+import pytest
+from jinja2 import Environment, FileSystemLoader
+from helpers.utils import *
+from helpers.list_reco_json_validate import *
+from helpers.kruize import *
+from helpers.fixtures import *
+import time
+import json
+
+@pytest.mark.test_e2e
+def test_list_recommendations_multiple_exps_from_diff_json_files(cluster_type):
+    """
+    Test Description: This test validates list recommendations for multiple experiments posted using different json files
+    """
+
+    input_json_file="../json_files/create_exp.json"
+    result_json_file="../json_files/update_results.json"
+
+    find = []
+    json_data = json.load(open(input_json_file))
+
+    find.append(json_data[0]['experiment_name'])
+    find.append(json_data[0]['kubernetes_objects'][0]['name'])
+    find.append(json_data[0]['kubernetes_objects'][0]['namespace'])
+
+    form_kruize_url(cluster_type)
+
+    # Create experiment using the specified json
+    num_exps = 100
+    for i in range(num_exps):
+        json_file = "/tmp/create_exp_" + str(i) + ".json"
+        generate_json(find, input_json_file, json_file, i)
+
+        response = delete_experiment(json_file)
+        print("delete exp = ", response.status_code)
+
+        response = create_experiment(json_file)
+
+        data = response.json()
+        print("message = ", data['message'])
+        assert response.status_code == SUCCESS_STATUS_CODE
+        assert data['status'] == SUCCESS_STATUS
+        assert data['message'] == CREATE_EXP_SUCCESS_MSG
+
+    for i in range(num_exps):
+        # Update results for the experiment
+        json_file = "/tmp/update_results_" + str(i) + ".json"
+        update_timestamps = True
+        generate_json(find, result_json_file, json_file, i, update_timestamps)
+        response = update_results(json_file)
+
+        data = response.json()
+        print("message = ", data['message'])
+        assert response.status_code == SUCCESS_STATUS_CODE
+        assert data['status'] == SUCCESS_STATUS
+        assert data['message'] == UPDATE_RESULTS_SUCCESS_MSG
+
+    time.sleep(30)
+
+    # Get the experiment name
+    json_data = json.load(open(input_json_file))
+
+    experiment_name = ""
+    response = list_recommendations(experiment_name)
+
+    list_reco_json = response.json()
+    assert response.status_code == SUCCESS_200_STATUS_CODE
+
+    # Validate the json against the json schema
+    errorMsg = validate_list_reco_json(list_reco_json)
+    assert errorMsg == ""
+
+    # Validate the json values
+    for i in range(num_exps):
+        input_json_file = "/tmp/create_exp_" + str(i) + ".json"
+        create_exp_json = read_json_data_from_file(input_json_file)
+
+        result_json_file = "/tmp/update_results_" + str(i) + ".json"
+        update_results_json = read_json_data_from_file(result_json_file)
+
+        experiment_name = create_exp_json[0]['experiment_name']
+        response = list_recommendations(experiment_name)
+        list_reco_json = response.json()
+
+        validate_reco_json(create_exp_json[0], update_results_json[0], list_reco_json[0])
+
+    # Delete the experiments
+    for i in range(num_exps):
+        json_file = "/tmp/create_exp_" + str(i) + ".json"
+
+        response = delete_experiment(json_file)
+        print("delete exp = ", response.status_code)
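
Note: as a quick sanity check of the timestamp arithmetic introduced in helpers/utils.py above, the following standalone sketch (not part of the diff; the names mirror the helpers but it has no repository dependencies) reproduces how each generated results file shifts the template timestamps forward by 60 hours plus 50 minutes per experiment index, and how each term's monitoring_start_time is derived from monitoring_end_time.

from datetime import datetime, timedelta

FMT = "%Y-%m-%dT%H:%M:%S.%fZ"

def increment_timestamp(input_timestamp, step):
    # Same arithmetic as helpers/utils.py: a 3600-minute (60-hour) base offset
    # plus 50 minutes per experiment index, truncated to millisecond precision.
    input_date = datetime.strptime(input_timestamp, FMT)
    output_date = input_date + timedelta(minutes=50 * step + 3600)
    return output_date.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + 'Z'

def expected_monitoring_start(end_timestamp, term):
    # short/medium/long term windows of 1, 7 and 15 days, as in increment_date_time().
    duration = {"short_term": 1, "medium_term": 7, "long_term": 15}
    end_date = datetime.strptime(end_timestamp, FMT)
    start_date = end_date - timedelta(days=duration[term])
    return start_date.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + 'Z'

if __name__ == "__main__":
    end = increment_timestamp("2022-01-23T18:55:43.511Z", 2)    # experiment index 2
    print(end)                                                   # 2022-01-26T08:35:43.511Z
    print(expected_monitoring_start(end, "short_term"))         # 2022-01-25T08:35:43.511Z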