Included update results tests to validate behavior when the metrics section is missing #1009

Closed · wants to merge 4 commits
10 changes: 5 additions & 5 deletions tests/scripts/common/common_functions.sh
@@ -232,16 +232,16 @@ function deploy_autotune() {
 		namespace="monitoring"
 	fi
 	echo "Namespace = $namespace"
-	service="autotune"
 	if [ ${target} == "crc" ]; then
 		service="kruize"
-	fi
-	autotune_pod=$(kubectl get pod -n ${namespace} | grep ${service} | cut -d " " -f1)
-	echo "autotune_pod = $autotune_pod"
-	if [ ${target} == "crc" ]; then
+		autotune_pod=$(kubectl get pod -n ${namespace} | grep ${service} | grep -v kruize-ui | cut -d " " -f1)
+		echo "autotune_pod = $autotune_pod"
 		echo "kubectl -n ${namespace} logs -f ${autotune_pod} > "${AUTOTUNE_POD_LOG}" 2>&1 &"
 		kubectl -n ${namespace} logs -f ${autotune_pod} > "${AUTOTUNE_POD_LOG}" 2>&1 &
 	else
+		service="autotune"
+		autotune_pod=$(kubectl get pod -n ${namespace} | grep ${service} | cut -d " " -f1)
+		echo "autotune_pod = $autotune_pod"
 		echo "kubectl -n ${namespace} logs -f ${autotune_pod} -c autotune > "${AUTOTUNE_POD_LOG}" 2>&1 &"
 		kubectl -n ${namespace} logs -f ${autotune_pod} -c autotune > "${AUTOTUNE_POD_LOG}" 2>&1 &
 	fi

Large diffs for three files are not rendered by default.

@@ -0,0 +1,25 @@
[
{
"version": "1.0",
"experiment_name": "quarkus-resteasy-kruize-min-http-response-time-db",
"interval_start_time": "2022-01-23T18:25:43.511Z",
"interval_end_time": "2022-01-23T18:40:43.570Z",
"kubernetes_objects": [
{
"type": "deployment",
"name": "tfb-qrh-deployment",
"namespace": "default",
"containers": [
{
"container_image_name": "kruize/tfb-db:1.15",
"container_name": "tfb-server-0"
},
{
"container_image_name": "kruize/tfb-qrh:1.13.2.F_et17",
"container_name": "tfb-server-1"
}
]
}
]
}
]
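
The file above follows the update results input format but omits the metrics array for both containers, presumably the "all containers" single-result case in the test list below. As a quick manual check outside the test suite, a payload like this could be posted directly to Kruize; a minimal sketch assuming the remote-monitoring /updateResults endpoint (the helper name, base URL, and file path here are illustrative, not the suite's own helpers):

# Minimal sketch, not the suite's update_results() helper: post a results file
# to Kruize and inspect the response. The base URL and the /updateResults path
# are assumptions about the deployed service; the file path is illustrative.
import json
import requests

def post_results(kruize_url, result_json_file):
    with open(result_json_file) as f:
        payload = json.load(f)  # a JSON array of result records
    return requests.post(f"{kruize_url}/updateResults", json=payload)

# resp = post_results("http://127.0.0.1:8080", "missing_metrics_jsons/some_results_file.json")
# A payload whose containers have no metrics section is expected to come back
# with an error status and a message reporting how many records failed to save.
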
@@ -0,0 +1,114 @@
[
{
"version": "1.0",
"experiment_name": "quarkus-resteasy-kruize-min-http-response-time-db",
"interval_start_time": "2022-01-23T18:25:43.511Z",
"interval_end_time": "2022-01-23T18:40:43.570Z",
"kubernetes_objects": [
{
"type": "deployment",
"name": "tfb-qrh-deployment",
"namespace": "default",
"containers": [
{
"container_image_name": "kruize/tfb-db:1.15",
"container_name": "tfb-server-0"
},
{
"container_image_name": "kruize/tfb-qrh:1.13.2.F_et17",
"container_name": "tfb-server-1",
"metrics": [
{
"name": "cpuRequest",
"results": {
"aggregation_info": {
"sum": 4.4,
"avg": 1.1,
"format": "cores"
}
}
},
{
"name": "cpuLimit",
"results": {
"aggregation_info": {
"sum": 2.0,
"avg": 0.5,
"format": "cores"
}
}
},
{
"name": "cpuUsage",
"results": {
"aggregation_info": {
"min": 0.14,
"max": 0.84,
"sum": 0.84,
"avg": 0.12,
"format": "cores"
}
}
},
{
"name": "cpuThrottle",
"results": {
"aggregation_info": {
"sum": 0.19,
"max": 0.09,
"avg": 0.045,
"format": "cores"
}
}
},
{
"name": "memoryRequest",
"results": {
"aggregation_info": {
"sum": 250.85,
"avg": 50.21,
"format": "MiB"
}
}
},
{
"name": "memoryLimit",
"results": {
"aggregation_info": {
"sum": 500,
"avg": 100,
"format": "MiB"
}
}
},
{
"name": "memoryUsage",
"results": {
"aggregation_info": {
"min": 50.6,
"max": 198.50,
"sum": 198.50,
"avg": 40.1,
"format": "MiB"
}
}
},
{
"name": "memoryRSS",
"results": {
"aggregation_info": {
"min": 50.6,
"max": 123.6,
"sum": 123.6,
"avg": 31.91,
"format": "MiB"
}
}
}
]
}
]
}
]
}
]
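
In this longer file only the first container (tfb-server-0) is missing its metrics section while tfb-server-1 carries the full set, so exactly one container in the record should be flagged. The check the tests expect the server to perform looks roughly like the following; the real validation lives in the Kruize Java service, and this Python rendering is only an illustration of the logic implied by the expected error message:

# Illustration of the expected per-container validation, inferred from the
# "Metric data is not present for container" error message; not the server's code.
def find_missing_metrics(result_record):
    errors = []
    for k8s_object in result_record.get("kubernetes_objects", []):
        for container in k8s_object.get("containers", []):
            if not container.get("metrics"):
                errors.append("Metric data is not present for container : "
                              + container.get("container_name", ""))
    return errors

# For the record above this would flag only "tfb-server-0".
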
@@ -16,6 +16,14 @@
("valid_plus_30s", "2022-01-23T18:41:13.511Z")
]

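# Each entry: (test_name, result_json_file, expected_message, error_message)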
missing_metrics = [
("Missing_metrics_single_res_single_container", "../json_files/missing_metrics_jsons/update_results_missing_metrics_single_container.json", "Out of a total of 1 records, 1 failed to save", "Metric data is not present for container"),
("Missing_metrics_single_res_all_containers", "../json_files/missing_metrics_jsons/update_results_missing_metrics_all_containers.json", "Out of a total of 1 records, 1 failed to save", "Metric data is not present for container"),
("Missing_metrics_bulk_res_single_container", "../json_files/missing_metrics_jsons/bulk_update_results_missing_metrics_single_container.json", "Out of a total of 100 records, 1 failed to save", "Metric data is not present for container"),
("Missing_metrics_bulk_res_few_containers", "../json_files/missing_metrics_jsons/bulk_update_results_missing_metrics_few_containers.json", "Out of a total of 100 records, 2 failed to save", "Metric data is not present for container"),
("Missing_metrics_bulk_res_few_containers_few_individual_metrics_missing", "../json_files/missing_metrics_jsons/bulk_update_results_missing_metrics_few_containers_few_individual_metrics_missing.json", "Out of a total of 100 records, 4 failed to save", "Metric data is not present for container")
]


@pytest.mark.negative
@pytest.mark.parametrize(
@@ -131,6 +139,50 @@ def test_update_results_invalid_tests(test_name, expected_status_code, version,
    response = delete_experiment(input_json_file)
    print("delete exp = ", response.status_code)

@pytest.mark.negative
@pytest.mark.parametrize("test_name, result_json_file, expected_message, error_message", missing_metrics)
def test_update_results_with_missing_metrics_section(test_name, result_json_file, expected_message, error_message, cluster_type):
"""
Test Description: This test validates update results for a valid experiment
by updating results with entire metrics section missing
"""
input_json_file = "../json_files/create_exp.json"

form_kruize_url(cluster_type)
response = delete_experiment(input_json_file)
print("delete exp = ", response.status_code)

# Create experiment using the specified json
response = create_experiment(input_json_file)

data = response.json()
assert response.status_code == SUCCESS_STATUS_CODE
assert data['status'] == SUCCESS_STATUS
assert data['message'] == CREATE_EXP_SUCCESS_MSG

# Update results for the experiment
response = update_results(result_json_file)

data = response.json()
assert response.status_code == ERROR_STATUS_CODE
assert data['status'] == ERROR_STATUS
print("**************************")
print(data['message'])
print("**************************")

    # Assert the overall failure summary message
    assert data['message'] == expected_message

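    # Shape of the error payload assumed from the assertions that follow, not from API docs:
    # data['data'] is a list of per-record entries, each carrying an 'errors' list,
    # and every error has a human-readable 'message' field.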
    # Assert that each record-level error reports the missing metrics section
    msg_data = data['data']
    for d in msg_data:
        error_data = d["errors"]
        for err in error_data:
            actual_error_message = err["message"]
            assert error_message in actual_error_message

    response = delete_experiment(input_json_file)
    print("delete exp = ", response.status_code)

@pytest.mark.sanity
def test_update_valid_results_after_create_exp(cluster_type):