From 16d681d88fd3488ab30bf805cb3d8b956596ec81 Mon Sep 17 00:00:00 2001
From: Amine
Date: Tue, 13 Feb 2024 21:04:29 -0600
Subject: [PATCH] Fix rolling updates flaky tests (#105)

We recently added a rolling update feature that lets users request
multi-step cluster upgrades such as 1.25->1.29. The tests for this
feature create a 1.27 cluster, patch its version to 1.29, and then
verify that the cluster goes from 1.27 to 1.28, then to 1.29.

However, we noticed that when the tests check that the cluster is at
1.28, it is sometimes already at 1.29, meaning the waiter never got a
chance to observe the cluster in an active state at the intermediate
version. This patch reduces the waiter delay so the tests poll often
enough to catch the cluster while it is active at 1.28.

Signed-off-by: Amine Hilaly

By submitting this pull request, I confirm that my contribution is made
under the terms of the Apache 2.0 license.
---
 test/e2e/tests/test_cluster.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/test/e2e/tests/test_cluster.py b/test/e2e/tests/test_cluster.py
index 811c317..71162fb 100644
--- a/test/e2e/tests/test_cluster.py
+++ b/test/e2e/tests/test_cluster.py
@@ -41,7 +41,11 @@ CHECK_STATUS_WAIT_SECONDS = 30
 
 
 def wait_for_cluster_active(eks_client, cluster_name):
-    waiter = eks_client.get_waiter('cluster_active')
+    waiter = eks_client.get_waiter(
+        'cluster_active',
+    )
+    waiter.config.delay = 5
+    waiter.config.max_attempts = 240
     waiter.wait(name=cluster_name)
 
 def get_and_assert_status(ref: k8s.CustomResourceReference, expected_status: str, expected_synced: bool):
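
Note: the effect of the patch is to tighten the polling interval of the
boto3 'cluster_active' waiter. The same tuning can also be expressed by
passing a WaiterConfig to wait() instead of mutating waiter.config. A
minimal sketch, assuming boto3 is installed and AWS credentials are
configured; the cluster name "my-cluster" is a placeholder:

    import boto3

    eks_client = boto3.client("eks")
    waiter = eks_client.get_waiter("cluster_active")

    # Delay=5 polls every 5 seconds rather than the waiter's default of 30,
    # so a short-lived intermediate state is far less likely to be missed.
    # MaxAttempts=240 keeps the overall timeout at 5 * 240 = 1200 seconds,
    # the same budget as the default 30-second delay with 40 attempts.
    waiter.wait(
        name="my-cluster",  # placeholder cluster name
        WaiterConfig={"Delay": 5, "MaxAttempts": 240},
    )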