Adding some integration tests #19
Open
alteryx-sezell wants to merge 4 commits into main from more-tests
Commits (4):
de299c6  adding some integration tests (alteryx-sezell)
c565ed4  fixing linting for dummy test file (alteryx-sezell)
3474b57  adding tests for running against a yaml instead of py file (alteryx-sezell)
563501c  made a few checks a bit more complete (alteryx-sezell)
@@ -0,0 +1 @@
"""Module: Integration Tests."""
tests/integration/test__configuration_integration_with_pytest.py
This file was deleted (27 changes: 0 additions & 27 deletions).
@@ -0,0 +1,26 @@
import logging

from assertpy import assert_that

from grasshopper.lib.configuration.gh_configuration import GHConfiguration

logger = logging.getLogger(__name__)


def test__complete_configuration__simplest_case(complete_configuration):
    """Validate the whole 'stack' of fixtures that produce complete_configuration.

    This test validates that all the fixtures feed into complete_configuration
    without errors and that the result of the entire process is a GHConfiguration
    object with about the right number of items (currently 12, but this may
    change if more defaults are added).

    Note that in this case, most of the fixtures are not contributing additional
    values.

    """
    logger.debug(f"COMPLETE CONFIGURATION: {complete_configuration}")

    assert_that(complete_configuration).is_instance_of(GHConfiguration)
    # without supplying much, complete configuration should be >= 12 items
    assert_that(len(complete_configuration)).is_greater_than(11)
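For readers without the surrounding conftest handy: complete_configuration is a fixture assembled from several layers of defaults. The sketch below is a guess at the general wiring, assuming a dict-like GHConfiguration (the test above calls len() on it); the fixture layering and key names are illustrative, not the project's actual fixtures.

# conftest.py -- hypothetical sketch, not the real grasshopper fixtures.
import pytest

from grasshopper.lib.configuration.gh_configuration import GHConfiguration


@pytest.fixture
def global_defaults():
    # Assumed layer: contributes some of the ~12 default keys the test counts.
    return {"runtime": 120, "users": 1, "spawn_rate": 1}


@pytest.fixture
def complete_configuration(global_defaults):
    # Assumed merge step: fold each layer into one GHConfiguration object.
    config = GHConfiguration()
    config.update(global_defaults)
    return config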
@@ -0,0 +1,207 @@
import logging

from assertpy import assert_that

from tests.integration.conftest import construct_msg  # noqa: I202
from tests.unit.conftest import (  # noqa: I202
    calculate_path,
    perform_pytester_test_with_optional_log_capture,
    was_message_logged,
)

logger = logging.getLogger(__name__)

MIN_NUMBER_OF_REQUESTS = 10


def validate_number_of_iterations(
    caplog, mock_requests, min_iterations=MIN_NUMBER_OF_REQUESTS
):
    # assert that the requests_mock was called a reasonable number of times
    assert_that(mock_requests.call_count).is_greater_than(min_iterations)

    # check that a reasonable number of messages coming from the @task are in caplog
    _, starting_msgs = was_message_logged(
        caplog=caplog,
        target_message_re=r"VU \d+: Starting journey1_task",
        target_level=logging.INFO,
    )
    assert_that(
        len(starting_msgs), f"Actual contents of msg list {starting_msgs}"
    ).is_greater_than(min_iterations)
    _, return_msgs = was_message_logged(
        caplog=caplog,
        # (200|403|500) rather than [200|403|500]: a character class would
        # match single characters, not the three status codes
        target_message_re=r"VU \d+: Google result: (200|403|500)",
        target_level=logging.INFO,
    )
    assert_that(
        len(return_msgs), f"Actual contents of msg list {return_msgs}"
    ).is_greater_than(min_iterations)
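was_message_logged is imported from tests.unit.conftest and is not shown in this diff. From the call sites above, it appears to return a found-flag plus the list of matching records; a plausible sketch (an assumption, not the helper's real code):

import re


def was_message_logged(caplog, target_message_re, target_level=None):
    # Hypothetical reconstruction: collect caplog records whose message
    # matches the regex (and level, when one is given).
    matches = [
        record
        for record in caplog.records
        if re.search(target_message_re, record.getMessage())
        and (target_level is None or record.levelno == target_level)
    ]
    return bool(matches), matches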
def test__grasshopper__py__running_with_config_defaults(
    pytester,
    caplog,
    expected_output_messages,
    mock_requests_get_mix_status_codes,
):
    """Short, but complete, grasshopper run.

    Run directly against a journey py file, using mainly the configuration
    defaults. The non default items added are thresholds (found in the defaults
    on the class) and the runtime has been overridden to be shorter (so that
    the test isn't too long).

    """
    dummy_journey_file = calculate_path(
        "test__journey1.py", subdir="../integration_testing_data"
    )

    pytester.copy_example(dummy_journey_file)
    perform_pytester_test_with_optional_log_capture(
        pytester,
        target="test__journey1.py",
        args=["--runtime=20"],  # override time so test does not take _too_ long
        caplog=caplog,
        target_messages=expected_output_messages,
    )
    validate_number_of_iterations(caplog, mock_requests_get_mix_status_codes)
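The journey file test__journey1.py lives in integration_testing_data and is not included in this diff. Based on the log patterns asserted above (Starting journey1_task, Google result: ...), a hypothetical journey might look like the following; the import paths and class names are assumptions drawn from grasshopper's public docs, not from this PR:

# test__journey1.py -- hypothetical sketch of the journey used by these tests.
import logging

from locust import between, task

from grasshopper.lib.journeys.base_journey import BaseJourney

logger = logging.getLogger(__name__)


class Journey1(BaseJourney):
    wait_time = between(1, 2)

    @task
    def journey1_task(self):
        # The mocked GET (mock_requests_get_mix_status_codes) answers here
        # with a mix of 200/403/500 status codes.
        response = self.client.get("https://google.com", name="google_home")
        logger.info(f"Google result: {response.status_code}")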
def test__grasshopper__yaml__collect_entire_file(
    pytester,
    caplog,
    expected_output_messages_add_yaml_specific_cfg,
    mock_requests_get_mix_status_codes,
):
    """Short, but complete, grasshopper run.

    Run directly against a scenario file, not providing a scenario name.
    There is only one scenario in the scenario file being used.

    Per the grasshopper behavior, the scenario is providing some values that
    override the defaults, but not much that actually changes behavior.
    Thresholds used are from this file.

    Runtime is still being overridden so that the test does not take _too_ long.

    """
    expected_output_messages = expected_output_messages_add_yaml_specific_cfg
    dummy_journey_file = calculate_path(
        "test__journey1.py", subdir="../integration_testing_data"
    )
    dummy_scenario_file = calculate_path(
        "single_scenario.yaml", subdir="../integration_testing_data"
    )

    pytester.copy_example(dummy_journey_file)
    pytester.copy_example(dummy_scenario_file)
    perform_pytester_test_with_optional_log_capture(
        pytester,
        target="single_scenario.yaml",
        args=["--runtime=20"],  # override time so test does not take _too_ long
        caplog=caplog,
        target_messages=expected_output_messages,
    )
    validate_number_of_iterations(caplog, mock_requests_get_mix_status_codes)
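perform_pytester_test_with_optional_log_capture (from tests.unit.conftest) is also not part of this diff. Its shape can be inferred from the call sites: run the copied target under pytester, assert the pytest outcomes, and optionally verify expected log messages against caplog. A rough, assumed reconstruction, reusing the was_message_logged sketch above:

def perform_pytester_test_with_optional_log_capture(
    pytester, target, args=(), caplog=None, target_messages=(), outcomes=None
):
    # Hypothetical reconstruction based on usage in this file.
    result = pytester.runpytest(target, *args)
    # The default expectation appears to be a single passing test; callers
    # such as collect_multiple override it via outcomes={"passed": 2}.
    result.assert_outcomes(**(outcomes or {"passed": 1}))
    if caplog is not None:
        for message in target_messages:
            # Assumes each entry carries the kwargs was_message_logged expects.
            found, _ = was_message_logged(caplog=caplog, **message)
            assert found, f"expected log message not found: {message}"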
def test__grasshopper__yaml__collect_one_scenario(
    pytester,
    caplog,
    expected_output_messages_add_yaml_specific_cfg,
    mock_requests_get_mix_status_codes,
):
    """Short, but complete, grasshopper run.

    Run directly against a scenario file, using tags to select the scenario.
    Only one scenario should match in the file.

    Per the grasshopper behavior, the scenario is providing some values that
    override the defaults, but not much that actually changes behavior.
    Thresholds used are from this file.

    Runtime is still being overridden so that the test does not take _too_ long.

    """
    expected_output_messages = expected_output_messages_add_yaml_specific_cfg
    dummy_journey_file = calculate_path(
        "test__journey1.py", subdir="../integration_testing_data"
    )
    dummy_scenario_file = calculate_path(
        "multiple_scenarios.yaml", subdir="../integration_testing_data"
    )

    pytester.copy_example(dummy_journey_file)
    pytester.copy_example(dummy_scenario_file)

    # this message appears when a scenario is collected
    expected_output_messages.append(
        construct_msg(
            r"Scenarios collected that match the specific tag query `scenario1`: "
            r"\['scenario1'\]"
        )
    )

    perform_pytester_test_with_optional_log_capture(
        pytester,
        target="multiple_scenarios.yaml",
        args=["--runtime=20", "--tags=scenario1"],  # override time, select scenario
        caplog=caplog,
        target_messages=expected_output_messages,
    )
    validate_number_of_iterations(
        caplog,
        mock_requests_get_mix_status_codes,
    )
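construct_msg comes from tests.integration.conftest and is likewise not shown. Given that its result is appended to the expected-messages list consumed by the helper above, it plausibly bundles a regex (and a default level) into whatever shape the log-capture check consumes; a guessed sketch:

import logging


def construct_msg(target_message_re, target_level=logging.INFO):
    # Hypothetical reconstruction: package the regex and level as the kwargs
    # that was_message_logged expects.
    return {
        "target_message_re": target_message_re,
        "target_level": target_level,
    }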
def test__grasshopper__yaml__collect_multiple(
    pytester,
    caplog,
    expected_output_messages_add_yaml_specific_cfg,
    mock_requests_get_mix_status_codes,
):
    """Short, but complete, grasshopper run.

    Run directly against a scenario file, using tags to select the scenarios.
    The tag should select 2 scenarios, which are essentially the same.

    Per the grasshopper behavior, the scenario is providing some values that
    override the defaults, but not much that actually changes behavior.
    Thresholds used are from this file.

    Runtime is still being overridden so that the test does not take _too_ long.

    """
    expected_output_messages = expected_output_messages_add_yaml_specific_cfg
    dummy_journey_file = calculate_path(
        "test__journey1.py", subdir="../integration_testing_data"
    )
    dummy_scenario_file = calculate_path(
        "multiple_scenarios.yaml", subdir="../integration_testing_data"
    )

    pytester.copy_example(dummy_journey_file)
    pytester.copy_example(dummy_scenario_file)

    # this message appears when a scenario is collected
    expected_output_messages.append(
        construct_msg(
            r"Scenarios collected that match the specific tag query `trend`: "
            r"\['scenario1', 'scenario2'\]"
        )
    )

    perform_pytester_test_with_optional_log_capture(
        pytester,
        target="multiple_scenarios.yaml",
        args=["--runtime=20", "--tags=trend"],  # override time, collect 2 scenarios
        outcomes={"passed": 2},
        caplog=caplog,
        target_messages=expected_output_messages,
    )
    validate_number_of_iterations(
        caplog,
        mock_requests_get_mix_status_codes,
        min_iterations=MIN_NUMBER_OF_REQUESTS * 2,
    )
Review comment: Why is this necessary? Why not just grab the entire stdout?