Adding some integration tests #19

Open · wants to merge 4 commits into main
1 change: 1 addition & 0 deletions tests/integration/__init__.py
@@ -0,0 +1 @@
"""Module: Integration Tests."""
99 changes: 93 additions & 6 deletions tests/integration/conftest.py
@@ -3,16 +3,103 @@
Conftest for use with integration tests for Grasshopper.

"""
import logging
import os
import random

import gevent
import pytest
from requests import Request
from requests_mock import ANY

pytest_plugins = ["pytester"]

GRASSHOPPER_CONFIG_FILE_PATH = os.path.join(
    os.path.abspath(os.path.dirname(__file__)), "grasshopper.config"
)

# Leaving this here because sometimes we want to turn it on for testing, but we don't
# want to use a config file unless the grasshopper consumer supplies one
# @pytest.fixture(scope="session")
# def grasshopper_config_file_path():
#     return GRASSHOPPER_CONFIG_FILE_PATH

@pytest.fixture
def std_gh_output_msgs():
    msgs = [
        construct_msg("-+ Grasshopper configuration -+"),
        construct_msg("-+ THRESHOLD REPORT -+"),
        construct_msg("-+ CHECKS REPORT -+"),
        construct_msg("-+ /Grasshopper configuration -+"),
    ]

    return msgs


@pytest.fixture
def std_gh_out_with_cfg_msgs(std_gh_output_msgs):
[Collaborator review comment] Why is this necessary? Why not just grab the entire stdout?

std_gh_output_msgs.append(construct_msg(r"shape: \[Default\]"))
std_gh_output_msgs.append(construct_msg(r"users: \[\d+\]"))
std_gh_output_msgs.append(construct_msg(r"runtime: \[\d+(\.\d+)?\]"))
std_gh_output_msgs.append(construct_msg(r"spawn_rate: \[\d+\.\d+\]"))
std_gh_output_msgs.append(construct_msg(r"scenario_delay: \[\d+\.\d+\]"))
std_gh_output_msgs.append(
construct_msg(
r"shape_instance: \[<grasshopper.lib.util.shapes.Default object at (.+)>\]"
)
)
std_gh_output_msgs.append(
construct_msg(r"user_classes: \[{<class 'test__journey1.Journey1'>: 1}\]")
)
return std_gh_output_msgs


@pytest.fixture
def gh_out_add_trends_and_checks(std_gh_out_with_cfg_msgs):
    trend_base = r"\s+\d\d\s+(\d+)ms\s+(\d+)ms"
    check_base = r"\s+\d+\s+\d+\s+\d+\s+\d+(\.\d+)?"

    std_gh_out_with_cfg_msgs.append(construct_msg("PX_TREND_google_home" + trend_base))
    std_gh_out_with_cfg_msgs.append(construct_msg("google_home" + trend_base))
    std_gh_out_with_cfg_msgs.append(construct_msg("Status code is good" + check_base))

    return std_gh_out_with_cfg_msgs


@pytest.fixture
def expected_output_messages(gh_out_add_trends_and_checks):
    gh_out_add_trends_and_checks.append(
        construct_msg("Check failed: Status code is good", target_level=logging.WARNING)
    )
    return gh_out_add_trends_and_checks


@pytest.fixture
def expected_output_messages_add_yaml_specific_cfg(expected_output_messages):
    expected_output_messages.append(
        construct_msg(r"scenario_test_file_name: \[test__journey1.py\]")
    )
    expected_output_messages.append(
        construct_msg(r"scenario_tags: \[\['smoke', 'trend'\]\]")
    )
    expected_output_messages.append(
        construct_msg(r"scenario_file: \[.*scenario(s)?.yaml\]")
    )
    expected_output_messages.append(construct_msg(r"scenario_name: \[scenario\d+\]"))
    return expected_output_messages


def construct_msg(msg_re, target_level=logging.INFO):
"""Construct a message dictionary, for use with validation."""
msg = {
"target_level": target_level,
"target_message_re": msg_re,
}
return msg
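
A minimal usage sketch for construct_msg, mirroring how these message dicts are consumed in test__grasshopper_runs.py below (was_message_logged lives in tests.unit.conftest; its return shape here is assumed from that usage):

    # hypothetical check against pytest's caplog inside a test
    expected = construct_msg(r"-\+ THRESHOLD REPORT -\+")
    _, matching_records = was_message_logged(
        caplog=caplog,
        target_message_re=expected["target_message_re"],
        target_level=expected["target_level"],
    )
    assert_that(len(matching_records)).is_greater_than(0)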


@pytest.fixture
def mock_requests_get_mix_status_codes(requests_mock):
    def response_provider_mix_of_status_codes(request: Request, context):
        context.status_code = random.choice([200, 200, 200, 201, 403, 500])
        sleep_time = random.randint(1, 3) / 10
        gevent.sleep(sleep_time)
        return {"data": f"mocked response {context.status_code}"}

    requests_mock.get(ANY, json=response_provider_mix_of_status_codes)
    return requests_mock
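
A minimal sketch of what the mock_requests_get_mix_status_codes fixture above provides to a test (the URL is hypothetical; the callback is registered for ANY GET, so every requests.get issued while the mock is active receives a mixed status code and a small delay):

    import requests

    def test__mock_returns_mixed_status_codes(mock_requests_get_mix_status_codes):
        response = requests.get("https://example.test/anything")  # hypothetical URL
        assert response.status_code in (200, 201, 403, 500)
        assert response.json()["data"].startswith("mocked response")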

This file was deleted.

26 changes: 26 additions & 0 deletions tests/integration/test__configuration_loading.py
@@ -0,0 +1,26 @@
import logging

from assertpy import assert_that

from grasshopper.lib.configuration.gh_configuration import GHConfiguration

logger = logging.getLogger(__name__)


def test__complete_configuration__simplest_case(complete_configuration):
"""Validate the whole 'stack' of fixtures that produce complete_configuration.

This test validates that all the fixtures feed into complete_configuration without
errors and that the result of the entire process is a GHConfiguration object with
about the right amount (currently there are 12, but this may change if more
defaults are added).

Note that in this case, most of the fixtures are not contributing additional
values.

"""
logger.debug(f"COMPLETE CONFIGURATION: {complete_configuration}")

assert_that(complete_configuration).is_instance_of(GHConfiguration)
# without supplying much, complete configuration should be >=12 items
assert_that(len(complete_configuration)).is_greater_than(11)
207 changes: 207 additions & 0 deletions tests/integration/test__grasshopper_runs.py
@@ -0,0 +1,207 @@
import logging

from assertpy import assert_that

from tests.integration.conftest import construct_msg  # noqa: I202
from tests.unit.conftest import (  # noqa: I202
    calculate_path,
    perform_pytester_test_with_optional_log_capture,
    was_message_logged,
)

logger = logging.getLogger(__name__)

MIN_NUMBER_OF_REQUESTS = 10


def validate_number_of_iterations(
    caplog, mock_requests, min_iterations=MIN_NUMBER_OF_REQUESTS
):
    # assert that the requests_mock was called a reasonable number of times
    assert_that(mock_requests.call_count).is_greater_than(min_iterations)

    # check that a reasonable number of messages coming from the @task are in caplog
    _, starting_msgs = was_message_logged(
        caplog=caplog,
        target_message_re=r"VU \d+: Starting journey1_task",
        target_level=logging.INFO,
    )
    assert_that(
        len(starting_msgs), f"Actual contents of msg list {starting_msgs}"
    ).is_greater_than(min_iterations)
    _, return_msgs = was_message_logged(
        caplog=caplog,
        target_message_re=r"VU \d+: Google result: (200|403|500)",
        target_level=logging.INFO,
    )
    assert_that(
        len(return_msgs), f"Actual contents of msg list {return_msgs}"
    ).is_greater_than(min_iterations)


def test__grasshopper__py__running_with_config_defaults(
    pytester,
    caplog,
    expected_output_messages,
    mock_requests_get_mix_status_codes,
):
    """Short, but complete, grasshopper run.

    Run directly against a journey py file, using mainly the configuration defaults.
    The non-default items added are thresholds (found in the defaults on the class)
    and the runtime, which has been overridden to be shorter (so that the test isn't
    too long).

    """
    dummy_journey_file = calculate_path(
        "test__journey1.py", subdir="../integration_testing_data"
    )

    pytester.copy_example(dummy_journey_file)
    perform_pytester_test_with_optional_log_capture(
        pytester,
        target="test__journey1.py",
        args=["--runtime=20"],  # override time so test does not take _too_ long
        caplog=caplog,
        target_messages=expected_output_messages,
    )
    validate_number_of_iterations(caplog, mock_requests_get_mix_status_codes)


def test__grasshopper__yaml__collect_entire_file(
    pytester,
    caplog,
    expected_output_messages_add_yaml_specific_cfg,
    mock_requests_get_mix_status_codes,
):
    """Short, but complete, grasshopper run.

    Run directly against a scenario file, without providing a scenario name.
    There is only one scenario in the scenario file being used.

    Per the grasshopper behavior, the scenario provides some values that override
    the defaults, but not much that actually changes behavior. The thresholds used
    come from this file.

    Runtime is still being overridden so that the test does not take _too_ long.

    """
    expected_output_messages = expected_output_messages_add_yaml_specific_cfg
    dummy_journey_file = calculate_path(
        "test__journey1.py", subdir="../integration_testing_data"
    )
    dummy_scenario_file = calculate_path(
        "single_scenario.yaml", subdir="../integration_testing_data"
    )

    pytester.copy_example(dummy_journey_file)
    pytester.copy_example(dummy_scenario_file)
    perform_pytester_test_with_optional_log_capture(
        pytester,
        target="single_scenario.yaml",
        args=["--runtime=20"],  # override time so test does not take _too_ long
        caplog=caplog,
        target_messages=expected_output_messages,
    )
    validate_number_of_iterations(caplog, mock_requests_get_mix_status_codes)


def test__grasshopper__yaml__collect_one_scenario(
    pytester,
    caplog,
    expected_output_messages_add_yaml_specific_cfg,
    mock_requests_get_mix_status_codes,
):
    """Short, but complete, grasshopper run.

    Run directly against a scenario file, using tags to select the scenario. Only one
    scenario should match in the file.

    Per the grasshopper behavior, the scenario provides some values that override
    the defaults, but not much that actually changes behavior. The thresholds used
    come from this file.

    Runtime is still being overridden so that the test does not take _too_ long.

    """
    expected_output_messages = expected_output_messages_add_yaml_specific_cfg
    dummy_journey_file = calculate_path(
        "test__journey1.py", subdir="../integration_testing_data"
    )
    dummy_scenario_file = calculate_path(
        "multiple_scenarios.yaml", subdir="../integration_testing_data"
    )

    pytester.copy_example(dummy_journey_file)
    pytester.copy_example(dummy_scenario_file)

    # this message appears when a scenario is collected
    expected_output_messages.append(
        construct_msg(
            r"Scenarios collected that match the specific tag query `scenario1`: "
            r"\['scenario1'\]"
        )
    )

    perform_pytester_test_with_optional_log_capture(
        pytester,
        target="multiple_scenarios.yaml",
        args=["--runtime=20", "--tags=scenario1"],  # override time, select scenario
        caplog=caplog,
        target_messages=expected_output_messages,
    )
    validate_number_of_iterations(
        caplog,
        mock_requests_get_mix_status_codes,
    )


def test__grasshopper__yaml__collect_multiple(
    pytester,
    caplog,
    expected_output_messages_add_yaml_specific_cfg,
    mock_requests_get_mix_status_codes,
):
    """Short, but complete, grasshopper run.

    Run directly against a scenario file, using tags to select the scenarios. The tag
    should select 2 scenarios, which are essentially the same.

    Per the grasshopper behavior, the scenarios provide some values that override
    the defaults, but not much that actually changes behavior. The thresholds used
    come from this file.

    Runtime is still being overridden so that the test does not take _too_ long.

    """
    expected_output_messages = expected_output_messages_add_yaml_specific_cfg
    dummy_journey_file = calculate_path(
        "test__journey1.py", subdir="../integration_testing_data"
    )
    dummy_scenario_file = calculate_path(
        "multiple_scenarios.yaml", subdir="../integration_testing_data"
    )

    pytester.copy_example(dummy_journey_file)
    pytester.copy_example(dummy_scenario_file)

    # this message appears when a scenario is collected
    expected_output_messages.append(
        construct_msg(
            r"Scenarios collected that match the specific tag query `trend`: "
            r"\['scenario1', 'scenario2'\]"
        )
    )

    perform_pytester_test_with_optional_log_capture(
        pytester,
        target="multiple_scenarios.yaml",
        args=["--runtime=20", "--tags=trend"],  # override time, collect 2 scenarios
        outcomes={"passed": 2},
        caplog=caplog,
        target_messages=expected_output_messages,
    )
    validate_number_of_iterations(
        caplog,
        mock_requests_get_mix_status_codes,
        min_iterations=MIN_NUMBER_OF_REQUESTS * 2,
    )