
Commit

refactor: Added type hints and docstrings to helper functions. Removed unused helper functions
pabloitu committed Aug 23, 2024
1 parent a91339b commit 0222942
Showing 1 changed file with 171 additions and 171 deletions.
342 changes: 171 additions & 171 deletions tests/test_plots.py
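The helper functions that received type hints and docstrings are not visible in the hunk below, which begins inside the test class's savefig helper (see the hunk header). As a rough sketch of the annotation style the commit message describes, a helper with the signature shown in the hunk header might look as follows; the class name, body, and output path are assumptions made for illustration, not code from this commit:

import matplotlib.pyplot as plt
from matplotlib.axes import Axes


class _PlotTestHelperExample:
    """Hypothetical container standing in for the test class that owns savefig."""

    def savefig(self, ax: Axes, name: str) -> None:
        """Save the figure that owns ``ax`` to ``<name>.png`` and close it.

        The signature mirrors ``def savefig(self, ax, name):`` from the hunk
        header; the body is an assumption for this sketch.
        """
        fig = ax.get_figure()
        fig.savefig(f"{name}.png")
        plt.close(fig)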
@@ -517,177 +517,177 @@ def savefig(self, ax, name):
# gc.collect()
#
#
# class TestBatchPlots(TestPlots):
# def setUp(self):
# # Mocking EvaluationResult for testing
# self.mock_result = Mock()
# self.mock_result.sim_name = "Mock Forecast"
# self.mock_result.test_distribution = numpy.random.normal(loc=10, scale=2, size=100)
# self.mock_result.observed_statistic = 8
#
# def test_plot_consistency_basic(self):
# ax = plot_consistency_test(eval_results=self.mock_result, show=show_plots)
# self.assertEqual(ax.get_title(), '')
# self.assertEqual(ax.get_xlabel(), "Statistic distribution")
#
# def test_plot_consistency_with_multiple_results(self):
# mock_results = [self.mock_result for _ in range(5)]
# ax = plot_consistency_test(eval_results=mock_results, show=show_plots)
# self.assertEqual(len(ax.get_yticklabels()), 5)
#
# def test_plot_consistency_with_normalization(self):
# ax = plot_consistency_test(eval_results=self.mock_result, normalize=True,
# show=show_plots)
# # Assert that the observed statistic is plotted at 0
# self.assertEqual(ax.lines[0].get_xdata(), 0)
#
# def test_plot_consistency_with_one_sided_lower(self):
# mock_result = copy.deepcopy(self.mock_result)
#         # The observed statistic is placed to the right of the model test distribution.
# mock_result.observed_statistic = max(self.mock_result.test_distribution) + 1
# ax = plot_consistency_test(eval_results=mock_result, one_sided_lower=True,
# show=show_plots)
#         # The end of the infinite dashed line should extend well beyond the plot limit
# self.assertGreater(ax.lines[-1].get_xdata()[-1], ax.get_xlim()[1])
#
# def test_plot_consistency_with_custom_percentile(self):
# ax = plot_consistency_test(eval_results=self.mock_result, percentile=99,
# show=show_plots)
#
#         # Check that the line extent equals the 0.5th percentile (lower bound of the 99% range)
# self.assertAlmostEqual(ax.lines[2].get_xdata(),
# numpy.percentile(self.mock_result.test_distribution, 0.5))
#
# def test_plot_consistency_with_variance(self):
# mock_nb = copy.deepcopy(self.mock_result)
# mock_poisson = copy.deepcopy(self.mock_result)
# mock_nb.test_distribution = ('negative_binomial', 8)
# mock_poisson.test_distribution = ('poisson', 8)
# ax_nb = plot_consistency_test(eval_results=mock_nb, variance=16, show=show_plots)
# ax_p = plot_consistency_test(eval_results=mock_poisson, variance=None, show=show_plots)
# # Ensure the negative binomial has a larger x-axis extent than poisson
# self.assertTrue(ax_p.get_xlim()[1] < ax_nb.get_xlim()[1])
#
# def test_plot_consistency_with_custom_plot_args(self):
# ax = plot_consistency_test(eval_results=self.mock_result, show=show_plots,
# xlabel="Custom X", ylabel="Custom Y", title="Custom Title")
# self.assertEqual(ax.get_xlabel(), "Custom X")
# self.assertEqual(ax.get_title(), "Custom Title")
#
# def test_plot_consistency_with_mean(self):
# ax = plot_consistency_test(eval_results=self.mock_result, plot_mean=True,
# show=show_plots)
# # Check for the mean line plotted as a circle
# self.assertTrue(any(["o" in str(line.get_marker()) for line in ax.lines]))
#
# def test_SingleNTestPlot(self):
#
# expected_val = numpy.random.randint(0, 20)
# observed_val = numpy.random.randint(0, 20)
# Ntest_result = mock.Mock()
# Ntest_result.name = "Mock NTest"
# Ntest_result.sim_name = "Mock SimName"
# Ntest_result.test_distribution = ["poisson", expected_val]
# Ntest_result.observed_statistic = observed_val
# matplotlib.pyplot.close()
# plot_consistency_test(Ntest_result, show=show_plots)
#
# if not show_plots:
# self.assertEqual(
# [i.get_text() for i in matplotlib.pyplot.gca().get_yticklabels()],
# [i.sim_name for i in [Ntest_result]],
# )
# self.assertEqual(matplotlib.pyplot.gca().get_title(), '')
#
# def test_MultiNTestPlot(self):
#
# n_plots = numpy.random.randint(1, 20)
# Ntests = []
# for n in range(n_plots):
# Ntest_result = mock.Mock()
# Ntest_result.name = "Mock NTest"
# Ntest_result.sim_name = "".join(
# random.choice(string.ascii_letters) for _ in range(8)
# )
# Ntest_result.test_distribution = ["poisson", numpy.random.randint(0, 20)]
# Ntest_result.observed_statistic = numpy.random.randint(0, 20)
# Ntests.append(Ntest_result)
# matplotlib.pyplot.close()
# plot_consistency_test(Ntests, show=show_plots)
# Ntests.reverse()
# if not show_plots:
# self.assertEqual(
# [i.get_text() for i in matplotlib.pyplot.gca().get_yticklabels()],
# [i.sim_name for i in Ntests],
# )
#
# def test_MultiSTestPlot(self):
#
# s_plots = numpy.random.randint(1, 20)
# Stests = []
# for n in range(s_plots):
# Stest_result = mock.Mock() # Mock class with random attributes
# Stest_result.name = "Mock STest"
# Stest_result.sim_name = "".join(
# random.choice(string.ascii_letters) for _ in range(8)
# )
# Stest_result.test_distribution = numpy.random.uniform(
# -1000, 0, numpy.random.randint(3, 500)
# ).tolist()
# Stest_result.observed_statistic = numpy.random.uniform(
# -1000, 0
# ) # random observed statistic
# if numpy.random.random() < 0.02: # sim possible infinite values
# Stest_result.observed_statistic = -numpy.inf
# Stests.append(Stest_result)
# matplotlib.pyplot.close()
# plot_consistency_test(Stests)
# Stests.reverse()
# self.assertEqual(
# [i.get_text() for i in matplotlib.pyplot.gca().get_yticklabels()],
# [i.sim_name for i in Stests],
# )
#
# def test_MultiTTestPlot(self):
#
# for i in range(1):
# t_plots = numpy.random.randint(2, 20)
# t_tests = []
#
# def rand(limit=10, offset=0.):
# return limit * (numpy.random.random() - offset)
#
# for n in range(t_plots):
# t_result = mock.Mock() # Mock class with random attributes
# t_result.name = "CSEP1 Comparison Test"
# t_result.sim_name = (
# "".join(random.choice(string.ascii_letters) for _ in range(8)),
# "ref",
# )
# t_result.observed_statistic = rand(offset=0.5)
# t_result.test_distribution = [
# t_result.observed_statistic - rand(5),
# t_result.observed_statistic + rand(5),
# ]
#
# if numpy.random.random() < 0.05: # sim possible infinite values
# t_result.observed_statistic = -numpy.inf
# t_tests.append(t_result)
# matplotlib.pyplot.close()
# plot_comparison_test(t_tests, show=show_plots)
# t_tests.reverse()
# if not show_plots:
# self.assertEqual(
# [i.get_text() for i in matplotlib.pyplot.gca().get_xticklabels()],
# [i.sim_name[0] for i in t_tests[::-1]],
# )
# self.assertEqual(matplotlib.pyplot.gca().get_title(), t_tests[0].name)
#
# def tearDown(self):
# plt.close("all")
#
# gc.collect()
#
class TestBatchPlots(TestPlots):
def setUp(self):
# Mocking EvaluationResult for testing
self.mock_result = Mock()
self.mock_result.sim_name = "Mock Forecast"
self.mock_result.test_distribution = numpy.random.normal(loc=10, scale=2, size=100)
self.mock_result.observed_statistic = 8

def test_plot_consistency_basic(self):
ax = plot_consistency_test(eval_results=self.mock_result, show=show_plots)
self.assertEqual(ax.get_title(), '')
self.assertEqual(ax.get_xlabel(), "Statistic distribution")

def test_plot_consistency_with_multiple_results(self):
mock_results = [self.mock_result for _ in range(5)]
ax = plot_consistency_test(eval_results=mock_results, show=show_plots)
self.assertEqual(len(ax.get_yticklabels()), 5)

def test_plot_consistency_with_normalization(self):
ax = plot_consistency_test(eval_results=self.mock_result, normalize=True,
show=show_plots)
# Assert that the observed statistic is plotted at 0
self.assertEqual(ax.lines[0].get_xdata(), 0)

def test_plot_consistency_with_one_sided_lower(self):
mock_result = copy.deepcopy(self.mock_result)
        # The observed statistic is placed to the right of the model test distribution.
mock_result.observed_statistic = max(self.mock_result.test_distribution) + 1
ax = plot_consistency_test(eval_results=mock_result, one_sided_lower=True,
show=show_plots)
        # The end of the infinite dashed line should extend well beyond the plot limit
self.assertGreater(ax.lines[-1].get_xdata()[-1], ax.get_xlim()[1])

def test_plot_consistency_with_custom_percentile(self):
ax = plot_consistency_test(eval_results=self.mock_result, percentile=99,
show=show_plots)

        # Check that the line extent equals the 0.5th percentile (lower bound of the 99% range)
self.assertAlmostEqual(ax.lines[2].get_xdata(),
numpy.percentile(self.mock_result.test_distribution, 0.5))

def test_plot_consistency_with_variance(self):
mock_nb = copy.deepcopy(self.mock_result)
mock_poisson = copy.deepcopy(self.mock_result)
mock_nb.test_distribution = ('negative_binomial', 8)
mock_poisson.test_distribution = ('poisson', 8)
ax_nb = plot_consistency_test(eval_results=mock_nb, variance=16, show=show_plots)
ax_p = plot_consistency_test(eval_results=mock_poisson, variance=None, show=show_plots)
# Ensure the negative binomial has a larger x-axis extent than poisson
self.assertTrue(ax_p.get_xlim()[1] < ax_nb.get_xlim()[1])

def test_plot_consistency_with_custom_plot_args(self):
ax = plot_consistency_test(eval_results=self.mock_result, show=show_plots,
xlabel="Custom X", ylabel="Custom Y", title="Custom Title")
self.assertEqual(ax.get_xlabel(), "Custom X")
self.assertEqual(ax.get_title(), "Custom Title")

def test_plot_consistency_with_mean(self):
ax = plot_consistency_test(eval_results=self.mock_result, plot_mean=True,
show=show_plots)
# Check for the mean line plotted as a circle
self.assertTrue(any(["o" in str(line.get_marker()) for line in ax.lines]))

def test_SingleNTestPlot(self):

expected_val = numpy.random.randint(0, 20)
observed_val = numpy.random.randint(0, 20)
Ntest_result = mock.Mock()
Ntest_result.name = "Mock NTest"
Ntest_result.sim_name = "Mock SimName"
Ntest_result.test_distribution = ["poisson", expected_val]
Ntest_result.observed_statistic = observed_val
matplotlib.pyplot.close()
plot_consistency_test(Ntest_result, show=show_plots)

if not show_plots:
self.assertEqual(
[i.get_text() for i in matplotlib.pyplot.gca().get_yticklabels()],
[i.sim_name for i in [Ntest_result]],
)
self.assertEqual(matplotlib.pyplot.gca().get_title(), '')

def test_MultiNTestPlot(self):

n_plots = numpy.random.randint(1, 20)
Ntests = []
for n in range(n_plots):
Ntest_result = mock.Mock()
Ntest_result.name = "Mock NTest"
Ntest_result.sim_name = "".join(
random.choice(string.ascii_letters) for _ in range(8)
)
Ntest_result.test_distribution = ["poisson", numpy.random.randint(0, 20)]
Ntest_result.observed_statistic = numpy.random.randint(0, 20)
Ntests.append(Ntest_result)
matplotlib.pyplot.close()
plot_consistency_test(Ntests, show=show_plots)
Ntests.reverse()
if not show_plots:
self.assertEqual(
[i.get_text() for i in matplotlib.pyplot.gca().get_yticklabels()],
[i.sim_name for i in Ntests],
)

def test_MultiSTestPlot(self):

s_plots = numpy.random.randint(1, 20)
Stests = []
for n in range(s_plots):
Stest_result = mock.Mock() # Mock class with random attributes
Stest_result.name = "Mock STest"
Stest_result.sim_name = "".join(
random.choice(string.ascii_letters) for _ in range(8)
)
Stest_result.test_distribution = numpy.random.uniform(
-1000, 0, numpy.random.randint(3, 500)
).tolist()
Stest_result.observed_statistic = numpy.random.uniform(
-1000, 0
) # random observed statistic
if numpy.random.random() < 0.02: # sim possible infinite values
Stest_result.observed_statistic = -numpy.inf
Stests.append(Stest_result)
matplotlib.pyplot.close()
plot_consistency_test(Stests)
Stests.reverse()
self.assertEqual(
[i.get_text() for i in matplotlib.pyplot.gca().get_yticklabels()],
[i.sim_name for i in Stests],
)

def test_MultiTTestPlot(self):

for i in range(1):
t_plots = numpy.random.randint(2, 20)
t_tests = []

def rand(limit=10, offset=0.):
return limit * (numpy.random.random() - offset)

for n in range(t_plots):
t_result = mock.Mock() # Mock class with random attributes
t_result.name = "CSEP1 Comparison Test"
t_result.sim_name = (
"".join(random.choice(string.ascii_letters) for _ in range(8)),
"ref",
)
t_result.observed_statistic = rand(offset=0.5)
t_result.test_distribution = [
t_result.observed_statistic - rand(5),
t_result.observed_statistic + rand(5),
]

if numpy.random.random() < 0.05: # sim possible infinite values
t_result.observed_statistic = -numpy.inf
t_tests.append(t_result)
matplotlib.pyplot.close()
plot_comparison_test(t_tests, show=show_plots)
t_tests.reverse()
if not show_plots:
self.assertEqual(
[i.get_text() for i in matplotlib.pyplot.gca().get_xticklabels()],
[i.sim_name[0] for i in t_tests[::-1]],
)
self.assertEqual(matplotlib.pyplot.gca().get_title(), t_tests[0].name)

def tearDown(self):
plt.close("all")

gc.collect()


class TestPlotBasemap(TestPlots):

