From d387cd3a9a03efb9ce7f43c89e0eac0b6d9cc0e2 Mon Sep 17 00:00:00 2001
From: pciturri
Date: Sat, 28 Sep 2024 16:49:31 +0200
Subject: [PATCH] docs: Finished the evaluation config docs. Added .js to
 customize the behavior of external links (they now open in a new tab). Added
 sphinx-design for expandable tables.
---
 docs/_static/custom.js           |  31 +++++
 docs/conf.py                     |   5 +
 docs/guide/evaluation_config.rst | 198 ++++++++++++++++++++++---------
 docs/guide/experiment_config.rst |   3 +-
 docs/guide/model_config.rst      |   1 +
 docs/tutorials/case_a.rst        |   2 +-
 docs/tutorials/case_c.rst        |   4 +-
 tutorials/case_f/tests.yml       |  19 ++-
 tutorials/case_g/tests.yml       |  13 +-
 9 files changed, 201 insertions(+), 75 deletions(-)
 create mode 100644 docs/_static/custom.js

diff --git a/docs/_static/custom.js b/docs/_static/custom.js
new file mode 100644
index 0000000..9def49d
--- /dev/null
+++ b/docs/_static/custom.js
@@ -0,0 +1,31 @@
+/**
+ * custom.js
+ *
+ * This script contains custom JavaScript modifications for the Sphinx documentation.
+ * It can be expanded to include additional customizations related to the behavior,
+ * style, and functionality of the generated documentation.
+ *
+ *
+ * Usage:
+ * - Place this script in the _static directory of your Sphinx project.
+ * - Include it in the html_js_files configuration in conf.py to load it automatically.
+ * - Expand this file with other JavaScript customizations as needed.
+ *
+ * Author: Pablo Iturrieta
+ * Date: 28.09.2024
+ */
+
+document.addEventListener("DOMContentLoaded", function () {
+    // - Ensures that all external links open in a new tab by adding the target="_blank"
+    //   attribute to all links with the 'external' class (automatically applied by Sphinx).
+    // - Adds rel="noopener noreferrer" for security purposes, ensuring the new page
+    //   does not have access to the originating window context (prevents security risks).
+    // Select all external links in the documentation
+    const links = document.querySelectorAll('a.external');
+
+    // Loop through all the links and set them to open in a new tab
+    links.forEach(function (link) {
+        link.setAttribute('target', '_blank');
+        link.setAttribute('rel', 'noopener noreferrer');
+    });
+});
diff --git a/docs/conf.py b/docs/conf.py
index 32e462c..8b7cac7 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -30,6 +30,7 @@
     "sphinx.ext.napoleon",
     "sphinx.ext.intersphinx",
     "sphinx_copybutton",
+    "sphinx_design",
 ]
 
 # language = 'en'
@@ -66,6 +67,10 @@
     "logo_only": True,
 }
 html_logo = "_static/floatcsep_logo.svg"
+html_js_files = [
+    "custom.js",
+]
+
 todo_include_todos = False
 
 copybutton_prompt_text = "$ "  # Text to ignore when copying (for shell commands)
diff --git a/docs/guide/evaluation_config.rst b/docs/guide/evaluation_config.rst
index f17c6e8..89058f3 100644
--- a/docs/guide/evaluation_config.rst
+++ b/docs/guide/evaluation_config.rst
@@ -3,21 +3,20 @@
 Evaluations Definition
 ======================
 
-**floatCSEP** evaluate forecasts using the testing procedures from **pyCSEP** (See `Testing Theory `_). Depending on the forecast type (e.g., **GriddedForecasts** or **CatalogForecasts**), different evaluation functions can be used. T
+**floatCSEP** evaluates forecasts using the routines defined in **pyCSEP** (see `Testing Theory `_). Depending on the forecast type (e.g., **GriddedForecasts** or **CatalogForecasts**), different evaluation functions can be used.
 
-Each evaluation specifies a `func` parameter, representing the evaluation function to be applied, and a `plot_func` parameter for visualizing the results.
+Each evaluation specifies a ``func`` parameter, representing the evaluation function to be applied; optionally, a configuration of that function via ``func_kwargs`` (e.g., number of simulations, confidence intervals); and a ``plot_func`` parameter for visualizing the results. Evaluations for **GriddedForecasts** typically use functions from :mod:`csep.core.poisson_evaluations` or :mod:`csep.core.binomial_evaluations`, while evaluations for **CatalogForecasts** use functions from :mod:`csep.core.catalog_evaluations`.
 
-Evaluations for **GriddedForecasts** typically use functions from :mod:`csep.core.poisson_evaluations` or :mod:`csep.core.binomial_evaluations`, while evaluations for **CatalogForecasts** use functions from :mod:`csep.core.catalog_evaluations`.
+.. important::
+
+    An evaluation in ``test_config`` points to a **pyCSEP** `evaluation function `_ that is valid for the forecast class.
 
-The structure of the evaluation configuration file is similar to the model configuration, with multiple tests, each pointing to a specific evaluation function and plotting method.
 
 **Example Configuration**:
 
 .. code-block:: yaml
+   :caption: test_config.yml
 
-
-  - N-test:
-      func: poisson_evaluations.number_test
-      plot_func: plot_poisson_consistency_test
   - S-test:
       func: poisson_evaluations.spatial_test
       plot_func: plot_poisson_consistency_test
@@ -32,66 +31,157 @@ The structure of the evaluation configuration file is similar to the model confi
 Evaluation Parameters:
 ----------------------
 
+Each evaluation listed in ``test_config`` accepts the following parameters:
+
 .. list-table::
-   :widths: 20 80
+   :widths: 30 80
    :header-rows: 1
 
    * - **Parameter**
     - **Description**
   * - **func** (required)
-     - The evaluation function, specifying which test to run. Must be an available function from the pyCSEP evaluation suite (e.g., `poisson_evaluations.number_test`).
+     - Specifies which evaluation/test function to run. Must be a **pyCSEP** function, given as ``{module}.{function}``
+       (e.g., :func:`poisson_evaluations.number_test <csep.core.poisson_evaluations.number_test>`), or a
+       **floatCSEP** function.
+   * - **func_kwargs**
+     - Keyword arguments to control the specific **func**. For example, :func:`poisson_evaluations.spatial_test <csep.core.poisson_evaluations.spatial_test>` may be configured with ``num_simulations: 2000``.
   * - **plot_func** (required)
-     - The function to plot the evaluation results, specified from the available plotting functions (e.g., `plot_poisson_consistency_test`).
+     - The function to plot the evaluation results, from either the :mod:`csep.utils.plots` module (e.g., :func:`plot_poisson_consistency_test <csep.utils.plots.plot_poisson_consistency_test>`) or the **floatCSEP** :mod:`~floatcsep.utils.helpers` module.
   * - **plot_args**
-     - Arguments passed to customize plot titles, labels, or font size.
+     - Arguments passed to customize the plot function, such as titles, labels, colors, or font sizes. Review the documentation of the respective function.
   * - **plot_kwargs**
-     - Keyword arguments passed to the plotting function for fine-tuning plot appearance (e.g., `one_sided_lower: True`).
+     - Keyword arguments to customize the plot function (e.g., ``one_sided_lower: True``). Review the documentation of the respective function.
   * - **ref_model**
     - A reference model against which the current model is compared in comparative tests (e.g., `Model A`).
   * - **markdown**
     - A description of the test to be used as caption when reporting results
 
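+For instance, a comparative test combining several of these parameters could be configured as follows (the reference model name and keyword values are illustrative and should be adapted to the experiment):
+
+.. code-block:: yaml
+   :caption: test_config.yml
+
+   - T-test:
+       func: poisson_evaluations.paired_t_test
+       func_kwargs:
+         alpha: 0.05          # significance level of the test
+       ref_model: Model A     # must match a model name defined in the experiment
+       plot_func: plot_comparison_test
+       plot_args:
+         title: Information gain per earthquake
+       markdown: Comparison of each model against Model A.
+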
-Evaluations Functions:
-----------------------
-
-Depending on the type of forecast being evaluated, different evaluation functions are used:
-
-1. **GriddedForecasts**:
-
-.. list-table::
-   :widths: 20 80
-   :header-rows: 1
-
-   * - **Function**
-     - **Description**
-   * - **poisson_evaluations.number_test**
-     - Evaluates the forecast by comparing the total number of forecasted events with the observed events using a Poisson distribution.
-   * - **poisson_evaluations.spatial_test**
-     - Compares the spatial distribution of forecasted events to the observed events.
-   * - **poisson_evaluations.magnitude_test**
-     - Evaluates the forecast by comparing the magnitude distribution of forecasted events with observed events.
-   * - **poisson_evaluations.conditional_likelihood_test**
-     - Tests the likelihood of observed events given the forecasted rates, conditioned on the total earthquake occurrences.
-   * - **poisson_evaluations.paired_t_test**
-     - Calculate the information gain between one forecast to a reference (``ref_model``), and test a significant difference by using a paired T-test.
-   * - **binomial_evaluations.binary_spatial_test**
-     - Binary spatial test to compare forecasted and observed event distributions.
-   * - **binomial_evaluations.binary_likelihood_test**
-     - Likelihood test likelihood of observed events given the forecasted rates, assuming a Binary distribution
-   * - **binomial_evaluations.negative_binomial_number_test**
-     - Evaluates the number of events using a negative binomial distribution, comparing observed and forecasted event counts.
-   * - **brier_score**
-     - Uses a quadratic metric rather than logarithmic. Does not penalize false-negatives as much as log-likelihood metrics
-   * - **vector_poisson_t_w_test**
-     - Carries out the paired_t_test and w_test for a single forecast compared to multiple.
-   * - **sequential_likelihood**
-     - Obtain the distribution of log-likelihoods in time.
-   * - **sequential_information_gain**
-     - Obtain the distribution of information gain in time, compared to a ``ref_model``.
-
-
-2. **CatalogForecasts**:
-
-
+Evaluation Functions
+--------------------
+
+**floatCSEP** supports the following evaluations:
+
+.. dropdown:: **Evaluations for GriddedForecasts**
+    :animate: fade-in-slide-down
+    :icon: list-unordered
+
+    .. list-table::
+        :widths: 20 80
+        :header-rows: 1
+
+        * - **Function**
+          - **Evaluates:**
+        * - :func:`poisson_evaluations.number_test <csep.core.poisson_evaluations.number_test>`
+          - the total number of forecasted events compared to the observed events, using a Poisson distribution.
+        * - :func:`poisson_evaluations.spatial_test <csep.core.poisson_evaluations.spatial_test>`
+          - the forecasted spatial distribution relative to the observed events, using a Poisson distribution.
+        * - :func:`poisson_evaluations.magnitude_test <csep.core.poisson_evaluations.magnitude_test>`
+          - the forecasted magnitude distribution relative to the observed events, using a Poisson distribution.
+        * - :func:`poisson_evaluations.conditional_likelihood_test <csep.core.poisson_evaluations.conditional_likelihood_test>`
+          - the likelihood of the observed events given the forecasted rates, conditioned on the total earthquake occurrences, assuming a Poisson distribution.
+        * - :func:`poisson_evaluations.paired_t_test <csep.core.poisson_evaluations.paired_t_test>`
+          - the information gain of one forecast relative to a reference (``ref_model``), testing for a significant difference with a paired t-test.
+        * - :func:`binomial_evaluations.binary_spatial_test <csep.core.binomial_evaluations.binary_spatial_test>`
+          - the forecasted spatial distribution relative to the observed events, assuming a Binary/Bernoulli process.
+        * - :func:`binomial_evaluations.binary_likelihood_test <csep.core.binomial_evaluations.binary_likelihood_test>`
+          - the likelihood of the observed events given the forecasted rates, assuming a Binary distribution.
+        * - :func:`binomial_evaluations.negative_binomial_number_test <csep.core.binomial_evaluations.negative_binomial_number_test>`
+          - the total number of forecasted events compared to the observed events, using a Negative Binomial distribution.
+        * - :func:`brier_score <floatcsep.utils.helpers.brier_score>`
+          - the forecast skill using a quadratic metric rather than a logarithmic one. It does not penalize false negatives as heavily as log-likelihood metrics.
+        * - :func:`vector_poisson_t_w_test <floatcsep.utils.helpers.vector_poisson_t_w_test>`
+          - a forecast's skill against multiple forecasts, by carrying out the ``paired_t_test`` and ``w_test`` jointly.
+        * - :func:`sequential_likelihood <floatcsep.utils.helpers.sequential_likelihood>`
+          - the temporal evolution of log-likelihood scores.
+        * - :func:`sequential_information_gain <floatcsep.utils.helpers.sequential_information_gain>`
+          - the temporal evolution of the information gain relative to a ``ref_model``.
+
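+For instance, one of the time-dependent evaluations above could be configured as follows (the reference model name is illustrative and must match a model defined in the experiment):
+
+.. code-block:: yaml
+   :caption: test_config.yml
+
+   - Sequential_IG:
+       func: sequential_information_gain
+       ref_model: Model A   # illustrative reference model name
+       plot_func: plot_sequential_likelihood
+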
+.. dropdown:: **Evaluations for CatalogForecasts**
+    :animate: fade-in-slide-down
+    :icon: list-unordered
+
+    .. list-table::
+        :widths: 20 80
+        :header-rows: 1
+
+        * - **Function**
+          - **Evaluates:**
+        * - :func:`catalog_evaluations.number_test <csep.core.catalog_evaluations.number_test>`
+          - the total number of forecasted events compared to the observed events in an earthquake catalog.
+        * - :func:`catalog_evaluations.spatial_test <csep.core.catalog_evaluations.spatial_test>`
+          - the spatial distribution of forecasted vs. observed earthquake events in an earthquake catalog.
+        * - :func:`catalog_evaluations.magnitude_test <csep.core.catalog_evaluations.magnitude_test>`
+          - the magnitude distribution of forecasted events compared to those observed in the earthquake catalog.
+        * - :func:`catalog_evaluations.pseudolikelihood_test <csep.core.catalog_evaluations.pseudolikelihood_test>`
+          - the pseudolikelihood of the observed events, given the forecasted synthetic catalogs.
+        * - :func:`catalog_evaluations.calibration_test <csep.core.catalog_evaluations.calibration_test>`
+          - the consistency of multiple test quantiles in time with the expected uniform distribution, using a Kolmogorov-Smirnov test.
+
+.. note::
+
+    Check each function's `docstring` to see which ``func_kwargs`` are compatible with it.
+
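+For instance, a test's simulation settings can be passed through ``func_kwargs`` (the values below are illustrative; check the docstring for the actual defaults):
+
+.. code-block:: yaml
+   :caption: test_config.yml
+
+   - M-test:
+       func: poisson_evaluations.magnitude_test
+       func_kwargs:
+         num_simulations: 2000   # simulated catalogs used to build the test distribution
+         seed: 23                # fixes the random draws, for reproducibility
+       plot_func: plot_poisson_consistency_test
+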
+Plotting Functions
+------------------
+
+**floatCSEP** supports the following plotting functions:
+
+.. dropdown:: Plotting functions
+    :animate: fade-in-slide-down
+    :icon: list-unordered
+
+    .. list-table::
+        :widths: 20 80
+        :header-rows: 1
+
+        * - **Plotting function**
+          - **Compatible with:**
+        * - :obj:`~csep.utils.plots.plot_poisson_consistency_test`
+          - :func:`poisson_evaluations.number_test <csep.core.poisson_evaluations.number_test>`, :func:`poisson_evaluations.spatial_test <csep.core.poisson_evaluations.spatial_test>`, :func:`poisson_evaluations.magnitude_test <csep.core.poisson_evaluations.magnitude_test>`, :func:`poisson_evaluations.conditional_likelihood_test <csep.core.poisson_evaluations.conditional_likelihood_test>`
+        * - :obj:`~csep.utils.plots.plot_consistency_test`
+          - :func:`binomial_evaluations.negative_binomial_number_test <csep.core.binomial_evaluations.negative_binomial_number_test>`, :func:`binomial_evaluations.binary_likelihood_test <csep.core.binomial_evaluations.binary_likelihood_test>`, :func:`binomial_evaluations.binary_spatial_test <csep.core.binomial_evaluations.binary_spatial_test>`, :func:`brier_score <floatcsep.utils.helpers.brier_score>`, :func:`catalog_evaluations.number_test <csep.core.catalog_evaluations.number_test>`, :func:`catalog_evaluations.magnitude_test <csep.core.catalog_evaluations.magnitude_test>`, :func:`catalog_evaluations.spatial_test <csep.core.catalog_evaluations.spatial_test>`, :func:`catalog_evaluations.pseudolikelihood_test <csep.core.catalog_evaluations.pseudolikelihood_test>`
+        * - :obj:`~csep.utils.plots.plot_comparison_test`
+          - :func:`poisson_evaluations.paired_t_test <csep.core.poisson_evaluations.paired_t_test>`
+        * - :obj:`~csep.utils.plots.plot_number_test`
+          - :func:`catalog_evaluations.number_test <csep.core.catalog_evaluations.number_test>`
+        * - :obj:`~csep.utils.plots.plot_magnitude_test`
+          - :func:`catalog_evaluations.magnitude_test <csep.core.catalog_evaluations.magnitude_test>`
+        * - :obj:`~csep.utils.plots.plot_spatial_test`
+          - :func:`catalog_evaluations.spatial_test <csep.core.catalog_evaluations.spatial_test>`
+        * - :obj:`~csep.utils.plots.plot_likelihood_test`
+          - :func:`catalog_evaluations.pseudolikelihood_test <csep.core.catalog_evaluations.pseudolikelihood_test>`
+        * - :obj:`~csep.utils.plots.plot_calibration_test`
+          - :func:`catalog_evaluations.calibration_test <csep.core.catalog_evaluations.calibration_test>`
+        * - :obj:`~floatcsep.utils.helpers.plot_sequential_likelihood`
+          - :func:`sequential_likelihood <floatcsep.utils.helpers.sequential_likelihood>`, :func:`sequential_information_gain <floatcsep.utils.helpers.sequential_information_gain>`
+        * - :obj:`~floatcsep.utils.helpers.plot_matrix_comparative_test`
+          - :func:`vector_poisson_t_w_test <floatcsep.utils.helpers.vector_poisson_t_w_test>`
+
+.. note::
+
+    Check each plot function's `docstring` to see which ``plot_args`` and ``plot_kwargs`` are compatible with it.
+
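+For example, the appearance of a consistency test result can be adjusted as follows (argument values are illustrative):
+
+.. code-block:: yaml
+   :caption: test_config.yml
+
+   - S-test:
+       func: poisson_evaluations.spatial_test
+       plot_func: plot_poisson_consistency_test
+       plot_args:
+         title: Spatial test          # figure title
+         xlabel: Log-likelihood       # horizontal axis label
+       plot_kwargs:
+         one_sided_lower: True        # treat the test as one-sided (lower tail)
+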
+It is also possible to assign two or more plotting functions to a test, whose ``plot_args`` and ``plot_kwargs`` are placed as dictionaries indented beneath each function:
+
+**Example**:
+
+.. code-block:: yaml
+   :caption: test_config.yml
+
+   - Number Test:
+       func: catalog_evaluations.number_test
+       plot_func:
+         - plot_number_test:
+             plot_args:
+               title: Number test distribution
+         - plot_consistency_test:
+             plot_args:
+               linewidth: 2
+             plot_kwargs:
+               one_sided_lower: True
diff --git a/docs/guide/experiment_config.rst b/docs/guide/experiment_config.rst
index 6ade1b7..a5c513a 100644
--- a/docs/guide/experiment_config.rst
+++ b/docs/guide/experiment_config.rst
@@ -23,9 +23,10 @@ Configuration files are written in ``YAML`` format and are divided into differen
 `YAML` (Yet Another Markup Language) is a human-readable format used for configuration files. It uses **key: value** pairs to define settings, and indentation to represent nested structures. Lists are denoted by hyphens (`-`).
 
-**Example Basic Configuration** (``config.yml``):
+**Example Basic Configuration**:
 
 .. code-block:: yaml
+   :caption: config.yml
 
    name: CSEP Experiment
    time_config:
diff --git a/docs/guide/model_config.rst b/docs/guide/model_config.rst
index 5678855..fe47848 100644
--- a/docs/guide/model_config.rst
+++ b/docs/guide/model_config.rst
@@ -11,6 +11,7 @@ In the experiment ``config.yml`` file (See :ref:`experiment_config`), the parame
 **Example**:
 
 .. code-block:: yaml
+   :caption: model_config.yml
 
    - MODEL_1 NAME:
        parameter_1: value
diff --git a/docs/tutorials/case_a.rst b/docs/tutorials/case_a.rst
index 45dd200..a69c15b 100644
--- a/docs/tutorials/case_a.rst
+++ b/docs/tutorials/case_a.rst
@@ -116,7 +116,7 @@ Models
 Evaluations
 ~~~~~~~~~~~
 
-    The experiment's evaluations are defined in the ``tests`` inset. It should be a list of test names, making reference to their function and plotting function. These can be either defined in ``pycsep`` (see :doc:`pycsep:concepts/evaluations`) or manually. In this example, we employ the consistency N-test: its function is :func:`csep.core.poisson_evaluations.number_test`, whereas its plotting function correspond to :func:`csep.utils.plots.plot_poisson_consistency_test`
+    The experiment's evaluations are defined in the ``tests`` inset. It should be a list of test names making reference to their function and plotting function. These can be either from **pyCSEP** (see :doc:`pycsep:concepts/evaluations`) or defined manually. Here, we use the Poisson consistency N-test: its function is :func:`poisson_evaluations.number_test <csep.core.poisson_evaluations.number_test>`, with the plotting function :func:`plot_poisson_consistency_test <csep.utils.plots.plot_poisson_consistency_test>`.
 
 .. literalinclude:: ../../tutorials/case_a/config.yml
    :caption: tutorials/case_a/config.yml
diff --git a/docs/tutorials/case_c.rst b/docs/tutorials/case_c.rst
index d6b74d2..a27f771 100644
--- a/docs/tutorials/case_c.rst
+++ b/docs/tutorials/case_c.rst
@@ -64,7 +64,7 @@ Time
 Evaluations
 ~~~~~~~~~~~
 
-    The experiment's evaluations are defined in ``tests.yml``, which can now include temporal evaluations (see :func:`~floatcsep.utils.helpers.sequential_likelihood`, :func:`~floatcsep.utils.helpers.sequential_information_gain`, :func:`~floatcsep.utils.helpers.plot_sequential_likelihood`).
+    The experiment's evaluations are defined in ``tests.yml``, which can now include temporal evaluations (see :obj:`~floatcsep.utils.helpers.sequential_likelihood`, :obj:`~floatcsep.utils.helpers.sequential_information_gain`, :obj:`~floatcsep.utils.helpers.plot_sequential_likelihood`).
 
 .. literalinclude:: ../../tutorials/case_c/tests.yml
    :language: yaml
@@ -77,7 +77,7 @@ Evaluations
 Results
 -------
 
-The :obj:`~floatcsep.cmd.main.run` command
+The :obj:`~floatcsep.commands.main.run` command
 
 .. code-block:: console
diff --git a/tutorials/case_f/tests.yml b/tutorials/case_f/tests.yml
index 7966964..e2bf37a 100644
--- a/tutorials/case_f/tests.yml
+++ b/tutorials/case_f/tests.yml
@@ -1,19 +1,18 @@
 - Catalog_N-test:
     func: catalog_evaluations.number_test
     plot_func:
-    - plot_number_test:
-        plot_args:
-          title: 1
-          name: 1
-    - plot_consistency_test:
-        plot_kwargs:
-          one_sided_lower: True
+      - plot_number_test:
+          plot_args:
+            title: Test distribution
+      - plot_consistency_test:
+          plot_kwargs:
+            one_sided_lower: True
 
 - Catalog_S-test:
     func: catalog_evaluations.spatial_test
     plot_func:
-    - plot_consistency_test:
-        plot_kwargs:
-          one_sided_lower: True
+      - plot_consistency_test:
+          plot_kwargs:
+            one_sided_lower: True
diff --git a/tutorials/case_g/tests.yml b/tutorials/case_g/tests.yml
index 67315da..445ed39 100644
--- a/tutorials/case_g/tests.yml
+++ b/tutorials/case_g/tests.yml
@@ -1,13 +1,12 @@
 - Catalog_N-test:
     func: catalog_evaluations.number_test
     plot_func:
-    - plot_number_test:
-        plot_args:
-          title: 1
-          name: 1
-    - plot_consistency_test:
-        plot_kwargs:
-          one_sided_lower: True
+      - plot_number_test:
+          plot_args:
+            title: Test distribution
+      - plot_consistency_test:
+          plot_kwargs:
+            one_sided_lower: True